source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
celloServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from cello.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
# Environment variable names used to locate the deployment config.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec


def get_config_file():
    """Return the path of the deployment config file, or None if unset."""
    return environ.get(DEPLOY, None)


def get_service_name():
    """Return the configured service name, or None if unset."""
    return environ.get(SERVICE, None)


def get_config():
    """Load this service's section of the deployment config as a dict.

    Returns None when no config file is configured in the environment.
    The section read is the service name, defaulting to 'cello'.
    """
    config_file = get_config_file()
    if not config_file:
        return None
    parser = ConfigParser()
    parser.read(config_file)
    section = get_service_name() or 'cello'
    return {name: value for name, value in parser.items(section)}
# Module-level singletons: load the config first, then construct the
# implementation object with it. The import is deliberately placed after
# get_config() so the implementation is built with a live config.
config = get_config()
from cello.celloImpl import cello # noqa @IgnorePep8
impl_cello = cello(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles sets, frozensets, and any
    object exposing a toJSONable() hook."""

    def default(self, obj):
        # Sets have no JSON representation; serialize them as lists.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to serialization via toJSONable().
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Anything else: defer to the base class (raises TypeError).
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
# JSON-RPC service that threads a per-request context object (ctx)
# through to every registered method, unlike the stock JSONRPCService.
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
# NOTE(review): the arity checks subtract 1 because ctx is injected as
# the first positional argument and is not counted in the JSON params.
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
# Wrap implementation errors in a JSON-RPC ServerError carrying the
# traceback, so the client receives a structured failure.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
# Optional per-method parameter type validation (see Application.__init__
# where 'types' is registered alongside each method).
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
# Per-call context handed to every service method: client identity,
# auth token, provenance, and logging helpers bound to one logger.
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
# Levels (numeric or symbolic) that pass straight through to the logger.
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
# User-facing debug levels 1-3 map onto logger levels 7-9.
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
# If an SDK callback server is configured, fetch provenance from it;
# otherwise fall back to the locally recorded provenance list.
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
# A JSON body on 500 carries a structured JSON-RPC error.
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        # Normalize message/data to strings so __str__ never fails on None.
        self.message = message if message else ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP extraction from the WSGI environ.

    X-Forwarded-For / X-Real-IP headers are honoured unless the service
    config explicitly sets dont_trust_x_ip_headers to the string 'true'.
    """
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            # First entry of the comma-separated chain is the origin client.
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
# Keep the server log pointed at the same file as the user log.
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'cello'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
# Map of RPC method name -> auth requirement ('required'; methods not
# present default to 'none' in __call__).
self.method_authentication = dict()
self.rpc_service.add(impl_cello.run_cello,
name='cello.run_cello',
types=[dict])
self.method_authentication['cello.run_cello'] = 'required' # noqa
self.rpc_service.add(impl_cello.status,
name='cello.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# WSGI entry point: parse the JSON-RPC body, enforce per-method auth,
# dispatch to the service, and serialize the result or a JSON error.
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'cello ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
# With auth_req == 'optional', a bad token is
# silently ignored by design.
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
# Fill version-specific error fields: JSON-RPC 1.1 uses
# 'version'/'error.error', 2.0 uses 'jsonrpc'/'error.data'.
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
# Local ISO timestamp plus the local UTC offset rounded to minutes.
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# The single module-level WSGI application instance used by uwsgi, the
# built-in server, and the async CLI entry point below.
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
# NOTE(review): config values come from ConfigParser as strings, so even
# the string 'false' would be truthy here — confirm that is intended.
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
# Handle on the background server process; None while no server is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
# port=0 asks the OS for any free port; read the real one back below.
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
    """Stop a server previously started with start_server(newprocess=True).

    Raises RuntimeError when no server process is running (previously this
    fell through to an AttributeError on None).
    """
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
"""Execute one JSON-RPC request read from input_file_path and write the
JSON response to output_file_path. Returns 0 on success, 500 on error."""
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
# Fill in defaults expected by the RPC machinery.
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
# Async-job mode: invoked as
#   <prog> <input.json> <output.json> [token-or-token-file]
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
# Third argument is either a token file path or the token itself.
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
# Server mode: parse --host/--port and run the wsgiref server.
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
Engine.py | # coding: utf-8
# Author: Lyderic LEFEBVRE
# Twitter: @lydericlefebvre
# Mail: lylefebvre.infosec@gmail.com
# LinkedIn: https://www.linkedin.com/in/lydericlefebvre
# Imports
import logging, traceback
from core.User import *
from core.Resources import *
from core.Targets import *
from core.SprayLove import *
from core.Colors import *
from core.Utils import *
from multiprocessing import Process
def run(args):
    """Spray credentials at every pwnable target, one process per target.

    args must provide: domain, username, password, targets, remove, wait.
    BUG FIX: the generic exception handler previously logged the colour
    constant ``red`` instead of the caught exception, dropping the error.
    """
    jobs = []
    user = User(args.domain, args.username, args.password)
    local_ip = retrieveMyIP()
    try:
        targets = listPwnableTargets(args.targets, user)
        logging.warning("%sLet's spray some love... Be patient." % (warningGre))
        for target in targets:
            jobs.append(Process(target=sprayLove, args=(user, target, local_ip, args.remove)))
            jobs[-1].start()
        joinThreads(jobs, args.wait)
        logging.warning("\n%sCredentials logged into: %s" % (warningGre, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'misc', 'results', 'creds.txt')))
    except KeyboardInterrupt:
        logging.warning("%sKeyboard interrupt. Exiting." % (warningRed))
    except Exception as e:
        # Log the actual error (colourized), not just the colour code.
        logging.warning("%sA problem occurs. Err: %s%s" % (warningRed, red, e))
        logging.debug("%s==== STACKTRACE ====" % (blue))
        # Only dump the stack trace when DEBUG (level 10) logging is on.
        if logging.getLogger().getEffectiveLevel() <= 10:
            traceback.print_exc(file=sys.stdout)
        logging.debug("%s==== STACKTRACE ====%s" % (blue, white))
    finally:
        # Always reap child processes, even on interrupt/failure.
        exit_gracefully(jobs, 10)
|
python_exporter.py | #!/usr/bin/env python
# Copyright (c) 2018-2020 The Mode Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Export shell and Python functions into Prometheus.
"""
import sys
import os
import yaml
import imp
import subprocess
import operator
import signal
import getopt
from multiprocessing import Process
from flask import *
# Stop logging all the request messages
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Globals
# Parsed YAML configs, keyed by file path; set once in Main().
CONFIG_DATA = None
# Cache for importing Python modules. So we dont always re-import them
MODULE_CACHE = {}
# Listening for Flask
LISTEN_PORT = None
LISTEN_ON = None
# When we are quitting, this is true
CONTROL_QUITTING = False
def GetQuitting():
"""Are we quitting? Loaded modules can check."""
global CONTROL_QUITTING
return CONTROL_QUITTING
def ProcessYamls(config_data):
"""Process all the YAML data, starting Exporters as needed."""
exporter_port = {}
for path, data in config_data.items():
# print 'ProcessYamls: %s (%s)' % (path, data['port'])
export_list = []
for command_data in data['commands']:
export_data = ExecuteCommand(command_data)
# Append to list in place
export_list += export_data
exporter_port[data['port']] = export_list
output = ''
# NOTE(review): output for every port is concatenated into a single
# payload; per-port serving is not implemented (see Main()).
for port, export_port_data in exporter_port.items():
# print 'Port: %s' % port
output += ExporterFormat(export_port_data)
# print output
return output
def ExporterFormat(export_port_data):
    """Render a list of metric dicts into Prometheus exposition text.

    Items are sorted (and thus grouped) by metric name, and the
    HELP/TYPE header lines are emitted only once per metric.
    """
    export_port_data.sort(key=operator.itemgetter('metric'))
    seen_metrics = {}
    lines = []
    for item in export_port_data:
        if item['metric'] not in seen_metrics:
            seen_metrics[item['metric']] = True
            if 'help' in item:
                lines.append('# HELP %s\n' % item['help'])
            if 'type' in item:
                lines.append('# TYPE %s\n' % item['type'])
        # Metric - Labelset - Value
        lines.append('%s%s %s\n' % (item['metric'],
                                    FormatLabelset(item['labelset']),
                                    item['value']))
    return ''.join(lines)


def FormatLabelset(labelset):
    """Format a dict of labels as a Prometheus labelset: {k="v",...},
    with keys in sorted order."""
    pairs = ['%s="%s"' % (key, labelset[key])
             for key in sorted(labelset.keys())]
    return '{%s}' % ','.join(pairs)
def ExecuteCommand(command_data):
"""Execute one of our YAML commands, and parse it for exporter data"""
# If we are executing a Shell Command, handle that
if 'shell' in command_data:
# Optional %-interpolation of shell_vars into the command string.
if 'shell_vars' in command_data and len(command_data['shell_vars']) > 0:
shell = command_data['shell'] % command_data['shell_vars']
else:
shell = command_data['shell']
# print 'Execute: %s' % shell
# NOTE(review): shell=True with a config-supplied string — the YAML is
# trusted input here; never point this at untrusted data.
output = subprocess.Popen(shell, shell=True, stdout=subprocess.PIPE).stdout.read()
# Else, we are not executing a Shell Command, so output=None
else:
output = None
# print 'Output: %s' % output
# The pluggable module turns raw command output into exporter records.
command_module = LoadModule(command_data['module'])
export_data = command_module.Parse(output, command_data)
# print 'Export Data: %s' % export_data
return export_data
def LoadModule(path):
"""Load the pluggable module.
Results are cached in MODULE_CACHE, so each module is imported once
per process.
"""
global MODULE_CACHE
if path in MODULE_CACHE:
return MODULE_CACHE[path]
print 'Load Module: %s' % path
module = imp.load_source(path, path)
# Give the loaded module access to the quit-flag accessor.
module.GetQuitting = GetQuitting
print 'Load Module: Result: %s' % module
MODULE_CACHE[path] = module
return module
def Usage(error=None):
# NOTE(review): dead code — this definition is shadowed by the later
# Usage() defined further down in this module; the second one wins.
if error:
status = 1
print '\nERROR: %s' % error
else:
status = 0
print '\nusage: %s <yaml> <optional yaml>...' % os.path.basename(sys.argv[0])
sys.exit(status)
def RunFlask(port, listen_on='0.0.0.0'):
"""Run the Flask server. Implement all functions as sub-functions so we can set the port dynamically"""
# An explicit None (passed through from the config) also falls back to
# listening on all interfaces.
if listen_on == None:
listen_on = '0.0.0.0'
print 'Starting Flask: %s (%s)' % (port, listen_on)
app = Flask(__name__)
# app.add_url_rule('/', endpoint='index', view_func=RedirectToMetric)
# app.add_url_rule('/metric', endpoint='metric', view_func=Metric)
app.add_url_rule('/', endpoint='RedirectToMetric', view_func=RedirectToMetric)
app.add_url_rule('/metrics', endpoint='Metric', view_func=Metric)
app.run(host=listen_on, port=port, threaded=True)
def RedirectToMetric():
# Flask view for '/': send clients to the canonical /metrics endpoint.
return redirect('/metrics')
def Metric():
try:
output = ProcessYamls(CONFIG_DATA)
response = Response(output, mimetype='text/plain')
except Exception, e:
print 'Metric Exception: %s' % e
return response
def QuittingHandler(signum, frame):
"""Signal handler (SIGTERM/SIGINT): set the global quit flag, then abort
the process by raising out of the handler."""
global CONTROL_QUITTING
print 'Quit Signal: %s' % signum
CONTROL_QUITTING = True
# Sleep a little, and then exit
# time.sleep(0) # No need to delay, leaving so this can be used as a template/pattern
raise RuntimeError('Quitting')
def IgnoreHandler(signum, frame):
"""Signal handler that logs and otherwise ignores the signal (SIGHUP)."""
print 'Ignore Signal: %s' % signum
def Go():
"""Run in our own process, launching off of Main()"""
# Reads the listen address/port that Main() stored in module globals,
# since this may run in a child process with no arguments.
global LISTEN_PORT, LISTEN_ON
# Run the Flask server
RunFlask(LISTEN_PORT, LISTEN_ON)
def Usage(error=None):
"""Print usage (optionally with an error) and exit. This redefinition
shadows the earlier Usage() above; this is the one Main() calls."""
if error:
exit_code = 1
print 'Error: %s\n' % error
else:
exit_code = 0
print 'Usage: %s <options>' % os.path.basename(sys.argv[0])
print ''
print ' -h --help Help'
print ' --daemon Use python multiprocessing to daemonize'
print
sys.exit(exit_code)
def Main(args=None):
global CONFIG_DATA
global LISTEN_PORT, LISTEN_ON
port = None
if not args:
args = []
(options, args) = getopt.getopt(args, 'h', ['help', 'daemon'])
if len(args) < 1:
Usage("Need at least 1 argument. 0 given.")
# Run as a daemon with Python Multiprocessing. Not useful under systemd, we lose the logs
run_as_daemon = False
for option, value in options:
if option in ('-h', '--help'):
Usage()
elif option in ('--daemon'):
run_as_daemon = True
config_data = {}
# Change the working directory based on the executable, so we can use relative paths for modules
cwd = os.path.dirname(sys.argv[0])
os.chdir(cwd)
print 'Working Directory: %s' % cwd
# print 'Data Directory: %s' % os.path.abspath(os.path.dirname(sys.argv[0]))
# print 'User Directory: %s' % os.path.abspath(os.path.expanduser("~"))
# Listen on IP
listen_on = None
for arg in args:
if not os.path.isfile(arg):
Usage('%s is not a valid file')
with open(arg, "r") as stream:
try:
content = yaml.load(stream)
except Exception, e:
Usage("Failed to load YAML file: %s :: %s" % (arg, e))
if port == None:
port = int(content['port'])
config_data[arg] = content
# If we specify a listening port, take the first one
if 'listen_on' in content and listen_on == None:
listen_on = content['listen_on']
if len(args) > 1:
print 'All exporter results will return on the same port. Multi-port not-yet implemented.'
# Send this to our global, because of the Flask disconnect...
CONFIG_DATA = config_data
# Set Flask info
LISTEN_ON = listen_on
LISTEN_PORT = port
# Handle the program quitting, so we can gracefully close our threads
signal.signal(signal.SIGTERM, QuittingHandler) # Kill -15
signal.signal(signal.SIGINT, QuittingHandler) # Keyboard Interrupt
signal.signal(signal.SIGHUP, IgnoreHandler) # Terminal Hangup, keep on going
# If we arent running as a daemon, just run. This is better under systemd
if not run_as_daemon:
Go()
# Else, run as a daeomn with Python Multiprocessing
else:
# Initialize everything
p = Process(target=Go)
p.start()
# When the above function completes, we will terminate it
p.terminate()
# Exit without error
sys.exit(0)
if __name__ == '__main__':
Main(sys.argv[1:])
|
PCpc.py | import os
import wx
from wx import adv
from wx.core import Bitmap, MenuItem
from photoshop import PhotoshopConnection
from PCSocketServer import PCSocketServer
from PCpc_UIClasses import *
class TaskBarIcon(wx.adv.TaskBarIcon):
    """System-tray icon for PhantomCaptcher.

    Shows an 'on'/'off' icon depending on whether Photoshop is reachable
    and exposes a popup menu (connection test, about, exit).
    """

    TRAY_ICON_PSoff = None
    TRAY_ICON_PSon = None

    def __init__(self):
        super(TaskBarIcon, self).__init__()
        self.TRAY_ICON_PSoff = wx.Icon()
        self.TRAY_ICON_PSon = wx.Icon()
        self.TRAY_ICON_PSon.CopyFromBitmap(wx.Bitmap('icons_/icon_PSon.png'))
        self.TRAY_ICON_PSoff.CopyFromBitmap(wx.Bitmap('icons_/icon_PSoff.png'))
        self.SetIcon(self.TRAY_ICON_PSon, 'PhantomCaptcher')
        self.Bind(wx.adv.EVT_TASKBAR_LEFT_DOWN, self.on_left_down)

    def on_left_down(self, event):
        print('Tray icon was left-clicked.')

    def on_ABOUT(self, event):
        print('"ABOUT" was left-clicked.')
        wx.MessageBox("Phantom Captcher 幻影捕手","About",wx.OK | wx.ICON_INFORMATION)

    def on_ConnectPhotoshop(self, event):
        # Probe the Photoshop remote connection and reflect the result
        # in the tray icon.
        try:
            with PhotoshopConnection(password='123456') as conn:
                pass
            self.SetIcon(self.TRAY_ICON_PSon,'PhantomCaptcher')
            wx.MessageBox("成功與Photoshop連線!","PS連線測試",wx.OK | wx.ICON_INFORMATION)
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # SystemExit and KeyboardInterrupt.
            self.SetIcon(self.TRAY_ICON_PSoff,'PhantomCaptcher')
            wx.MessageBox("與Photoshop連線失敗。","PS連線測試",wx.OK | wx.ICON_ERROR)

    def on_exit(self, event):
        # Destroy the tray icon on the event loop, then hard-exit the
        # whole process (os._exit also stops the socket-server thread).
        wx.CallAfter(self.Destroy)
        os._exit(0)

    def create_menu_item(self, menu, label, func=None, ifClickable=True):
        """Append a styled menu item; non-clickable entries get the
        darker 'header' colours."""
        item = wx.MenuItem(menu, -1, label)
        if ifClickable:
            item.SetBackgroundColour(wx.Colour(0,0,100))
            item.SetTextColour(wx.Colour(180,180,0))
        else:
            item.SetBackgroundColour(wx.Colour(0,0,64))
            item.SetTextColour(wx.Colour(250,250,0))
        menu.Bind(wx.EVT_MENU, func, id=item.GetId())
        menu.Append(item)
        return item

    def CreatePopupMenu(self):
        menu = wx.Menu()
        self.create_menu_item(menu,'Phantom Captcher',ifClickable=False)
        # menu.AppendSeparator()
        self.create_menu_item(menu, 'PS連線測試', self.on_ConnectPhotoshop)
        self.create_menu_item(menu, '檢視個人素材', self.on_ABOUT)
        # menu.AppendSeparator()
        self.create_menu_item(menu, 'Exit', self.on_exit)
        return menu
class TaskBarApp(wx.App):
    """Minimal wx application that hosts only the tray icon."""

    def OnInit(self):
        # A hidden top-level frame keeps the event loop alive for the
        # tray icon; the icon itself registers with wx on construction.
        self.SetTopWindow(wx.Frame(None, -1))
        TaskBarIcon()
        return True
###########################################################################
# Registry of worker entry points; a True value marks the entry point as
# enabled. Each value is later replaced by its running Thread object.
threadDict_={}
###########################################################################
#- def runTaskBarApp() ------------------------------------------------
def runTaskBarApp():
# Show the login dialog first, then run the tray-icon main loop.
app = TaskBarApp()
dialog=LoginDialog()
result = dialog.ShowModal()
print('login ## ',result)
app.MainLoop()
threadDict_[runTaskBarApp]=True
#- def runSocketServer() --------------------------------------------------
def runSocketServer():
PCSocketServer().run()
threadDict_[runSocketServer]=True
# ̄ ̄ for test  ̄ ̄
# threadDict_[runSocketServer]=False
# threadDict_[runTaskBarApp]=False
#__ for test __
import threading
# Launch every enabled entry point on its own thread; each dict value is
# overwritten with its Thread. NOTE(review): wx GUIs generally expect to
# run on the main thread — confirm this works on the target OS.
for func_ in threadDict_ :
if threadDict_[func_]:
threadDict_[func_] = threading.Thread(target = func_)
threadDict_[func_].start()
test_threading.py | """
Tests for the threading module.
"""
import test.support
from test.support import verbose, import_module, cpython_only, unlink
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
import traceback
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable counter."""

    def __init__(self):
        self.value = 0

    def _add(self, delta):
        # Single mutation point shared by inc() and dec().
        self.value += delta

    def inc(self):
        self._add(1)

    def dec(self):
        self._add(-1)

    def get(self):
        return self.value
class TestThread(threading.Thread):
# Worker used by ThreadTests: bumps a shared counter while holding a
# semaphore that caps how many workers run concurrently.
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
# The bounded semaphore admits at most 3 workers at once.
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
# Snapshot live threads before each test and clean up/reap children
# afterwards, so leaked threads cannot pollute later tests.
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
    """Core behavioral tests for threading.Thread and module internals
    (ident/native_id, repr states, tstate lock, fork interaction, shutdown)."""
    # Create a bunch of threads, let each do some work, wait until all are
    # done.
    def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10
        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()
        threads = []
        for i in range(NUMTASKS):
            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
            threads.append(t)
            # ident is only assigned once the thread actually starts.
            self.assertIsNone(t.ident)
            self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
            t.start()
        if hasattr(threading, 'get_native_id'):
            # Each thread (plus the main thread) must have a distinct
            # OS-level native id.
            native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
            self.assertNotIn(None, native_ids)
            self.assertEqual(len(native_ids), NUMTASKS + 1)
        if verbose:
            print('waiting for all tasks to complete')
        for t in threads:
            t.join()
            self.assertFalse(t.is_alive())
            self.assertNotEqual(t.ident, 0)
            # ident remains set after the thread has finished.
            self.assertIsNotNone(t.ident)
            self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
        if verbose:
            print('all tasks done')
        self.assertEqual(numrunning.get(), 0)
    def test_ident_of_no_threading_threads(self):
        # The ident still must work for the main thread and dummy threads.
        self.assertIsNotNone(threading.currentThread().ident)
        def f():
            ident.append(threading.currentThread().ident)
            done.set()
        done = threading.Event()
        ident = []
        with support.wait_threads_exit():
            tid = _thread.start_new_thread(f, ())
            done.wait()
        self.assertEqual(ident[0], tid)
        # Kill the "immortal" _DummyThread
        del threading._active[ident[0]]
    # run with a small(ish) thread stack size (256 KiB)
    def test_various_ops_small_stack(self):
        if verbose:
            print('with 256 KiB thread stack size...')
        try:
            threading.stack_size(262144)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        # Restore the platform default stack size.
        threading.stack_size(0)
    # run with a large thread stack size (1 MiB)
    def test_various_ops_large_stack(self):
        if verbose:
            print('with 1 MiB thread stack size...')
        try:
            threading.stack_size(0x100000)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        # Restore the platform default stack size.
        threading.stack_size(0)
    def test_foreign_thread(self):
        # Check that a "foreign" thread can use the threading module.
        def f(mutex):
            # Calling current_thread() forces an entry for the foreign
            # thread to get made in the threading._active map.
            threading.current_thread()
            mutex.release()
        mutex = threading.Lock()
        mutex.acquire()
        with support.wait_threads_exit():
            tid = _thread.start_new_thread(f, (mutex,))
            # Wait for the thread to finish.
            mutex.acquire()
        self.assertIn(tid, threading._active)
        self.assertIsInstance(threading._active[tid], threading._DummyThread)
        #Issue 29376
        self.assertTrue(threading._active[tid].is_alive())
        self.assertRegex(repr(threading._active[tid]), '_DummyThread')
        del threading._active[tid]
    # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
    # exposed at the Python level. This test relies on ctypes to get at it.
    def test_PyThreadState_SetAsyncExc(self):
        ctypes = import_module("ctypes")
        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
        set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
        class AsyncExc(Exception):
            pass
        exception = ctypes.py_object(AsyncExc)
        # First check it works when setting the exception from the same thread.
        tid = threading.get_ident()
        self.assertIsInstance(tid, int)
        self.assertGreater(tid, 0)
        try:
            result = set_async_exc(tid, exception)
            # The exception is async, so we might have to keep the VM busy until
            # it notices.
            while True:
                pass
        except AsyncExc:
            pass
        else:
            # This code is unreachable but it reflects the intent. If we wanted
            # to be smarter the above loop wouldn't be infinite.
            self.fail("AsyncExc not raised")
        try:
            self.assertEqual(result, 1) # one thread state modified
        except UnboundLocalError:
            # The exception was raised too quickly for us to get the result.
            pass
        # `worker_started` is set by the thread when it's inside a try/except
        # block waiting to catch the asynchronously set AsyncExc exception.
        # `worker_saw_exception` is set by the thread upon catching that
        # exception.
        worker_started = threading.Event()
        worker_saw_exception = threading.Event()
        class Worker(threading.Thread):
            def run(self):
                self.id = threading.get_ident()
                self.finished = False
                try:
                    while True:
                        worker_started.set()
                        time.sleep(0.1)
                except AsyncExc:
                    self.finished = True
                    worker_saw_exception.set()
        t = Worker()
        t.daemon = True # so if this fails, we don't hang Python at shutdown
        t.start()
        if verbose:
            print(" started worker thread")
        # Try a thread id that doesn't make sense.
        if verbose:
            print(" trying nonsensical thread id")
        result = set_async_exc(-1, exception)
        self.assertEqual(result, 0) # no thread states modified
        # Now raise an exception in the worker thread.
        if verbose:
            print(" waiting for worker thread to get started")
        ret = worker_started.wait()
        self.assertTrue(ret)
        if verbose:
            print(" verifying worker hasn't exited")
        self.assertFalse(t.finished)
        if verbose:
            print(" attempting to raise asynch exception in worker")
        result = set_async_exc(t.id, exception)
        self.assertEqual(result, 1) # one thread state modified
        if verbose:
            print(" waiting for worker to say it caught the exception")
        worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
        self.assertTrue(t.finished)
        if verbose:
            print(" all OK -- joining worker")
        if t.finished:
            t.join()
        # else the thread is still running, and we have no way to kill it
    def test_limbo_cleanup(self):
        # Issue 7481: Failure to start thread should cleanup the limbo map.
        def fail_new_thread(*args):
            raise threading.ThreadError()
        _start_new_thread = threading._start_new_thread
        threading._start_new_thread = fail_new_thread
        try:
            t = threading.Thread(target=lambda: None)
            self.assertRaises(threading.ThreadError, t.start)
            self.assertFalse(
                t in threading._limbo,
                "Failed to cleanup _limbo map on failure of Thread.start().")
        finally:
            threading._start_new_thread = _start_new_thread
    def test_finalize_running_thread(self):
        # Issue 1402: the PyGILState_Ensure / _Release functions may be called
        # very late on python exit: on deallocation of a running thread for
        # example.
        import_module("ctypes")
        rc, out, err = assert_python_failure("-c", """if 1:
            import ctypes, sys, time, _thread
            # This lock is used as a simple event variable.
            ready = _thread.allocate_lock()
            ready.acquire()
            # Module globals are cleared before __del__ is run
            # So we save the functions in class dict
            class C:
                ensure = ctypes.pythonapi.PyGILState_Ensure
                release = ctypes.pythonapi.PyGILState_Release
                def __del__(self):
                    state = self.ensure()
                    self.release(state)
            def waitingThread():
                x = C()
                ready.release()
                time.sleep(100)
            _thread.start_new_thread(waitingThread, ())
            ready.acquire() # Be sure the other thread is waiting.
            sys.exit(42)
            """)
        self.assertEqual(rc, 42)
    def test_finalize_with_trace(self):
        # Issue1733757
        # Avoid a deadlock when sys.settrace steps into threading._shutdown
        assert_python_ok("-c", """if 1:
            import sys, threading
            # A deadlock-killer, to prevent the
            # testsuite to hang forever
            def killer():
                import os, time
                time.sleep(2)
                print('program blocked; aborting')
                os._exit(2)
            t = threading.Thread(target=killer)
            t.daemon = True
            t.start()
            # This is the trace function
            def func(frame, event, arg):
                threading.current_thread()
                return func
            sys.settrace(func)
            """)
    def test_join_nondaemon_on_shutdown(self):
        # Issue 1722344
        # Raising SystemExit skipped threading._shutdown
        rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            from time import sleep
            def child():
                sleep(1)
                # As a non-daemon thread we SHOULD wake up and nothing
                # should be torn down yet
                print("Woke up, sleep function is:", sleep)
            threading.Thread(target=child).start()
            raise SystemExit
            """)
        self.assertEqual(out.strip(),
            b"Woke up, sleep function is: <built-in function sleep>")
        self.assertEqual(err, b"")
    def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getswitchinterval()
        try:
            for i in range(1, 100):
                # Try a couple times at each thread-switching interval
                # to get more interleavings.
                sys.setswitchinterval(i * 0.0002)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setswitchinterval(old_interval)
    def test_no_refcycle_through_target(self):
        """A Thread must not keep its target (and its closure) alive after
        the thread completes, even through a reference cycle."""
        class RunSelfFunction(object):
            def __init__(self, should_raise):
                # The links in this refcycle from Thread back to self
                # should be cleaned up when the thread completes.
                self.should_raise = should_raise
                self.thread = threading.Thread(target=self._run,
                                               args=(self,),
                                               kwargs={'yet_another':self})
                self.thread.start()
            def _run(self, other_ref, yet_another):
                if self.should_raise:
                    raise SystemExit
        cyclic_object = RunSelfFunction(should_raise=False)
        weak_cyclic_object = weakref.ref(cyclic_object)
        cyclic_object.thread.join()
        del cyclic_object
        self.assertIsNone(weak_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_cyclic_object())))
        raising_cyclic_object = RunSelfFunction(should_raise=True)
        weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
        raising_cyclic_object.thread.join()
        del raising_cyclic_object
        self.assertIsNone(weak_raising_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_raising_cyclic_object())))
    def test_old_threading_api(self):
        # Just a quick sanity check to make sure the old method names are
        # still present
        t = threading.Thread()
        t.isDaemon()
        t.setDaemon(True)
        t.getName()
        t.setName("name")
        e = threading.Event()
        e.isSet()
        threading.activeCount()
    def test_repr_daemon(self):
        """repr(Thread) should mention 'daemon' only for daemon threads."""
        t = threading.Thread()
        self.assertNotIn('daemon', repr(t))
        t.daemon = True
        self.assertIn('daemon', repr(t))
    def test_daemon_param(self):
        """The daemon flag can be set through the constructor."""
        t = threading.Thread()
        self.assertFalse(t.daemon)
        t = threading.Thread(daemon=False)
        self.assertFalse(t.daemon)
        t = threading.Thread(daemon=True)
        self.assertTrue(t.daemon)
    @unittest.skipUnless(hasattr(os, 'fork'), 'needs os.fork()')
    def test_fork_at_exit(self):
        # bpo-42350: Calling os.fork() after threading._shutdown() must
        # not log an error.
        code = textwrap.dedent("""
            import atexit
            import os
            import sys
            from test.support import wait_process
            # Import the threading module to register its "at fork" callback
            import threading
            def exit_handler():
                pid = os.fork()
                if not pid:
                    print("child process ok", file=sys.stderr, flush=True)
                    # child process
                    sys.exit()
                else:
                    wait_process(pid, exitcode=0)
            # exit_handler() will be called after threading._shutdown()
            atexit.register(exit_handler)
        """)
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err.rstrip(), b'child process ok')
    @unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
    def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import _thread, threading, os, time
            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)
            evt = threading.Event()
            _thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    def test_is_alive_after_fork(self):
        # Try hard to trigger #18418: is_alive() could sometimes be True on
        # threads that vanished after a fork.
        old_interval = sys.getswitchinterval()
        self.addCleanup(sys.setswitchinterval, old_interval)
        # Make the bug more likely to manifest.
        test.support.setswitchinterval(1e-6)
        for i in range(20):
            t = threading.Thread(target=lambda: None)
            t.start()
            pid = os.fork()
            if pid == 0:
                # In the child the worker thread no longer exists, so
                # is_alive() must report False (exit code 10 == success).
                os._exit(11 if t.is_alive() else 10)
            else:
                t.join()
                support.wait_process(pid, exitcode=10)
    def test_main_thread(self):
        """main_thread() identifies the interpreter's main thread and is
        distinct from worker threads."""
        main = threading.main_thread()
        self.assertEqual(main.name, 'MainThread')
        self.assertEqual(main.ident, threading.current_thread().ident)
        self.assertEqual(main.ident, threading.get_ident())
        def f():
            self.assertNotEqual(threading.main_thread().ident,
                                threading.current_thread().ident)
        th = threading.Thread(target=f)
        th.start()
        th.join()
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_main_thread_after_fork(self):
        """After fork() from the main thread, the child's main thread is the
        current thread."""
        code = """if 1:
            import os, threading
            from test import support
            pid = os.fork()
            if pid == 0:
                main = threading.main_thread()
                print(main.name)
                print(main.ident == threading.current_thread().ident)
                print(main.ident == threading.get_ident())
            else:
                support.wait_process(pid, exitcode=0)
        """
        _, out, err = assert_python_ok("-c", code)
        data = out.decode().replace('\r', '')
        self.assertEqual(err, b"")
        self.assertEqual(data, "MainThread\nTrue\nTrue\n")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_main_thread_after_fork_from_nonmain_thread(self):
        """After fork() from a worker thread, that worker becomes the
        child's main thread (keeping its original name)."""
        code = """if 1:
            import os, threading, sys
            from test import support
            def f():
                pid = os.fork()
                if pid == 0:
                    main = threading.main_thread()
                    print(main.name)
                    print(main.ident == threading.current_thread().ident)
                    print(main.ident == threading.get_ident())
                    # stdout is fully buffered because not a tty,
                    # we have to flush before exit.
                    sys.stdout.flush()
                else:
                    support.wait_process(pid, exitcode=0)
            th = threading.Thread(target=f)
            th.start()
            th.join()
        """
        _, out, err = assert_python_ok("-c", code)
        data = out.decode().replace('\r', '')
        self.assertEqual(err, b"")
        self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
    def test_main_thread_during_shutdown(self):
        # bpo-31516: current_thread() should still point to the main thread
        # at shutdown
        code = """if 1:
            import gc, threading
            main_thread = threading.current_thread()
            assert main_thread is threading.main_thread() # sanity check
            class RefCycle:
                def __init__(self):
                    self.cycle = self
                def __del__(self):
                    print("GC:",
                          threading.current_thread() is main_thread,
                          threading.main_thread() is main_thread,
                          threading.enumerate() == [main_thread])
            RefCycle()
            gc.collect() # sanity check
            x = RefCycle()
        """
        _, out, err = assert_python_ok("-c", code)
        data = out.decode()
        self.assertEqual(err, b"")
        self.assertEqual(data.splitlines(),
                         ["GC: True True True"] * 2)
    def test_finalization_shutdown(self):
        # bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
        # until Python thread states of all non-daemon threads get deleted.
        #
        # Test similar to SubinterpThreadingTests.test_threads_join_2(), but
        # test the finalization of the main interpreter.
        code = """if 1:
            import os
            import threading
            import time
            import random
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            class Sleeper:
                def __del__(self):
                    random_sleep()
            tls = threading.local()
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_Finalize() is called.
                random_sleep()
                tls.x = Sleeper()
                random_sleep()
            threading.Thread(target=f).start()
            random_sleep()
        """
        rc, out, err = assert_python_ok("-c", code)
        self.assertEqual(err, b"")
    def test_tstate_lock(self):
        # Test an implementation detail of Thread objects.
        started = _thread.allocate_lock()
        finish = _thread.allocate_lock()
        started.acquire()
        finish.acquire()
        def f():
            started.release()
            finish.acquire()
            time.sleep(0.01)
        # The tstate lock is None until the thread is started
        t = threading.Thread(target=f)
        self.assertIs(t._tstate_lock, None)
        t.start()
        started.acquire()
        self.assertTrue(t.is_alive())
        # The tstate lock can't be acquired when the thread is running
        # (or suspended).
        tstate_lock = t._tstate_lock
        self.assertFalse(tstate_lock.acquire(timeout=0), False)
        finish.release()
        # When the thread ends, the state_lock can be successfully
        # acquired.
        self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
        # But is_alive() is still True: we hold _tstate_lock now, which
        # prevents is_alive() from knowing the thread's end-of-life C code
        # is done.
        self.assertTrue(t.is_alive())
        # Let is_alive() find out the C code is done.
        tstate_lock.release()
        self.assertFalse(t.is_alive())
        # And verify the thread disposed of _tstate_lock.
        self.assertIsNone(t._tstate_lock)
        t.join()
    def test_repr_stopped(self):
        # Verify that "stopped" shows up in repr(Thread) appropriately.
        started = _thread.allocate_lock()
        finish = _thread.allocate_lock()
        started.acquire()
        finish.acquire()
        def f():
            started.release()
            finish.acquire()
        t = threading.Thread(target=f)
        t.start()
        started.acquire()
        self.assertIn("started", repr(t))
        finish.release()
        # "stopped" should appear in the repr in a reasonable amount of time.
        # Implementation detail: as of this writing, that's trivially true
        # if .join() is called, and almost trivially true if .is_alive() is
        # called. The detail we're testing here is that "stopped" shows up
        # "all on its own".
        LOOKING_FOR = "stopped"
        for i in range(500):
            if LOOKING_FOR in repr(t):
                break
            time.sleep(0.01)
        self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
        t.join()
    def test_BoundedSemaphore_limit(self):
        # BoundedSemaphore should raise ValueError if released too often.
        for limit in range(1, 10):
            bs = threading.BoundedSemaphore(limit)
            threads = [threading.Thread(target=bs.acquire)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            threads = [threading.Thread(target=bs.release)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            # One release beyond the initial value must fail.
            self.assertRaises(ValueError, bs.release)
    @cpython_only
    def test_frame_tstate_tracing(self):
        # Issue #14432: Crash when a generator is created in a C thread that is
        # destroyed while the generator is still used. The issue was that a
        # generator contains a frame, and the frame kept a reference to the
        # Python state of the destroyed C thread. The crash occurs when a trace
        # function is setup.
        def noop_trace(frame, event, arg):
            # no operation
            return noop_trace
        def generator():
            while 1:
                yield "generator"
        def callback():
            if callback.gen is None:
                callback.gen = generator()
            return next(callback.gen)
        callback.gen = None
        old_trace = sys.gettrace()
        sys.settrace(noop_trace)
        try:
            # Install a trace function
            threading.settrace(noop_trace)
            # Create a generator in a C thread which exits after the call
            import _testcapi
            _testcapi.call_in_temporary_c_thread(callback)
            # Call the generator in a different Python thread, check that the
            # generator didn't keep a reference to the destroyed thread state
            for test in range(3):
                # The trace function is still called here
                callback()
        finally:
            sys.settrace(old_trace)
    @cpython_only
    def test_shutdown_locks(self):
        """Non-daemon threads register their tstate lock in
        threading._shutdown_locks; daemon threads must not."""
        for daemon in (False, True):
            with self.subTest(daemon=daemon):
                event = threading.Event()
                thread = threading.Thread(target=event.wait, daemon=daemon)
                # Thread.start() must add lock to _shutdown_locks,
                # but only for non-daemon thread
                thread.start()
                tstate_lock = thread._tstate_lock
                if not daemon:
                    self.assertIn(tstate_lock, threading._shutdown_locks)
                else:
                    self.assertNotIn(tstate_lock, threading._shutdown_locks)
                # unblock the thread and join it
                event.set()
                thread.join()
                # Thread._stop() must remove tstate_lock from _shutdown_locks.
                # Daemon threads must never add it to _shutdown_locks.
                self.assertNotIn(tstate_lock, threading._shutdown_locks)
    def test_locals_at_exit(self):
        # bpo-19466: thread locals must not be deleted before destructors
        # are called
        rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            class Atexit:
                def __del__(self):
                    print("thread_dict.atexit = %r" % thread_dict.atexit)
            thread_dict = threading.local()
            thread_dict.atexit = "value"
            atexit = Atexit()
            """)
        self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
    def test_leak_without_join(self):
        # bpo-37788: Test that a thread which is not joined explicitly
        # does not leak. Test written for reference leak checks.
        def noop(): pass
        with support.wait_threads_exit():
            threading.Thread(target=noop).start()
            # Thread.join() is not called
    def test_import_from_another_thread(self):
        # bpo-1596321: If the threading module is first import from a thread
        # different than the main thread, threading._shutdown() must handle
        # this case without logging an error at Python exit.
        code = textwrap.dedent('''
            import _thread
            import sys
            event = _thread.allocate_lock()
            event.acquire()
            def import_threading():
                import threading
                event.release()
            if 'threading' in sys.modules:
                raise Exception('threading is already imported')
            _thread.start_new_thread(import_threading, ())
            # wait until the threading module is imported
            event.acquire()
            event.release()
            if 'threading' not in sys.modules:
                raise Exception('threading is not imported')
            # don't wait until the thread completes
        ''')
        rc, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')
class ThreadJoinOnShutdown(BaseTestCase):
    """Interpreter shutdown must join non-daemon threads, including in
    fork()ed children and when fork() happens off the main thread."""
    def _run_and_join(self, script):
        # Prepend a helper (joiningfunc) that joins a given thread and then
        # prints 'end of thread'; run the combined script in a subprocess and
        # expect both markers on stdout, in order.
        script = """if 1:
            import sys, os, time, threading
            # a thread, which waits for the main program to terminate
            def joiningfunc(mainthread):
                mainthread.join()
                print('end of thread')
                # stdout is fully buffered because not a tty, we have to flush
                # before exit.
                sys.stdout.flush()
            \n""" + script
        rc, out, err = assert_python_ok("-c", script)
        data = out.decode().replace('\r', '')
        self.assertEqual(data, "end of main\nend of thread\n")
    def test_1_join_on_shutdown(self):
        # The usual case: on exit, wait for a non-daemon thread
        script = """if 1:
            import os
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            time.sleep(0.1)
            print('end of main')
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        script = """if 1:
            from test import support
            childpid = os.fork()
            if childpid != 0:
                # parent process
                support.wait_process(childpid, exitcode=0)
                sys.exit(0)
            # child process
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print('end of main')
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        script = """if 1:
            from test import support
            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    # parent process
                    support.wait_process(childpid, exitcode=0)
                    sys.exit(0)
                # child process
                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print('end of main')
                t.start()
                t.join() # Should not block: main_thread is already stopped
            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script)
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_4_daemon_threads(self):
        # Check that a daemon thread cannot crash the interpreter on shutdown
        # by manipulating internal structures that are being disposed of in
        # the main thread.
        script = """if True:
            import os
            import random
            import sys
            import time
            import threading
            thread_has_run = set()
            def random_io():
                '''Loop for a while sleeping random tiny amounts and doing some I/O.'''
                while True:
                    with open(os.__file__, 'rb') as in_f:
                        stuff = in_f.read(200)
                    with open(os.devnull, 'wb') as null_f:
                        null_f.write(stuff)
                    time.sleep(random.random() / 1995)
                    thread_has_run.add(threading.current_thread())
            def main():
                count = 0
                for _ in range(40):
                    new_thread = threading.Thread(target=random_io)
                    new_thread.daemon = True
                    new_thread.start()
                    count += 1
                while len(thread_has_run) < count:
                    time.sleep(0.001)
                # Trigger process shutdown
                sys.exit(0)
            main()
            """
        rc, out, err = assert_python_ok('-c', script)
        self.assertFalse(err)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_reinit_tls_after_fork(self):
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.
        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                support.wait_process(pid, exitcode=50)
            else:
                os._exit(50)
        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    def test_clear_threads_states_after_fork(self):
        # Issue #17094: check that threads states are cleared after fork()
        # start a bunch of threads
        threads = []
        for i in range(16):
            t = threading.Thread(target=lambda : time.sleep(0.3))
            threads.append(t)
            t.start()
        pid = os.fork()
        if pid == 0:
            # check that threads states have been cleared
            if len(sys._current_frames()) == 1:
                os._exit(51)
            else:
                os._exit(52)
        else:
            support.wait_process(pid, exitcode=51)
            for t in threads:
                t.join()
class SubinterpThreadingTests(BaseTestCase):
    """Thread behavior at subinterpreter shutdown (Py_EndInterpreter)."""
    def pipe(self):
        # Create a pipe whose ends are closed at test exit; the read end is
        # made non-blocking so a missing write is detected instead of hanging.
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        if hasattr(os, 'set_blocking'):
            os.set_blocking(r, False)
        return (r, w)
    def test_threads_join(self):
        # Non-daemon threads should be joined at subinterpreter shutdown
        # (issue #18808)
        r, w = self.pipe()
        code = textwrap.dedent(r"""
            import os
            import random
            import threading
            import time
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                os.write(%d, b"x")
            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,))
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")
    def test_threads_join_2(self):
        # Same as above, but a delay gets introduced after the thread's
        # Python code returned but before the thread state is deleted.
        # To achieve this, we register a thread-local object which sleeps
        # a bit when deallocated.
        r, w = self.pipe()
        code = textwrap.dedent(r"""
            import os
            import random
            import threading
            import time
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            class Sleeper:
                def __del__(self):
                    random_sleep()
            tls = threading.local()
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                tls.x = Sleeper()
                os.write(%d, b"x")
            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,))
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")
    @cpython_only
    def test_daemon_threads_fatal_error(self):
        """A daemon thread still running when Py_EndInterpreter is called
        must produce a fatal Python error, not a crash."""
        subinterp_code = f"""if 1:
            import os
            import threading
            import time
            def f():
                # Make sure the daemon thread is still running when
                # Py_EndInterpreter is called.
                time.sleep({test.support.SHORT_TIMEOUT})
            threading.Thread(target=f, daemon=True).start()
            """
        script = r"""if 1:
            import _testcapi
            _testcapi.run_in_subinterp(%r)
            """ % (subinterp_code,)
        with test.support.SuppressCrashReport():
            rc, out, err = assert_python_failure("-c", script)
        self.assertIn("Fatal Python error: Py_EndInterpreter: "
                      "not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
    """Error cases: invalid thread operations and reporting of unhandled
    exceptions raised inside threads."""
    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)
        thread.join()
    def test_joining_current_thread(self):
        """A thread cannot join itself."""
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join);
    def test_joining_inactive_thread(self):
        """A thread that was never started cannot be joined."""
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)
    def test_daemonize_active_thread(self):
        """The daemon flag cannot be changed after the thread has started."""
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
        thread.join()
    def test_releasing_unacquired_lock(self):
        """Releasing a Lock that was never acquired raises RuntimeError."""
        lock = threading.Lock()
        self.assertRaises(RuntimeError, lock.release)
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
            import threading
            def recurse():
                return recurse()
            def outer():
                try:
                    recurse()
                except RecursionError:
                    pass
            w = threading.Thread(target=outer)
            w.start()
            w.join()
            print('end of main thread')
            """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
        self.assertEqual(data, expected_output)
    def test_print_exception(self):
        """An unhandled exception in a thread is reported on stderr."""
        script = r"""if True:
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)
    def test_print_exception_stderr_is_none_1(self):
        """The exception is still reported even if sys.stderr is set to
        None after the thread has started."""
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            sys.stderr = None
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)
    def test_print_exception_stderr_is_none_2(self):
        """With sys.stderr set to None before the thread is created, no
        'Unhandled exception' message is produced."""
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            sys.stderr = None
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        self.assertNotIn("Unhandled exception", err.decode())
    def test_bare_raise_in_brand_new_thread(self):
        """Issue 27558: a bare `raise` with no active exception in a fresh
        thread must raise RuntimeError."""
        def bare_raise():
            raise
        class Issue27558(threading.Thread):
            exc = None
            def run(self):
                try:
                    bare_raise()
                except Exception as exc:
                    self.exc = exc
        thread = Issue27558()
        thread.start()
        thread.join()
        self.assertIsNotNone(thread.exc)
        self.assertIsInstance(thread.exc, RuntimeError)
        # explicitly break the reference cycle to not leak a dangling thread
        thread.exc = None
    def test_multithread_modify_file_noerror(self):
        # See issue25872
        def modify_file():
            with open(test.support.TESTFN, 'w', encoding='utf-8') as fp:
                fp.write(' ')
                traceback.format_stack()
        self.addCleanup(unlink, test.support.TESTFN)
        threads = [
            threading.Thread(target=modify_file)
            for i in range(100)
        ]
        for t in threads:
            t.start()
            t.join()
class ThreadRunFail(threading.Thread):
    """Helper thread whose run() method always fails with ValueError.

    Used by the excepthook tests, which need a thread that dies with a
    known exception message.
    """
    def run(self):
        message = "run failed"
        raise ValueError(message)
class ExceptHookTests(BaseTestCase):
    """Tests for threading.excepthook and its interaction with
    sys.excepthook."""
    def test_excepthook(self):
        """The default hook reports the failing thread's name, a traceback,
        and the exception on stderr."""
        with support.captured_output("stderr") as stderr:
            thread = ThreadRunFail(name="excepthook thread")
            thread.start()
            thread.join()
        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("run failed")', stderr)
        self.assertIn('ValueError: run failed', stderr)
    @support.cpython_only
    def test_excepthook_thread_None(self):
        # threading.excepthook called with thread=None: log the thread
        # identifier in this case.
        with support.captured_output("stderr") as stderr:
            try:
                raise ValueError("bug")
            except Exception as exc:
                args = threading.ExceptHookArgs([*sys.exc_info(), None])
                try:
                    threading.excepthook(args)
                finally:
                    # Explicitly break a reference cycle
                    args = None
        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("bug")', stderr)
        self.assertIn('ValueError: bug', stderr)
    def test_system_exit(self):
        """sys.exit() in a thread produces no stderr output."""
        class ThreadExit(threading.Thread):
            def run(self):
                sys.exit(1)
        # threading.excepthook() silently ignores SystemExit
        with support.captured_output("stderr") as stderr:
            thread = ThreadExit()
            thread.start()
            thread.join()
        self.assertEqual(stderr.getvalue(), '')
    def test_custom_excepthook(self):
        """A replacement excepthook receives the exception details and the
        failing thread object."""
        args = None
        def hook(hook_args):
            nonlocal args
            args = hook_args
        try:
            with support.swap_attr(threading, 'excepthook', hook):
                thread = ThreadRunFail()
                thread.start()
                thread.join()
            self.assertEqual(args.exc_type, ValueError)
            self.assertEqual(str(args.exc_value), 'run failed')
            self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
            self.assertIs(args.thread, thread)
        finally:
            # Break reference cycle
            args = None
    def test_custom_excepthook_fail(self):
        """If threading.excepthook itself raises, the error goes to
        sys.excepthook and a note is written to stderr."""
        def threading_hook(args):
            raise ValueError("threading_hook failed")
        err_str = None
        def sys_hook(exc_type, exc_value, exc_traceback):
            nonlocal err_str
            err_str = str(exc_value)
        with support.swap_attr(threading, 'excepthook', threading_hook), \
             support.swap_attr(sys, 'excepthook', sys_hook), \
             support.captured_output('stderr') as stderr:
            thread = ThreadRunFail()
            thread.start()
            thread.join()
        self.assertEqual(stderr.getvalue(),
                         'Exception in threading.excepthook:\n')
        self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
    """Tests for threading.Timer."""
    def setUp(self):
        BaseTestCase.setUp(self)
        # (args, kwargs) snapshots recorded by the callback spy.
        self.callback_args = []
        # Set by the spy so the test can wait for the timer to fire.
        self.callback_event = threading.Event()
    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutate timer1's args/kwargs; with a shared mutable default this
        # would leak into timer2's call below.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        # Both invocations must have seen pristine empty args/kwargs.
        self.assertEqual(len(self.callback_args), 2)
        self.assertEqual(self.callback_args, [((), {}), ((), {})])
        timer1.join()
        timer2.join()
    def _callback_spy(self, *args, **kwargs):
        # Record a copy of the arguments, then signal the waiting test.
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
class LockTests(lock_tests.LockTests):
    # Run the shared lock_tests suite against threading.Lock.
    locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
    # Run the RLock suite against the pure-Python RLock implementation.
    locktype = staticmethod(threading._PyRLock)
# Run the RLock suite against the C implementation, when available.
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
    # Run the shared Event suite against threading.Event.
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # Condition uses an RLock by default and exports its API,
    # so it must also pass the RLock test suite.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    # Run the shared Condition suite against threading.Condition.
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    # Run the shared Semaphore suite against threading.Semaphore.
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    # Run the shared suite against threading.BoundedSemaphore.
    semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
    # Run the shared Barrier suite against threading.Barrier.
    barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Verify threading.__all__ matches the module's public names.
        # ThreadError is re-exported from _thread; the deprecated camelCase
        # aliases are deliberately excluded from __all__.
        extra = {"ThreadError"}
        blacklist = {'currentThread', 'activeCount'}
        support.check__all__(self, threading, ('threading', '_thread'),
                             extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
    """Tests for _thread.interrupt_main(), which schedules a
    KeyboardInterrupt in the main thread."""
    def test_interrupt_main_subthread(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()
        t = threading.Thread(target=call_interrupt)
        with self.assertRaises(KeyboardInterrupt):
            t.start()
            t.join()
        # The second join() is intentional: the first one may have been
        # aborted by the KeyboardInterrupt, so make sure the thread has
        # really finished before the test returns.
        t.join()
    def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in main thread that
        # KeyboardInterrupt is raised instantly.
        with self.assertRaises(KeyboardInterrupt):
            _thread.interrupt_main()
    def test_interrupt_main_noerror(self):
        handler = signal.getsignal(signal.SIGINT)
        try:
            # No exception should arise while SIGINT is ignored or at its
            # default disposition (the interpreter handles it internally).
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            _thread.interrupt_main()
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            _thread.interrupt_main()
        finally:
            # Restore original handler
            signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
    """Tests for threading._register_atexit, run in child interpreters via
    assert_python_ok so each case observes a full interpreter shutdown."""
    def test_atexit_output(self):
        # The registered callback runs at shutdown; its output must appear
        # on stdout with nothing on stderr.
        rc, out, err = assert_python_ok("-c", """if True:
            import threading
            def run_last():
                print('parrot')
            threading._register_atexit(run_last)
            """)
        self.assertFalse(err)
        self.assertEqual(out.strip(), b'parrot')
    def test_atexit_called_once(self):
        # Even when _shutdown() is invoked explicitly before interpreter
        # exit, the callback must run exactly once.
        rc, out, err = assert_python_ok("-c", """if True:
            import threading
            from unittest.mock import Mock
            mock = Mock()
            threading._register_atexit(mock)
            mock.assert_not_called()
            # force early shutdown to ensure it was called once
            threading._shutdown()
            mock.assert_called_once()
            """)
        self.assertFalse(err)
    def test_atexit_after_shutdown(self):
        # The only way to do this is by registering an atexit within
        # an atexit, which is intended to raise an exception.
        rc, out, err = assert_python_ok("-c", """if True:
            import threading
            def func():
                pass
            def run_last():
                threading._register_atexit(func)
            threading._register_atexit(run_last)
            """)
        self.assertTrue(err)
        self.assertIn("RuntimeError: can't register atexit after shutdown",
                      err.decode())
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
starter_peaks.py | from app.imports import *
from app.watchdogs.custom import CustomFileEventHandler
from app.imports.crysalis.controller import CrysalisController
class Starter:
    """
    Handles file tracking for peak hunt files of Crysalis (Rigaku).

    Watches a directory with a watchdog Observer, displays the most recent
    peak file in an ipywidgets GUI, and lets the user post-process it.
    """

    WATCHDOG_DELAY = 0.5    # poll period (s) of the watchdog thread's stop check
    DEBUG_MODE = logging.INFO

    def __init__(self, path=None):
        """
        :param path: directory to watch; None or a non-directory disables the watchdog
        """
        self.path = path
        # thread for watchdog
        self.th_watchdog = None
        self.file_observer = None
        # status and output
        self.lbl_status = None
        self.output = None
        self.output_lock = threading.Lock()
        self.lbl_statusline = None
        self.lbl_clean_status = None
        # path and process button
        self.btn_process = None
        self.lbl_path = None
        self.int_cleanup_radius = None
        self.int_group = None
        # last path
        self.last_path = None
        # queue stop - controls.  Created BEFORE the watchdog thread starts;
        # previously it was created after start_watchdog(), so the thread
        # could race ahead and hit a missing attribute.
        self.queue_stop = Queue()
        # self lock
        self.lock = threading.Lock()
        # crysalis controller
        self.ctrl_crysalis = CrysalisController(debug_mode=self.DEBUG_MODE)
        self.setup_gui()
        self.start_watchdog()

    def __del__(self):
        # Best effort: ask the watchdog thread to stop when collected.
        self.cleanup()

    def cleanup(self):
        """
        Asks the watchdog thread to stop and waits for acknowledgement.
        :return:
        """
        QUIT_MSG = "quit"
        # Without a live consumer, queue_stop.join() would block forever.
        if self.th_watchdog is None or not self.th_watchdog.is_alive():
            return
        self.queue_stop.put(QUIT_MSG)
        try:
            self.set_output("Cleaning up threads\n")
            self.queue_stop.join()
            self.set_output("All good\n")
        except AttributeError:
            pass

    def setup_gui(self):
        """
        Builds the static part of the GUI (status labels and output area).
        :return:
        """
        self.lbl_status = HTML("")
        # A None path would make os.path.isdir() raise TypeError -> treat as invalid.
        if self.path is not None and os.path.isdir(self.path):
            tmsg = f"Path ({self.path}) is valid"
        else:
            tmsg = f"Path ({self.path}) is invalid"
        self.lbl_status.value = tmsg
        self.output = Output()
        display(self.lbl_status)
        self.lbl_clean_status = HTML("")
        display(self.lbl_clean_status)
        display(self.output)

    def start_watchdog(self):
        """
        Starts the watchdog thread that watches for file changes.
        :return:
        """
        if self.path is not None and os.path.isdir(self.path):
            self.th_watchdog = threading.Thread(target=self._thread_watchdog)
            # setDaemon() is deprecated; assign the attribute instead.
            self.th_watchdog.daemon = True
            self.th_watchdog.start()

    def _thread_watchdog(self):
        """
        Thread body: runs the watchdog observer until a stop message arrives.
        :return:
        """
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
        self.lbl_status.value = f"Starting watchdog for path ({self.path})"
        self.file_event_handler = CustomFileEventHandler(self)
        self.file_observer = Observer()
        self.file_observer.schedule(self.file_event_handler, self.path, recursive=True)
        self.file_observer.start()
        try:
            while True:
                # Non-blocking check for the quit message.  The original code
                # used a blocking get() (which can never raise Empty) and
                # broke on Empty instead of on the message, so the thread
                # never terminated on request.
                try:
                    self.queue_stop.get_nowait()
                    self.queue_stop.task_done()
                    break
                except Empty:
                    pass
                time.sleep(self.WATCHDOG_DELAY)
        finally:
            self.file_observer.stop()
            self.file_observer.join()

    def clear_output(self):
        """
        Clears output
        :return:
        """
        with self.output_lock:
            if isinstance(self.output, Output):
                self.output.clear_output()

    def set_output(self, msg):
        """
        Adds information into the output field
        :param msg:
        :return:
        """
        with self.output_lock:
            if self.lbl_statusline is None:
                self.lbl_statusline = HTML("")
                display(self.lbl_statusline)
            self.lbl_statusline.value = f"<div class=''>{msg}</div>"

    def set_output_cleaning(self, tmsg):
        """
        Sets a status message which is cleared again after a delay
        :param tmsg: HTML text; an empty string just clears the label
        :return:
        """
        with self.lock:
            self.lbl_clean_status.value = f"<div>{tmsg}</div>"
        if len(tmsg) > 0:
            th = threading.Thread(target=self.clean_output_cleaning)
            th.daemon = True
            th.start()

    def clean_output_cleaning(self, delay=10):
        """
        Clears the transient status message after *delay* seconds.
        :param delay: seconds to wait before clearing
        :return:
        """
        # The original cleared the label after 1 s and then kept re-clearing
        # it; sleep once for the full delay instead.
        time.sleep(delay)
        self.set_output_cleaning("")

    def add_file(self, filepath, deleted=False, prefix=None):
        """
        Adds filepath and gui elements if non existent, fills them with data
        :param filepath: file reported by the watchdog (backslashes normalized)
        :param deleted: True when the file was removed
        :param prefix: optional message for the status line
        :return:
        """
        if isinstance(filepath, str):
            filepath = filepath.replace("\\", "/")
        with self.lock:
            # Lazily build the processing controls on the first reported file.
            if None in (self.btn_process, self.lbl_path, self.int_cleanup_radius, self.int_group):
                self.btn_process = Button(
                    description='Process',
                    disabled=True,
                    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
                    tooltip='Starts processing',
                    icon='check',
                    width='100px'
                )
                self.btn_process.on_click(self.process_peakfile)
                self.int_cleanup_radius = BoundedIntText(
                    value=7,
                    min=1,
                    max=20,
                    step=1,
                    description='Cleanup Radius:',
                    disabled=False
                )
                self.int_group = BoundedIntText(
                    value=2,
                    min=1,
                    max=10,
                    step=1,
                    description='Cleanup group:',
                    disabled=False
                )
                self.lbl_path = HTML("")
                display(self.lbl_path)
                display(HBox([self.int_group, self.int_cleanup_radius, self.btn_process]))
            if prefix is not None:
                self.set_output(prefix)
            # The currently shown file was deleted -> disable processing.
            if self.last_path is not None:
                if filepath in self.last_path and deleted:
                    self.btn_process.disabled = True
                    self.last_path = None
                    return
            if not deleted:
                self.btn_process.disabled = False
                self.last_path = filepath
                self.lbl_path.value = f"<div class='peakfile'><b>Latest file: {self.last_path}</b></div>"

    def process_peakfile(self, *args, **kwargs):
        """
        Button callback: snapshots the GUI state under the lock and starts a
        worker thread that processes the latest peak file.
        :return:
        """
        logging.info("Started")
        with self.lock:
            path = self.last_path
            self.btn_process.disabled = True
            radius = int(self.int_cleanup_radius.value)
            group = int(self.int_group.value)
        th = threading.Thread(target=self._process_peakfile, args=[path, radius, group])
        th.daemon = True
        th.start()
        # No join() here: joining immediately made the worker pointless and
        # blocked the GUI event loop for the whole processing time.

    def _process_peakfile(self, path, radius, group):
        """
        Does the real processing - open file, read, modify, write.
        :param path: peak file path snapshotted at click time
        :param radius: cleanup radius
        :param group: cleanup group size
        :return:
        """
        ts = time.time()
        # Use the captured path, not self.last_path, which another watchdog
        # event may have changed in the meantime.
        if path is not None and os.path.isfile(path):
            with self.lock:
                tc = self.ctrl_crysalis.getTabbin(debug_mode=self.DEBUG_MODE)
            if tc is not None:
                tpath = path
                # Strip the ".tabbin" extension (case-insensitive) if present.
                m = re.compile(r"(.*)\.tabbin", re.I).match(path)
                if m is not None:
                    tpath = m.groups()[0]
                tc.read_file(path)
                rd_t = f"rd t \"{tpath}\""
                msg_prefix = f"""File {path} was changed. Operation took {time.time() - ts:6.2f} s<br/>
                {rd_t}<br/>"""
                # Put the Crysalis command on the clipboard for the user.
                pc.copy(rd_t)
                tc.mod_list_pixelmultiframe(path, group=group, radius=radius, frame_threshold=7)
                self.set_output_cleaning(msg_prefix)
                self.add_file(path)
echobot.py | # -*- coding: utf-8 -*-
import LineAlpha
from LineAlpha.lib.curve.ttypes import *
from datetime import datetime
# from imgurpython import ImgurClient - used when uploading images through imgur
import time,random,sys,json,codecs,threading,glob,os,subprocess,multiprocessing
# Login can be done via token or QR code:
#bot.login(qr=True)
#bot.login(token="AuthToken")
# Five bot accounts (cl, kk, ki, kc, kg) are logged in with auth tokens.
cl = LineAlpha.LINE()
cl.login(token="AuthToken")
cl.loginResult()
kk = LineAlpha.LINE()
kk.login(token="AuthToken")
kk.loginResult()
ki = LineAlpha.LINE()
ki.login(token="AuthToken")
ki.loginResult()
kc = LineAlpha.LINE()
kc.login(token="AuthToken")
kc.loginResult()
kg = LineAlpha.LINE()
kg.login(token="AuthToken")
kg.loginResult()
# selfbot (your own account) - only needed for auto-rejoin when kicked
adm = LineAlpha.LINE()
adm.login(token="AuthToken")
adm.loginResult()
#imgur stuff
# client_id = ''
# client_secret = ''
# access_token = ''
# refresh_token = ''
# client = ImgurClient(client_id, client_secret, access_token, refresh_token)
print "login success"
# NOTE(review): Python 2 only - reload(sys)/setdefaultencoding were removed
# in Python 3; the whole file uses print statements and must run on Python 2.
reload(sys)
sys.setdefaultencoding('utf-8')
# album = None
# image_path = 'tmp/tmp.jpg'
# kk=ki=kc=cl
helpMessage ="""[Ardh-] Bot(s) Command list:
Use Prefix 「Ar」 to use the Bot(s)
Prefix is Case sensitive but the commands is not.
[Gid] - Show Group ID
[Mid all] - Show all the Bot(s) MID
[Bot 1/2/3/4/5] - Shows the specific Bot MID
[Bot all] - Show all the Bot(s) Contact
[Bot 1/2/3/4/5] - Shows the specific Bot Contact
[Yid] - Show your ID
[Contact 「mid」] - Give Contact by MID
[Join on/off] - Auto join group
[Leave on/off] - Allows the bot to leave the group
[*] Command in the groups [*]
[Ginfo] - Group Info
[Banlist] - Check Banlist
[Cancel] - Cancel all pending(s) invitation
[Stalk 「ID」] - Upload lastest instagram picture from ID
[*] Admin and Staff Commands [*]
[Absen] - Check if bot is Online
[Glink on/off] - Turn invitation link for group on/off
[Cancel on/off] - Turn auto cancel invite on/off
[Gn 「group name」] - Change Group Name
[Sp/Speed] - Check bot response speed
[Random:「A」] - Randomize group name A times
[Bc 「text」] - Let the bot send a text
[*] Admin only Commands [*]
[Cleanse] - Clear all members in the group
[Bye all] - Bot Leave
[Ban 「@」] - Ban By Tag
[Unban 「@」] - Unban By Tag
[Ban] - By Sharing Contact
[Unban] - By Sharing Contact
[Kill ban] - Kick all banned contact(s)
[Staff add/remove @] - Add or Remove Staff By Tag
"""
# All bot clients; commands pick one with random.choice() to spread load.
KAC=[cl,ki,kk,kc,kg]
# MIDs (LINE user identifiers) of the five bot accounts.
mid = cl.getProfile().mid
Amid = kk.getProfile().mid
Bmid = ki.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = kg.getProfile().mid
Bots = [mid,Amid,Bmid,Cmid,Dmid]
# Start the bot first, then type "Ar Yid" to obtain the MID of your LINE account.
admin = ["MID_ADMIN"]
staff = ["MID_ADMIN"]
adminMID = "MID_ADMIN"
# Global feature flags and mutable state shared by the command handlers.
wait = {
    'contact':True,
    'autoJoin':True,
    'autoCancel':{"on":True,"members":1},
    'leaveRoom':True,
    'timeline':True,
    'autoAdd':True,
    'message':"Thanks for add me",
    "lang":"JP",
    "comment":"Thanks for add me",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":True,
    "cName":"[Ardh-]BOT1",
    "cName2":"[Ardh-]BOT2",
    "cName3":"[Ardh-]BOT3",
    "cName4":"[Ardh-]BOT4",
    "cName5":"[Ardh-]BOT5",
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    "protectionOn":True
}
# Read-receipt tracking state (who has read which chat since a set point).
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{}
}
# Auto-cancel settings for group invitations / invite links.
cancelinvite = {
    'autoCancel':True,
    'autoCancelUrl':True
}
setTime = {}
setTime = wait2['setTime']
#imgur stuff too
# def upload_tempimage(client):
#     '''
#     Upload a picture of a kitten. We don't ship one, so get creative!
#     '''
#     # Here's the metadata for the upload. All of these are optional, including
#     # this config dict itself.
#     config = {
#         'album': album,
#         'name': 'bot auto upload',
#         'title': 'bot auto upload',
#         'description': 'bot auto upload'
#     }
#     print("Uploading image... ")
#     image = client.upload_from_path(image_path, config=config, anon=False)
#     print("Done")
#     print()
#     return image
def sendMessage(to, text, contentMetadata=None, contentType=0):
    """
    Build a LINE Message addressed to *to* and bump its send counter.

    :param to: target chat/user MID
    :param text: message text
    :param contentMetadata: optional metadata dict (None -> empty dict)
    :param contentType: LINE content type code (0 = text)

    NOTE(review): this only constructs the Message and increments
    messageReq; nothing here transmits it.  `profile` and `messageReq`
    are presumably module globals defined elsewhere - verify.
    """
    # The original used a mutable default argument (contentMetadata={}),
    # which is shared between calls; use a None sentinel instead.
    if contentMetadata is None:
        contentMetadata = {}
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
    """
    Read-receipt handler: records which display names have read the chat
    op.param1 since a read point was set (state lives in wait2).
    """
    try:
        # Only chats with an active read point are tracked.
        if op.param1 not in wait2['readPoint']:
            return
        reader = cl.getContact(op.param2).displayName
        # Append each reader only once per read point.
        if reader not in wait2['readMember'][op.param1]:
            wait2['readMember'][op.param1] += "\n・" + reader
            wait2['ROM'][op.param1][op.param2] = "・" + reader
    except:
        # Best effort: any lookup failure is silently ignored.
        pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 11:
if cancelinvite["autoCancelUrl"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
print "Url Opened, Autokick on"
else:
print "random group update"
else:
pass
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
print "BOT 1 Joined"
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
kk.acceptGroupInvitation(op.param1)
print "BOT 2 Joined"
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
ki.acceptGroupInvitation(op.param1)
print "BOT 3 Joined"
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
kc.acceptGroupInvitation(op.param1)
print "BOT 4 Joined"
else:
print "autoJoin is Off"
if Dmid in op.param3:
if wait["autoJoin"] == True:
kg.acceptGroupInvitation(op.param1)
else:
if cancelinvite["autoCancel"] == True:
try:
X = cl.getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(op.param1, gInviMids)
print gInviMids + "invite canceled"
except:
try:
print "Retry canceling invitation"
X = random.choice(KAC).getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gInviMids)
print gInviMids + "invite canceled"
except:
print "Bot can't cancel the invitation"
pass
if op.type == 15:
random.choice(KAC).sendText(op.param1, "Good Bye :)")
print op.param3 + "has left the group"
if op.type == 17:
if op.param3 in wait["blacklist"]:
try:
cl.kickoutFromGroup(op.param1, op.param3)
except:
random.choice(KAC).kickoutFromGroup(op.param1, op.param3)
if op.type == 19:
print "someone was kicked"
if op.param3 in admin:
print "Admin has been kicked"
if op.param2 in Bots:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
cl.inviteIntoGroup(op.param1,op.param3)
adm.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
adm.acceptGroupInvitation(op.param1)
print "Admin Joined"
if mid in op.param3:
print "BOT1 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
kk.inviteIntoGroup(op.param1,op.param3)
cl.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
cl.acceptGroupInvitation(op.param1)
print "BOT1 Joined"
if Amid in op.param3:
print "BOT2 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
ki.inviteIntoGroup(op.param1,op.param3)
kk.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
kk.acceptGroupInvitation(op.param1)
print "BOT2 Joined"
if Bmid in op.param3:
print "BOT3 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
kc.inviteIntoGroup(op.param1,op.param3)
ki.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
ki.acceptGroupInvitation(op.param1)
print "BOT3 Joined"
if Cmid in op.param3:
print "BOT4 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
kg.inviteIntoGroup(op.param1,op.param3)
kc.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
kc.acceptGroupInvitation(op.param1)
print "BOT4 Joined"
if Dmid in op.param3:
print "BOT5 has been kicked"
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
try:
cl.inviteIntoGroup(op.param1,op.param3)
kg.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).inviteIntoGroup(op.param1,op.param3)
kg.acceptGroupInvitation(op.param1)
print "BOT5 Joined"
else:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
kg.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "autokick executed"
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
print "BOT(s) Leaving chat Room"
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
print "BOT(s) Leaving chat Room"
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"Already in the Blacklist")
wait["wblacklist"] = False
print "MID Already in the Blacklist"
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Added to the Blacklist")
print [msg.contentMetadata["mid"]] + " Added to the Blacklist"
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Deleted from the Blacklist")
wait["dblacklist"] = False
print [msg.contentMetadata["mid"]] + " Removed from the Blacklist"
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Contact not in Blacklist")
print "MID not in blacklist"
elif wait["contact"] == True:
if msg.from_ in admin:
msg.contentType = 0
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[Display Name]:\n" + msg.contentMetadata["displayName"] + "\n\n[MID]:\n" + msg.contentMetadata["mid"] + "\n\n[Status Message]:\n" + contact.statusMessage + "\n\n[Profile Picture]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n[Cover Picture]:\n" + str(cu))
print "Contact sent"
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n\n[MID]:\n" + msg.contentMetadata["mid"] + "\n\n[Status Message]:\n" + contact.statusMessage + "\n\n[Profile Picture]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n[Cover Picture]:\n" + str(cu))
print "Contact sent"
#-----------------------[Help Section]------------------------
elif msg.text in ["Ar /help","Ar /Help"]:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,helpMessage)
print "[Command]/help executed"
else:
cl.sendText(msg.to,helpt)
#-----------------------[Group Name Section]------------------------
elif "Ar Gn " in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Ar Gn ","")
random.choice(KAC).updateGroup(X)
print "[Command]Gn executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
cl.sendText(msg.to,"It can't be used besides the group.")
print "Gn executed outside group chat"
elif "Ar gn " in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Ar gn ","")
random.choice(KAC).updateGroup(X)
print "[Command]Gn executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
cl.sendText(msg.to,"It can't be used besides the group.")
print "Gn executed outside group chat"
#-----------------------[Kick Section]------------------------
elif "Ar Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Ar Kick ","")
cl.sendText(msg.to,"Good bye.")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
print "[Command]Kick executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif "Ar kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Ar kick ","")
cl.sendText(msg.to,"Good bye.")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
print "[Command]Kick executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Kill ban","Ar kill ban"]:
if msg.toType == 2:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
if matched_list != []:
cl.sendText(msg.to,"Blacklisted contact noticed...")
cl.sendText(msg.to,"Begin Kicking contact")
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"It looks empty here.")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
print "[Command]Kill ban executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Send Profile Section]------------------------
elif msg.text in ["Ar Bot all","Ar bot all"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': Amid}
kk.sendMessage(msg)
msg.contentMetadata = {'mid': Bmid}
ki.sendMessage(msg)
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentMetadata = {'mid': Dmid}
kg.sendMessage(msg)
print "[Command]Bot all executed"
elif msg.text in ["Ar Bot 1","Ar bot 1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
print "[Command]Bot 1 executed"
elif msg.text in ["Ar Bot 2","Ar bot 2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
kk.sendMessage(msg)
print "[Command]Bot 2 executed"
elif msg.text in ["Ar Bot 3","Ar bot 3"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
ki.sendMessage(msg)
print "[Command]Bot 3 executed"
elif msg.text in ["Ar Bot 4","Ar bot 4"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
print "[Command]Bot 4 executed"
elif msg.text in ["Ar Bot 5","Ar bot 5"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
kg.sendMessage(msg)
print "[Command]Bot 5 executed"
#-----------------------[Cancel invitation Section]------------------------
elif msg.text in ["cancel","Cancel"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
cl.sendText(msg.to,"Canceling all pending(s) invitation")
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
print "[Command]Cancel executed"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"This group doesn't have any pending invitation")
print "[Command]Group don't have pending invitation"
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "Cancel executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[Group link Section]------------------------
elif msg.text in ["Ar Glink off","Ar Link off","Ar glink off","Ar link off"]:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation link turned off")
print "[Command]Glink off executed"
else:
cl.sendText(msg.to,"Already turned off")
print "[Command]Glink off executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "[Command]Glink off executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Ar Glink on","Ar Link on","Ar glink on","Ar link on"]:
if msg.toType == 2:
if msg.from_ in staff:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation link turned on")
print "[Command]Glink on executed"
else:
cl.sendText(msg.to,"Already turned on")
print "[Command]Glink on executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "[Command]Glink on executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[Group info Section]------------------------
elif msg.text in ["Ar Ginfo","Ar ginfo"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
print "[Command]Ginfo executed"
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
print "[Command]Ginfo executed"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
print "[Command]Ginfo executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[Bot/User/Group ID Section]------------------------
elif msg.text in ["Ar Gid","Ar gid"]:
cl.sendText(msg.to,msg.to)
print "[Command]Gid executed"
elif msg.text in ["Ar Mid all","Ar mid all"]:
cl.sendText(msg.to,"[Ardh-]Bot(s) ID\n[Ardh-]BOT1\n" + mid + "\n\n[Ardh-]BOT2\n" + Amid + "\n\n[Ardh-]BOT3\n" + Bmid + "\n\n[Ardh-]BOT4\n" + Cmid + "\n\n[Ardh-]BOT5\n" + Dmid)
print "[Command]Mid executed"
elif msg.text in ["Ar Mid 1","Ar mid 1"]:
cl.sendText(msg.to,mid)
print "[Command]Mid 1 executed"
elif msg.text in ["Ar Mid 2","Ar mid 2"]:
kk.sendText(msg.to,Amid)
print "[Command]Mid 2 executed"
elif msg.text in ["Ar Mid 3","Ar mid 3"]:
ki.sendText(msg.to,Bmid)
print "[Command]Mid 3 executed"
elif msg.text in ["Ar Mid 4","Ar mid 4"]:
kc.sendText(msg.to,Cmid)
print "[Command]Mid 4 executed"
elif msg.text in ["Ar Mid 5","Ar mid 5"]:
kc.sendText(msg.to,Dmid)
print "[Command]Mid 5 executed"
elif msg.text in ["Ar Yid","Ar yid"]:
cl.sendText(msg.to,msg.from_)
print "[Command]Yid executed"
#-----------------------[Send Contact Section]------------------------
elif "Ar Contact" in msg.text:
mmid = msg.text.replace("Ar Contact ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
print "[Command]Contact executed"
elif "Ar contact" in msg.text:
mmid = msg.text.replace("Ar contact ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
print "[Command]Contact executed"
#-----------------------[Auto Join Section]------------------------
elif msg.text in ["Ar Join on","Ar join on"]:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join already on")
print "[Command]Join on executed"
else:
cl.sendText(msg.to,"Auto join already on")
print "[Command]Join on executed"
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join turned on")
print "[Command]Join on executed"
else:
cl.sendText(msg.to,"Auto join turned on")
print "Join on executed"
elif msg.text in ["Ar Join off","Ar join off"]:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join already off")
print "[Command]Join off executed"
else:
cl.sendText(msg.to,"Auto join already off")
print "[Command]Join off executed"
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto join turned off")
print "[Command]Join off executed"
else:
cl.sendText(msg.to,"Auto join turned off")
print "[Command]Join off executed"
#-----------------------[Group Url Section]------------------------
elif msg.text in ["Ar Gurl","Ar gurl"]:
if msg.toType == 2:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
print "[Command]Gurl executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
print "[Command]Gurl executed outside group chat"
else:
cl.sendText(msg.to,"Not for use less than group")
#-----------------------[All bots join group Section]------------------------
elif msg.text in ["Ar Join all","Ar join all"]:
if msg.from_ in admin:
try:
ginfo = cl.getGroup(msg.to)
ginfo.preventJoinByTicket = False
cl.updateGroup(ginfo)
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
kg.acceptGroupInvitationByTicket(msg.to,Ticket)
ginfo = random.choice(KAC).getGroup(msg.to)
ginfo.preventJoinByTicket = True
random.choice(KAC).updateGroup(ginfo)
except:
print "Somethings wrong with the url"
print "[Command]Join all executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Bot(s) Leave Section]------------------------
elif msg.text in ["Ar Bye all","Ar bye all"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
ki.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
kg.leaveGroup(msg.to)
except:
pass
print "[Command]Bye all executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 1","Ar bye bot 1"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 1 executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 2","Ar bye bot 2"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = kk.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 2 executed"
else:
kk.sendText(msg.to,"Command denied.")
kk.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 3","Ar bye bot 3"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = ki.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 3 executed"
else:
ki.sendText(msg.to,"Command denied.")
ki.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 4","Ar bye bot 4"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = kc.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 4 executed"
else:
kc.sendText(msg.to,"Command denied.")
kc.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Bye bot 5","Ar bye bot 5"]:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = kc.getGroup(msg.to)
try:
kg.leaveGroup(msg.to)
except:
pass
print "[Command]Bye bot 5 executed"
else:
kg.sendText(msg.to,"Command denied.")
kg.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Cleanse Section (USE AT YOUR OWN RISK!)]------------------------
elif msg.text in ["Ar Cleanse","Ar cleanse"]:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Cleanse executing"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
kk.sendText(msg.to,"Group cleansing begin")
kc.sendText(msg.to,"Goodbye :)")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
# --------------[Bot and Admin MID]----------------
targets.remove(adminMID)
targets.remove(mid)
targets.remove(Amid)
targets.remove(Bmid)
targets.remove(Cmid)
targets.remove(Dmid)
# --------------[Bot and Admin MID]----------------
if targets == []:
ki.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[ki,kk,kc,cl,kg]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleansed")
print "[Command]Cleanse executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
#-----------------------[Ban/Unban Section]------------------------
elif "Ar Ban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Ban executed"
_name = msg.text.replace("Ar Ban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Blacklist")
except:
ki.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar Unban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Unban executed"
_name = msg.text.replace("Ar Unban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Whitelist")
except:
ki.sendText(msg.to,"Added to Whitelist")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar ban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Ban executed"
_name = msg.text.replace("Ar ban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Blacklist")
except:
ki.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar unban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Unban executed"
_name = msg.text.replace("Ar unban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Added to Whitelist")
except:
ki.sendText(msg.to,"Added to Whitelist")
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Ar Ban","Ar ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact to Ban")
print "[Command]Ban executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Unban","Ar unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact to Unban")
print "[Command]Unban executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif msg.text in ["Ar Banlist","Ar banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"No user is Blacklisted")
else:
cl.sendText(msg.to,"Blacklisted user(s)")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Banlist executed"
#-----------------------[Bot Speak Section]------------------------
elif "Ar Bc " in msg.text:
if msg.from_ in staff:
bctxt = msg.text.replace("Ar Bc ","")
random.choice(KAC).sendText(msg.to,(bctxt))
print "[Command]Bc executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif "Ar bc " in msg.text:
if msg.from_ in staff:
bctxt = msg.text.replace("Ar bc ","")
cl.sendText(msg.to,(bctxt))
print "[Command]Bc executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
#-----------------------[Bot speed test Section]------------------------
elif msg.text in ["Ar Sp all","Ar Speed all","Ar sp all","Ar speed all"]:
if msg.from_ in staff:
start = time.time()
cl.sendText(msg.to, "Bot 1 Processing Request")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
start2 = time.time()
kk.sendText(msg.to, "Bot 2 Processing Request")
elapsed_time2 = time.time() - start2
kk.sendText(msg.to, "%sseconds" % (elapsed_time2))
start3 = time.time()
ki.sendText(msg.to, "Bot 3 Processing Request")
elapsed_time3 = time.time() - start3
ki.sendText(msg.to, "%sseconds" % (elapsed_time3))
start4 = time.time()
kc.sendText(msg.to, "Bot 4 Processing Request")
elapsed_time4 = time.time() - start4
kc.sendText(msg.to, "%sseconds" % (elapsed_time4))
start5 = time.time()
kg.sendText(msg.to, "Bot 5 Processing Request")
elapsed_time5 = time.time() - start5
kg.sendText(msg.to, "%sseconds" % (elapsed_time5))
print "[Command]Speed all executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 1","Ar Speed 1","Ar sp 1","Ar speed 1"]:
if msg.from_ in staff:
start = time.time()
cl.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 1 executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 2","Ar Speed 2","Ar sp 2","Ar speed 2"]:
if msg.from_ in staff:
start = time.time()
kk.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 2 executed"
else:
kk.sendText(msg.to,"Command denied.")
kk.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 3","Ar Speed 3","Ar sp 3","Ar speed 3"]:
if msg.from_ in staff:
start = time.time()
ki.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 3 executed"
else:
ki.sendText(msg.to,"Command denied.")
ki.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 4","Ar Speed 4","Ar sp 4","Ar speed 4"]:
if msg.from_ in staff:
start = time.time()
kc.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 4 executed"
else:
kc.sendText(msg.to,"Command denied.")
kc.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Sp 5","Ar Speed 5","Ar sp 5","Ar speed 5"]:
if msg.from_ in staff:
start = time.time()
kg.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
kg.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed 5 executed"
else:
kc.sendText(msg.to,"Command denied.")
kc.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
#-----------------------[Auto Cancel Section]------------------------
elif "Ar staff add @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Ar staff add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar Staff add @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Ar Staff add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar staff remove @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Ar staff remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Ar Staff remove @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Ar Staff remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Ar Stafflist","Ar stafflist"]:
if staff == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Staff list:")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#-----------------------[Auto cancel Section]------------------------
elif msg.text in ["Ar Cancel off","Ar cancel off"]:
if msg.from_ in staff:
if cancelinvite["autoCancel"] == True:
cancelinvite["autoCancel"] = False
cl.sendText(msg.to, "Auto Cancel turned off")
print "[Command]Cancel off executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned off")
print "[Command]Cancel off executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Cancel on","Ar cancel on"]:
if msg.from_ in staff:
if cancelinvite["autoCancel"] == False:
cancelinvite["autoCancel"] = True
cl.sendText(msg.to, "Auto Cancel turned on")
print "[Command]Cancel on executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned on")
print "[Command]Cancel on executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Url off","Ar url off"]:
if msg.from_ in staff:
if cancelinvite["autoCancelUrl"] == True:
cancelinvite["autoCancelUrl"] = False
cl.sendText(msg.to, "Auto Cancel Url turned off")
print "[Command]Url off executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned off")
print "[Command]Url off executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Url on","Ar url on"]:
if msg.from_ in staff:
if cancelinvite["autoCancelUrl"] == True:
cancelinvite["autoCancelUrl"] = False
cl.sendText(msg.to, "Auto Cancel Url turned off")
print "[Command]Url on executed"
else:
cl.sendText(msg.to, "Auto Cancel already turned off")
print "[Command]Url on executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
#-----------------------[Misc Section]-------------------------------------------
elif "Ar random:" in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
strnum = msg.text.replace("Ar random:","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.05)
group.name = name
random.choice(KAC).updateGroup(group)
except:
cl.sendText(msg.to,"Error")
print "[Command]Random executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif "Ar Random:" in msg.text:
if msg.toType == 2:
if msg.from_ in staff:
strnum = msg.text.replace("Ar Random:","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
print "[Command]Random executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
elif msg.text in ["Ar Absen","Ar absen"]:
if msg.from_ in staff:
cl.sendText(msg.to, "Hadir")
kk.sendText(msg.to, "Hadir")
ki.sendText(msg.to, "Hadir")
kc.sendText(msg.to, "Hadir")
kg.sendText(msg.to, "Hadir")
print "[Command]Absen executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Staff or higher permission required.")
print "[Error]Command denied - staff or higher permission required"
#VPS STUFF - VPS NEEDED TO RUN THIS COMMAND :)
# elif msg.text in ["Ar Kernel","Ar kernel"]:
# if msg.from_ in admin:
# botKernel = subprocess.Popen(["uname","-svmo"], stdout=subprocess.PIPE).communicate()[0]
# cl.sendText(msg.to, botKernel)
# print "[Command]Kernel executed"
# else:
# cl.sendText(msg.to,"Command denied.")
# cl.sendText(msg.to,"Admin permission required.")
# print "[Error]Command denied - Admin permission required"
# elif "Ar Stalk " in msg.text:
# print "[Command]Stalk executing"
# stalkID = msg.text.replace("Ar Stalk ","")
# subprocess.call(["instaLooter",stalkID,"tmp/","-n","1"])
# files = glob.glob("tmp/*.jpg")
# for file in files:
# os.rename(file,"tmp/tmp.jpg")
# fileTmp = glob.glob("tmp/tmp.jpg")
# if not fileTmp:
# cl.sendText(msg.to, "Image not found, maybe the account haven't post a single picture or the account is private")
# print "[Command]Stalk executed - no image found"
# else:
# image = upload_tempimage(client)
# cl.sendText(msg.to, format(image['link']))
# print "[Command]Stalk executed - success"
# elif "Ar stalk " in msg.text:
# print "[Command]Stalk executing"
# stalkID = msg.text.replace("Ar stalk ","")
# subprocess.call(["instaLooter",stalkID,"tmp/","-n","1"])
# files = glob.glob("tmp/*.jpg")
# for file in files:
# os.rename(file,"tmp/tmp.jpg")
# fileTmp = glob.glob("tmp/tmp.jpg")
# if not fileTmp:
# cl.sendText(msg.to, "Image not found, maybe the account haven't post a single picture or the account is private")
# print "[Command]Stalk executed - no image found"
# else:
# image = upload_tempimage(client)
# cl.sendText(msg.to, format(image['link']))
# subprocess.call(["sudo","rm","-rf","tmp/tmp.jpg"])
# print "[Command]Stalk executed - success"
elif "Ar img" in msg.text:
path = "a.png"
try:
cl.sendImage(msg.to, path)
except:
cl.sendText(msg.to, "Failed to upload image")
else:
if cl.getGroup(msg.to).preventJoinByTicket == False:
cl.reissueGroupTicket(msg.to)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
else:
if msg.from_ in Bots:
pass
else:
print "No Action"
if op.type == 59:
print op
except Exception as error:
print error
def a2(now=None):
    """Return False when the minute is a multiple of 10, else True.

    Intended as a busy-wait predicate (``while a2(): pass``) so the clock
    updater fires only on :00/:10/:20/:30/:40/:50.

    Bug fixed: the original sliced ``nowT[14:]`` on a 2-character "%M"
    string, which is always ``""``, so the function always returned True.

    :param now: optional datetime to test; defaults to ``datetime.now()``
                (parameter added for testability, backward compatible).
    """
    if now is None:
        now = datetime.now()
    nowT = datetime.strftime(now, "%M")
    return nowT not in ["10", "20", "30", "40", "50", "00"]
def nameUpdate():
    """Background worker: while the "clock" feature is enabled, re-apply the
    configured display name of every bot account roughly every 10 minutes.

    Runs forever; any error (network hiccup, missing global) is swallowed
    and the loop simply retries, matching best-effort bot behavior.
    """
    while True:
        try:
            if wait["clock"] == True:
                # Timestamp is computed as in the original flow (currently unused
                # by the name template itself).
                nowT = datetime.strftime(datetime.now(), "(%H:%M)")
                # Apply each account's configured name in fixed order.
                for client, key in ((cl, "cName"), (kk, "cName2"),
                                    (ki, "cName3"), (kc, "cName4"),
                                    (kg, "cName5")):
                    prof = client.getProfile()
                    prof.displayName = wait[key]
                    client.updateProfile(prof)
                # Sleep 10 minutes between refreshes.
                time.sleep(600)
        except:
            pass
# Start the display-name updater in the background; daemon so it exits with
# the main thread.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main event loop: long-poll the LINE server for operations and dispatch
# each one to bot().
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the poll cursor to the highest revision seen so the
            # next fetch resumes after the operations already handled.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
test_thread_local.py | #! /usr/bin/env python
""" Simple test script for Thread.local
"""
from thread import _local as local
import unittest
from test import test_support
import threading
class ThreadLocalTestCase(unittest.TestCase):
    """Behavioral tests for thread-local storage (``thread._local``).

    NOTE(review): this is Python 2 code — ``dict.items()`` is assumed to
    return a sortable list and ``assertEquals`` is the old spelling.
    """

    def test_two_locals(self):
        '''Ensures that two locals in the same thread have separate dicts.'''
        first = local()
        first.x = 7
        second = local()
        second.x = 12
        self.assertEquals(7, first.x)
        self.assertEquals(12, second.x)

    def test_local(self):
        """Writes land in the per-thread __dict__ and are invisible to other
        threads, which each get their own independent namespace."""
        mydata = local()
        mydata.number = 42
        self.assertEqual(mydata.number,42)
        self.assertEqual(mydata.__dict__,{'number': 42})
        mydata.__dict__.setdefault('widgets', [])
        self.assertEqual(mydata.widgets,[])
        log=[]
        def f():
            # Runs in a second thread: sees an empty dict, then sets its own
            # 'number' without affecting the main thread's value.
            items = mydata.__dict__.items()
            items.sort()
            log.append(items)
            mydata.number = 11
            log.append(mydata.number)
        thread = threading.Thread(target=f)
        thread.start()
        thread.join()
        self.assertEqual(log,[[], 11])
        # Main thread's value is untouched by the other thread's write.
        self.assertEqual(mydata.number,42)

    def test_subclass_local(self):
        """Subclasses of local: __init__ re-runs per thread, class attributes
        act as defaults, and __slots__ attributes are shared across threads."""
        def f():
            # Runs in a second thread against whatever 'mydata' currently is.
            items = mydata.__dict__.items()
            items.sort()
            log.append(items)
            mydata.number = 11
            log.append(mydata.number)
        class MyLocal(local):
            number = 2
            initialized = False
            def __init__(self, **kw):
                # Guards against __init__ being invoked more than once in the
                # same thread (it is re-run once per new thread).
                if self.initialized:
                    raise SystemError('__init__ called too many times')
                self.initialized = True
                self.__dict__.update(kw)
            def squared(self):
                return self.number ** 2
        class SubSubLocal(MyLocal):
            pass
        mydata = MyLocal(color='red')
        self.assertEqual(mydata.number,2)
        self.assertEqual(mydata.color,'red')
        del mydata.color
        log=[]
        self.assertEqual(mydata.squared(),4)
        thread = threading.Thread(target=f)
        thread.start()
        thread.join()
        # The second thread re-ran __init__ (restoring color) before f's dict
        # snapshot, then set its own number.
        self.assertEqual(log,[[('color', 'red'), ('initialized', True)], 11])
        self.assertEqual(mydata.number,2)
        # Plain local() rejects constructor arguments unless __init__ is overridden.
        self.assertRaises(TypeError, local, 'any arguments')
        SubSubLocal(color='red')
        def accessColor():
            mydata.color
        # 'color' was deleted in this thread above.
        self.assertRaises(AttributeError,accessColor)
        class MyLocal(local):
            __slots__ = 'number'
        mydata = MyLocal()
        mydata.number = 42
        mydata.color = 'red'
        thread = threading.Thread(target=f)
        thread.start()
        thread.join()
        # __slots__ attributes are NOT thread-local: the other thread's write
        # to 'number' is visible here.
        self.assertEqual(mydata.number,11)
def test_main():
    """Entry point used by the regression-test driver (test_support)."""
    test_support.run_unittest(ThreadLocalTestCase)

if __name__ == "__main__":
    test_main()
|
xxe.py | import socket
import sys
import argparse
import requests
import threading
import time
import hashlib
import os
import sendrequest as req
import utils.logger as logger
import utils.logs as logs
from utils.db import Database_update
# Shared database handle for recording scan findings.
dbupdate = Database_update()
# Canned minimal HTTP 200 response sent back on the XXE callback connection
# so the target's XML parser sees a well-formed reply.
data = b'''\
HTTP/1.1 200 OK\r\n\
Connection: close\r\n\
Content-Type: text/html\r\n\
Content-Length: 6\r\n\
\r\n\
Hello!\
'''
class xxe_scan:
    """Out-of-band XXE detector.

    Hosts a local TCP listener and posts XML payloads that reference it;
    if the target resolves the external entity and calls back with the
    unique marker, the URL is flagged as vulnerable.
    """

    def __init__(self):
        # Callback listener endpoint embedded into the payloads.
        self.port = 1111
        self.host = socket.gethostbyname(socket.gethostname())

    def generate_hash(self):
        """Return an md5 hex digest of the current time — a unique marker
        placed in the callback URL to tie a hit to this scan."""
        return hashlib.md5(str(time.time())).hexdigest()

    def start_server(self):
        """Bind the callback socket. Returns True on success; returns None
        (implicitly) when the port cannot be bound."""
        self.s = socket.socket()
        try:
            self.s.bind((self.host, self.port))
            logs.logging.info("XXE: Server started.")
            return True
        except socket.error:
            logs.logging.info("XXE: Can't bind to port. Port may be busy or check firewall setting.")

    def start_listening(self):
        """Accept connections forever; set the module-global ``vulnerable``
        flag when a request containing the unique marker arrives."""
        global vulnerable
        vulnerable = False
        try:
            while True:
                # listen(5) sets the connection backlog (NOT a 5-second wait).
                self.s.listen(5)
                self.conn, self.addr = self.s.accept()
                self.data = self.conn.recv(1024)
                if self.data and unique_id in self.data:
                    # External DTD resolution is enabled — the URL called back
                    # with our marker, so it is suspicious for XXE.
                    self.conn.sendall(data)
                    vulnerable = True
                self.conn.close()
        except socket.error:
            print "[-]URL might not be vulnerable to XXE. We reccomend you to check it manually"
            self.conn.close()

    def fetch_xxe_payload(self):
        # Returns the XXE payload templates as a list of strings.
        payload_list = []
        # Payload file path depends on whether we run from the API subdirectory.
        if os.getcwd().split('/')[-1] == 'API':
            path = '../Payloads/xxe.txt'
        else:
            path = 'Payloads/xxe.txt'
        with open(path) as f:
            for line in f:
                if line:
                    payload_list.append(line.rstrip())
        return payload_list

    def send_request(self,url,method,temp_headers,xxe_payloads,scanid=None):
        """POST each payload (with {host} replaced by our callback URL) and
        record a finding as soon as the listener flags a hit."""
        # Probe: check whether the server accepts XML bodies at all.
        sample_xml = '''<?xml version="1.0" encoding="UTF-8"?><text>hello world</text>'''
        xml_request = requests.post(url, headers=temp_headers, data=sample_xml)
        if xml_request.status_code == 415:
            # Media type not supported — XML is rejected outright; nothing to test.
            return
        global unique_id
        unique_id = self.generate_hash()
        host = "http://"+str(self.host)+":"+str(self.port)+"/"+unique_id
        for payload in xxe_payloads:
            payload = payload.replace("{host}",host)
            xxe_request = requests.post(url, headers=temp_headers, data=payload)
            # Give the out-of-band callback time to reach our listener thread.
            time.sleep(10)
            if vulnerable is True:
                print "[+]{0} is vulnerable to XML External Entity Attack".format(url)
                attack_result = { "id" : 14, "scanid" : scanid, "url" : url, "alert": "XML External Entity Attack", "impact": "High", "req_headers": temp_headers, "req_body":payload, "res_headers": xxe_request.headers ,"res_body": xxe_request.text}
                dbupdate.insert_record(attack_result)
                break

    def xxe_test(self,url,method,headers,body,scanid=None):
        """Public entry point: start the callback listener in a daemon thread,
        then fire the payloads at the target URL."""
        temp_headers = {}
        temp_headers.update(headers)
        xxe = xxe_scan()
        socketresult = xxe.start_server()
        if socketresult is True:
            t = threading.Thread(target=xxe.start_listening)
            t.daemon = True
            t.start()
            temp_headers['Content-Type'] = 'text/xml'
            xxe_payloads = self.fetch_xxe_payload()
self.send_request(url,method,temp_headers,xxe_payloads,scanid) |
datasets.py | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import (LOGGER, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))  # DDP world size
NUM_THREADS = min(8, os.cpu_count())  # number of multiprocessing threads
# Find the numeric EXIF tag id for 'Orientation'; after the loop the
# module-level name `orientation` holds that id (used by exif_size below).
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(paths):
    """Return a single md5 fingerprint for a list of file/dir paths.

    The digest mixes the total on-disk size of the existing paths with the
    concatenated path strings, so it changes when files grow/shrink or the
    path list changes.
    """
    total_size = 0
    for p in paths:
        if os.path.exists(p):
            total_size += os.path.getsize(p)
    digest = hashlib.md5(str(total_size).encode())  # seed with combined size
    digest.update(''.join(paths).encode())          # then fold in the paths
    return digest.hexdigest()
def exif_size(img):
    """Return the (width, height) of a PIL image, swapped if its EXIF
    orientation tag says the image is rotated 90 or 270 degrees.

    Falls back to the raw size when EXIF data is missing or unreadable.
    """
    w, h = img.size
    try:
        exif = dict(img._getexif().items())
        if exif[orientation] in (6, 8):  # 6 = 270deg, 8 = 90deg rotation
            w, h = h, w
    except:
        # Best-effort: images with no/broken EXIF keep their raw size.
        pass
    return (w, h)
def exif_transpose(image):
    """
    Transpose a PIL image accordingly if it has an EXIF Orientation tag.
    Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()

    :param image: The image to transpose.
    :return: An image.
    """
    exif = image.getexif()
    tag = exif.get(0x0112, 1)  # 0x0112 is the EXIF Orientation tag; default upright
    if tag <= 1:
        # Already upright (or no tag) — nothing to do.
        return image
    op = {2: Image.FLIP_LEFT_RIGHT,
          3: Image.ROTATE_180,
          4: Image.FLIP_TOP_BOTTOM,
          5: Image.TRANSPOSE,
          6: Image.ROTATE_270,
          7: Image.TRANSVERSE,
          8: Image.ROTATE_90,
          }.get(tag)
    if op is not None:
        image = image.transpose(op)
        # Strip the tag so the image isn't re-rotated downstream.
        del exif[0x0112]
        image.info["exif"] = exif.tobytes()
    return image
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                      rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False):
    """Build a LoadImagesAndLabels dataset plus its DataLoader.

    Returns a (loader, dataset) tuple. The loader is an InfiniteDataLoader
    (worker-reusing) unless image_weights is set, in which case a plain
    DataLoader is used because its attributes must be mutable per epoch.
    `rank` is the DDP process rank (-1 = not distributed).
    """
    if rect and shuffle:
        # Rectangular batching orders images by aspect ratio, which shuffling would break.
        LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
        shuffle = False
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augmentation
                                      hyp=hyp,  # hyperparameters
                                      rect=rect,  # rectangular batches
                                      cache_images=cache,
                                      single_cls=single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)
    batch_size = min(batch_size, len(dataset))
    # Worker count: bounded by CPUs per DDP process, batch size, and the requested cap.
    nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates
    return loader(dataset,
                  batch_size=batch_size,
                  shuffle=shuffle and sampler is None,
                  num_workers=nw,
                  sampler=sampler,
                  pin_memory=True,
                  collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset
class InfiniteDataLoader(dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader. Wrapping the batch sampler in
    _RepeatSampler keeps one persistent iterator alive, so worker processes
    are never torn down and respawned between epochs.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # batch_sampler is read-only on DataLoader, so bypass __setattr__.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        # Length of one epoch (the wrapped sampler), not the infinite stream.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the persistent iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:
    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
    def __init__(self, path, img_size=640, stride=32, auto=True):
        """Index all image/video files under *path* (glob pattern, directory or single file)."""
        p = str(Path(path).resolve())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')
        # Split each (sorted) filename on '.' and keep only supported extensions
        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        # Counts of each kind
        ni, nv = len(images), len(videos)
        # Model input size; commonly 640, larger sizes such as 1280 are also supported
        self.img_size = img_size
        # Image sides must be divisible by the model stride (typically 32)
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        # Per-file flag marking videos, since images and videos are decoded differently
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        self.auto = auto
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
    def __iter__(self):
        self.count = 0
        return self
    def __next__(self):
        """Return (path, letterboxed CHW RGB image, original BGR image, capture, log string)."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file (or stop)
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, f'Image Not Found {path}'
            s = f'image {self.count}/{self.nf} {path}: '
        # Padded resize
        # Produce the letterboxed (scaled + padded) copy for the model
        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        # cap is only meaningful for video sources (None for images)
        return path, img, img0, self.cap, s
    def new_video(self, path):
        """Open *path* with OpenCV and reset the per-video frame counters."""
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    def __len__(self):
        return self.nf  # number of files
class LoadImagesMy:
    # YOLOv5 image dataloader variant used for custom batch inference
    """Image-only dataloader.

    Identical to LoadImages except that __next__ always decodes the current file
    as an image (video files are indexed but never decoded), and the progress
    string reports position k+1 out of picNumber as supplied by the caller.
    """
    def __init__(self, picNumber, k, path, img_size=640, stride=32, auto=True):
        self.picNumber = picNumber  # total picture count, used only for the log string
        self.k = k  # zero-based index of this picture, used only for the log string
        p = str(Path(path).resolve())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')
        # Split each (sorted) filename on '.' and keep only supported extensions
        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)  # counts of each kind
        # Model input size; commonly 640, larger sizes such as 1280 are also supported
        self.img_size = img_size
        # Image sides must be divisible by the model stride (typically 32)
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        # Per-file flag marking videos (kept for parity with LoadImages)
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        self.auto = auto
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
    def __iter__(self):
        self.count = 0
        return self
    def __next__(self):
        """Return (path, letterboxed CHW RGB image, original BGR image, capture, log string)."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        # Read image
        self.count += 1
        img0 = cv2.imread(path)  # BGR
        assert img0 is not None, f'Image Not Found {path}'
        s = f'image {self.k+1}/{self.picNumber} {path}: '
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        # cap is only meaningful for video sources (None for images)
        return path, img, img0, self.cap, s
    def new_video(self, path):
        """Open *path* with OpenCV and reset the per-video frame counters."""
        # BUG FIX: new_video and __len__ were each defined twice, verbatim; the
        # duplicate definitions have been removed (the later ones simply shadowed
        # the earlier ones, so behavior is unchanged).
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
    def __init__(self, pipe='0', img_size=640, stride=32):
        """pipe: webcam device index as a numeric string (e.g. '0'), or a stream URL/path."""
        self.img_size = img_size
        self.stride = stride
        # int() replaces eval(): a numeric source string is just a device index, and
        # eval() on externally-supplied input was needless and unsafe.
        self.pipe = int(pipe) if pipe.isnumeric() else pipe
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        """Return (path, letterboxed CHW RGB image, original BGR frame, None, log string)."""
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration
        # Read frame
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)  # flip left-right (mirror view)
        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        s = f'webcam {self.count}: '
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return img_path, img, img0, None, s
    def __len__(self):
        return 0  # stream is unbounded; length is unused
class LoadStreams:
    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
        """Open every source (a text file of URLs, or a single URL/device string) and
        start one daemon reader thread per stream."""
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride
        if os.path.isfile(sources):
            with open(sources) as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]
        n = len(sources)
        # Per-stream state: latest frame, FPS, frame count, reader thread
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.auto = auto
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            st = f'{i + 1}/{n}: {s}... '
            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            # NOTE(review): eval() on a numeric source string just yields the device
            # index; int(s) would be the safer equivalent — confirm before changing
            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'{st}Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback
            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
            LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        LOGGER.info('')  # newline
        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
    def update(self, i, cap, stream):
        # Read stream `i` frames in daemon thread
        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame
        while cap.isOpened() and n < f:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                if success:
                    self.imgs[i] = im
                else:
                    LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
                    # Zero out the stale frame and try to reconnect
                    self.imgs[i] *= 0
                    cap.open(stream)  # re-open stream if signal was lost
            time.sleep(1 / self.fps[i])  # wait time
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        """Return (sources, batched CHW RGB images, list of original BGR frames, None, '')."""
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        # Letterbox
        img0 = self.imgs.copy()
        img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None, ''
    def __len__(self):
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Map each image path to its label path (.../images/... -> .../labels/...txt)."""
    sa = f'{os.sep}images{os.sep}'  # /images/ substring
    sb = f'{os.sep}labels{os.sep}'  # /labels/ substring
    labels = []
    for img_path in img_paths:
        # Replace only the LAST /images/ component, then swap the extension for .txt
        stem = sb.join(img_path.rsplit(sa, 1)).rsplit('.', 1)[0]
        labels.append(stem + '.txt')
    return labels
class LoadImagesAndLabels(Dataset):
    # YOLOv5 train_loader/val_loader, loads images and labels for training and validation
    cache_version = 0.6  # dataset labels *.cache version
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        """Index the dataset, load/build the label cache, and optionally pre-cache images.

        path may be a directory, a *.txt file listing images, or a list of either.
        """
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        self.albumentations = Albumentations() if augment else None
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p) as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        # NOTE(review): `p` here is the last value left over from the loop above
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        try:
            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
            assert cache['version'] == self.cache_version  # same version
            assert cache['hash'] == get_hash(self.label_files + self.img_files)  # same hash
        except Exception:  # narrowed from bare `except:`; any load/validation failure rebuilds the cache
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache
        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
            if cache['msgs']:
                LOGGER.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int is removed in numpy >= 1.24)
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)
        # Update labels
        include_class = []  # filter labels to include only these classes (optional)
        include_class_array = np.array(include_class).reshape(1, -1)
        for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
            if include_class:
                j = (label[:, 0:1] == include_class_array).any(1)
                self.labels[i] = label[j]
                if segment:
                    self.segments[i] = segment[j]
            if single_cls:  # single-class training, merge all classes into 0
                self.labels[i][:, 0] = 0
                if segment:
                    self.segments[i][:, 0] = 0
        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]
            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs, self.img_npy = [None] * n, [None] * n
        if cache_images:
            if cache_images == 'disk':
                self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
                self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
                self.im_cache_dir.mkdir(parents=True, exist_ok=True)
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                if cache_images == 'disk':
                    if not self.img_npy[i].exists():
                        np.save(self.img_npy[i].as_posix(), x[0])
                    gb += self.img_npy[i].stat().st_size
                else:
                    self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
                    gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
            pbar.close()
    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Cache dataset labels, check images and read shapes; returns the cache dict."""
        x = {}  # dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(NUM_THREADS) as pool:
            pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [l, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        pbar.close()
        if msgs:
            LOGGER.info('\n'.join(msgs))
        if nf == 0:
            LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = self.cache_version  # cache version
        try:
            np.save(path, x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
            LOGGER.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # not writeable
        return x
    def __len__(self):
        return len(self.img_files)
    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self
    def __getitem__(self, index):
        """Return (CHW RGB image tensor, labels (n,6), image path, shapes) for one index."""
        index = self.indices[index]  # linear, shuffled, or image_weights
        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None
            # MixUp augmentation
            if random.random() < hyp['mixup']:
                img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)
            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
            if self.augment:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])
        nl = len(labels)  # number of labels
        if nl:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
        if self.augment:
            # Albumentations
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations
            # HSV color-space
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
            # Flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]
            # Flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]
            # Cutouts
            # labels = cutout(img, labels, p=0.5)
        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return torch.from_numpy(img), labels_out, self.img_files[index], shapes
    @staticmethod
    def collate_fn(batch):
        """Stack samples into a batch; column 0 of each label row becomes the image index."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
    @staticmethod
    def collate_fn4(batch):
        """Quad collate: merge groups of 4 samples into one (2x2 tiled or 2x upscaled) image."""
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
        ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)
        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
    """Load image i from the dataset; returns (im, original (h, w), resized (h, w))."""
    im = self.imgs[i]
    if im is not None:
        # Already cached in RAM
        return im, self.img_hw0[i], self.img_hw[i]
    npy = self.img_npy[i]
    if npy and npy.exists():
        im = np.load(npy)  # cached on disk as .npy
    else:
        path = self.img_files[i]
        im = cv2.imread(path)  # BGR
        assert im is not None, f'Image Not Found {path}'
    h0, w0 = im.shape[:2]  # original hw
    r = self.img_size / max(h0, w0)  # resize ratio to fit the long side
    if r != 1:
        # Downscale with INTER_AREA (unless augmenting), otherwise INTER_LINEAR
        interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
        im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
    return im, (h0, w0), im.shape[:2]
def load_mosaic(self, index):
    """Assemble a 2s x 2s mosaic from image *index* plus 3 random images; returns
    (img4, labels4 in pixel xyxy) after copy_paste and random_perspective."""
    # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
    labels4, segments4 = [], []
    s = self.img_size
    # Random mosaic centre within the central region of the canvas
    yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    random.shuffle(indices)
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)
        # place img in img4 (a/b coordinates: a = canvas, b = source image crop)
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b
        # Labels: shift into mosaic-canvas pixel coordinates
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)
    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate
    # Augment
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img4, labels4
def load_mosaic9(self, index):
    """Assemble a 9-image (3x3) mosaic around image *index*; returns (img9, labels9)
    after random_perspective augmentation."""
    # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    random.shuffle(indices)
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)
        # place img in img9
        # NOTE(review): branches for i >= 2 use hp/wp (previous tile's h/w) assigned
        # at the END of the prior iteration — valid because tiles are placed in order
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp
        padx, pady = c[:2]
        x1, y1, x2, y2 = (max(x, 0) for x in c)  # allocate coords
        # Labels: shift into mosaic-canvas pixel coordinates
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)
        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous
    # Offset: crop a random 2s x 2s window out of the 3s x 3s canvas
    yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border)  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
    # Concat/clip labels (shift by the crop offset)
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]
    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate
    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img9, labels9
def create_folder(path='./new'):
    """Create an empty folder at *path*, removing any existing one first."""
    if os.path.exists(path):
        shutil.rmtree(path)  # wipe previous contents
    os.makedirs(path)
def flatten_recursive(path='../datasets/coco128'):
    """Copy every file under *path* (recursively) into a fresh flat sibling dir '<path>_flat'."""
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)
    pattern = str(Path(path)) + '/**/*.*'
    for file in tqdm(glob.glob(pattern, recursive=True)):
        shutil.copyfile(file, flat_dir / Path(file).name)
def extract_boxes(path='../datasets/coco128'):  # from utils.datasets import *; extract_boxes()
    """Convert a detection dataset into a classification dataset: crop every
    labelled box into path/classifier/<class>/ as its own image."""
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]
            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file) as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)
                    b = x[1:] * [w, h, w, h]  # box: normalized xywh -> pixel xywh
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # np.int is removed in numpy >= 1.24
                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit()
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list, tuple)
        annotated_only:  Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
    n = len(files)  # number of files
    random.seed(0)  # reproducible split assignment
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split
    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    for name in txt:
        (path.parent / name).unlink(missing_ok=True)  # drop any previous split files
    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for split, img in tqdm(zip(indices, files), total=n):
        # Skip unlabeled images when annotated_only is requested
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():
            with open(path.parent / txt[split], 'a') as f:
                f.write('./' + img.relative_to(path.parent).as_posix() + '\n')  # add image to txt file
def verify_image_label(args):
    """Verify one image-label pair; returns (im_file, labels, shape, segments, nm, nf, ne, nc, msg).

    On any failure the pair is flagged corrupt: returns [None, None, None, None, nm, nf, ne, 1, msg].
    """
    im_file, lb_file, prefix = args
    nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', []  # number (missing, found, empty, corrupt), message, segments
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
        if im.format.lower() in ('jpg', 'jpeg'):
            with open(im_file, 'rb') as f:
                f.seek(-2, 2)  # read the last two bytes of the file
                if f.read() != b'\xff\xd9':  # JPEG end-of-image marker missing -> corrupt JPEG
                    ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
                    msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
        # verify labels
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file) as f:
                l = [x.split() for x in f.read().strip().splitlines() if len(x)]
                if any(len(x) > 8 for x in l):  # is segment
                    classes = np.array([x[0] for x in l], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                    l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                l = np.array(l, dtype=np.float32)
            nl = len(l)
            if nl:
                assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected'
                assert (l >= 0).all(), f'negative label values {l[l < 0]}'
                assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}'
                _, i = np.unique(l, axis=0, return_index=True)
                if len(i) < nl:  # duplicate row check
                    l = l[i]  # remove duplicates
                    if segments:
                        # BUG FIX: `segments` is a Python list; `segments[i]` with a numpy
                        # index array raised TypeError (caught below, wrongly flagging the
                        # file corrupt). Select the surviving rows explicitly instead.
                        segments = [segments[x] for x in i]
                    msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
            else:
                ne = 1  # label empty
                l = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            l = np.zeros((0, 5), dtype=np.float32)
        return im_file, l, shape, segments, nm, nf, ne, nc, msg
    except Exception as e:
        nc = 1
        msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
        return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
    """ Return dataset statistics dictionary with images and instances counts per split per class
    To run in parent directory: export PYTHONPATH="$PWD/yolov5"
    Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
    Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
    Arguments
        path:           Path to data.yaml or data.zip (with data.yaml inside data.zip)
        autodownload:   Attempt to download dataset if not found locally
        verbose:        Print stats dictionary
        profile:        Benchmark .npy vs .json save/load round-trips for the stats dict
        hub:            Also write resized preview images to <path>-hub/images and save stats.json there
    """
    def round_labels(labels):
        # Update labels to integer class and 4-decimal-place floats
        return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
    def unzip(path):
        # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
        if str(path).endswith('.zip'):  # path is data.zip
            assert Path(path).is_file(), f'Error unzipping {path}, file not found'
            ZipFile(path).extractall(path=path.parent)  # unzip
            dir = path.with_suffix('')  # dataset directory == zip name
            return True, str(dir), next(dir.rglob('*.yaml'))  # zipped, data_dir, yaml_path
        else:  # path is data.yaml
            return False, None, path
    def hub_ops(f, max_dim=1920):
        # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
        # NOTE: reads im_dir from the enclosing scope; only ever called from the
        # `if hub:` branch below, after im_dir has been created.
        f_new = im_dir / Path(f).name  # dataset-hub image filename
        try:  # use PIL
            im = Image.open(f)
            r = max_dim / max(im.height, im.width)  # ratio
            if r < 1.0:  # image too large
                im = im.resize((int(im.width * r), int(im.height * r)))
            im.save(f_new, 'JPEG', quality=75, optimize=True)  # save
        except Exception as e:  # use OpenCV as a fallback for images PIL cannot read
            print(f'WARNING: HUB ops PIL failure {f}: {e}')
            im = cv2.imread(f)
            im_height, im_width = im.shape[:2]
            r = max_dim / max(im_height, im_width)  # ratio
            if r < 1.0:  # image too large
                im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR)
            cv2.imwrite(str(f_new), im)
    zipped, data_dir, yaml_path = unzip(Path(path))
    with open(check_yaml(yaml_path), errors='ignore') as f:
        data = yaml.safe_load(f)  # data dict
    if zipped:
        data['path'] = data_dir  # TODO: should this be dir.resolve()?
    check_dataset(data, autodownload)  # download dataset if missing
    hub_dir = Path(data['path'] + ('-hub' if hub else ''))
    stats = {'nc': data['nc'], 'names': data['names']}  # statistics dictionary
    for split in 'train', 'val', 'test':
        if data.get(split) is None:
            stats[split] = None  # i.e. no test set
            continue
        x = []
        dataset = LoadImagesAndLabels(data[split])  # load dataset
        # per-image class histogram: rows = images, cols = classes
        for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
            x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
        x = np.array(x)  # shape(128x80)
        stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
                        'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
                                        'per_class': (x > 0).sum(0).tolist()},
                        'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
                                   zip(dataset.img_files, dataset.labels)]}
        if hub:
            im_dir = hub_dir / 'images'
            im_dir.mkdir(parents=True, exist_ok=True)
            # resize/compress every image in parallel; results discarded, files written by hub_ops
            for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
                pass
    # Profile
    stats_path = hub_dir / 'stats.json'
    if profile:
        for _ in range(1):
            file = stats_path.with_suffix('.npy')
            t1 = time.time()
            np.save(file, stats)
            t2 = time.time()
            x = np.load(file, allow_pickle=True)
            print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
            file = stats_path.with_suffix('.json')
            t1 = time.time()
            with open(file, 'w') as f:
                json.dump(stats, f)  # save stats *.json
            t2 = time.time()
            with open(file) as f:
                x = json.load(f)  # load hyps dict
            print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
    # Save, print and return
    if hub:
        print(f'Saving {stats_path.resolve()}...')
        with open(stats_path, 'w') as f:
            json.dump(stats, f)  # save stats.json
    if verbose:
        print(json.dumps(stats, indent=2, sort_keys=False))
    return stats
|
locations2tfrecords.py | #!/usr/bin/env python
import multiprocessing
from subprocess import call
try:
import mapnik2 as mapnik
except:
import mapnik
import sys, os, random as rd
import tensorflow as tf, cv2 , pickle, time
def _int64_feature(value):
    """Wrap a scalar integer in a tf.train.Feature carrying an int64_list."""
    wrapped = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=wrapped)
def _floats_feature(value):
    """Wrap a scalar float in a tf.train.Feature carrying a float_list."""
    wrapped = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=wrapped)
def _bytes_feature(value):
    """Wrap a bytes value in a tf.train.Feature carrying a bytes_list.

    Also accepts str: tf.train.BytesList rejects Python 3 str, and
    save_data() passes the coordinate string here, so encode strs to UTF-8.
    Bytes values pass through unchanged (backward compatible).
    """
    if isinstance(value, str):
        value = value.encode('utf-8')
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def save_data(data, layers, label, lat, lon, num_items, writer):
    # Serialize the rendered PNGs for one location into TFRecord examples:
    # one example per augmented item (item 0 is the un-augmented view), each
    # carrying every layer's image bytes plus label/coordinate metadata.
    coordinates = str(lat) + ',' + str(lon)
    # NOTE(review): `coordinates` is a str; tf.train.BytesList under Python 3
    # requires bytes — confirm _bytes_feature handles the encoding.
    for item in range(0,num_items + 1):
        feature = {
            'label': _int64_feature(label),
            'coordinates' : _bytes_feature(coordinates),
            'lat' : _floats_feature(lat),
            'lon' : _floats_feature(lon),
            'item' : _int64_feature(item)
        }
        for layer in layers:
            feature[layer] = _bytes_feature(tf.compat.as_bytes(data[(item, layer)]))
        # Create an example protocol buffer
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        # Serialize to string and write on the file
        writer.write(example.SerializeToString())
NUM_THREADS = 8
# Set up projections
# spherical mercator (most common target map projection of osm data imported with osm2pgsql)
merc = mapnik.Projection('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over')
# long/lat in degrees, aka ESPG:4326 and "WGS 84"
longlat = mapnik.Projection('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
# can also be constructed as:
#longlat = mapnik.Projection('+init=epsg:4326')
# ensure minimum mapnik version
# BUGFIX: the original used `and`, which made the check a no-op whenever
# mapnik_version existed, and raised AttributeError (not the intended
# SystemExit) when it did not. Require the attribute AND version >= 0.6.0.
if not hasattr(mapnik, 'mapnik_version') or mapnik.mapnik_version() < 600:
    raise SystemExit('This script requires Mapnik >=0.6.0)')
class RenderThread:
    # Worker run in a separate process: pulls render requests from a shared
    # JoinableQueue and rasterizes one 128x128 map layer tile per request
    # with Mapnik, storing the PNG bytes in a shared dict.
    def __init__(self, q, printLock):
        self.q = q
        self.maxZoom = 1  # NOTE(review): appears unused
        self.printLock = printLock
    def rendertiles(self, bounds, data, item, label, lat, layer, lon, num_items, projec):
        # Render one tile of `layer` over `bounds` (long/lat degrees) using
        # projection `projec`, and store the PNG under key (item, layer).
        z = 1
        imgx = 128 * z
        imgy = 128 * z
        mapfile = "/map_data/styles/bs_" + layer + ".xml"
        m = mapnik.Map(imgx,imgy)
        mapnik.load_map(m,mapfile)
        # ensure the target map projection is mercator
        m.srs = projec.params()
        if hasattr(mapnik,'Box2d'):  # newer Mapnik API
            bbox = mapnik.Box2d(*bounds)
        else:  # legacy Mapnik API
            bbox = mapnik.Envelope(*bounds)
        # reproject the long/lat bounding box into the target projection
        transform = mapnik.ProjTransform(longlat,projec)
        merc_bbox = transform.forward(bbox)
        m.zoom_to_box(merc_bbox)
        # render the map to an image
        im = mapnik.Image(imgx,imgy)
        mapnik.render(m, im)
        img = im.tostring('png256')
        data[(item, layer)]= img
    def loop(self):
        # Process entry point: consume queued requests until a None sentinel.
        self.m = mapnik.Map(128, 128)
        # Load style XML
        #mapnik.load_map(self.m, self.mapfile, True)
        # Obtain <Map> projection
        self.prj = mapnik.Projection(self.m.srs)
        # Projects between tile pixel co-ordinates and LatLong (EPSG:4326)
        #self.tileproj = GoogleProjection(self.maxZoom+1)
        while True:
            #Fetch a tile from the queue and render it
            r = self.q.get()
            if (r == None):  # sentinel: shut this worker down
                self.q.task_done()
                break
            else:
                (name, bounds, data, item, label, lat, layer, lon, num_items, projec) = r
                self.rendertiles(bounds, data, item, label, lat, layer, lon, num_items, projec)
                # NOTE(review): lock acquired and immediately released — the
                # progress print it once guarded appears to have been removed
                self.printLock.acquire()
                self.printLock.release()
            self.q.task_done()
def render_location(label, layers, location, num_items, num_threads, size, writer):
    # Render num_items+1 augmented views (item 0 unshifted/unrotated) of one
    # location across all layers using a pool of worker processes, then write
    # the collected PNGs to the TFRecord writer via save_data().
    with multiprocessing.Manager() as manager:
        data = manager.dict() # Create a dict that can be shared between processes
        queue = multiprocessing.JoinableQueue(32)
        printLock = multiprocessing.Lock()
        renderers = {}
        for i in range(num_threads):
            renderer = RenderThread( queue, printLock)
            render_thread = multiprocessing.Process(target=renderer.loop)
            render_thread.start()
            #print "Started render thread %s" % render_thread.getName()
            renderers[i] = render_thread
        # location[1] looks like a WKT point "POINT(lon lat)" — TODO confirm
        # against the producer of the pickled road_nodes list
        lon = float(location[1].split('(')[1].split(')')[0].split()[0])
        lat = float(location[1].split('(')[1].split(')')[0].split()[1])
        cpoint = [lon, lat]
        #---Generate num_items images from shifting in the range [0,0.8*size] and rotating
        for item in range(0 , num_items + 1) :
            if item == 0:
                # item 0: the un-augmented original view
                shift_lat = 0
                shift_lon = 0
                teta = 0
            else:
                # triangular-distributed shift in [-0.8*size, 0.8*size] plus a random rotation angle
                shift_lat = 0.8*size*(rd.random()-rd.random())
                shift_lon = 0.8*size*(rd.random()-rd.random())
                teta = 360*rd.random()
            for layer in layers:
                new_cpoint = [cpoint[0]+shift_lon, cpoint[1]+shift_lat]
                bounds = (new_cpoint[0]-size, new_cpoint[1]+size, new_cpoint[0]+size, new_cpoint[1]-size )
                # rotation is implemented by spinning the azimuthal projection's central meridian
                aeqd = mapnik.Projection('+proj=aeqd +ellps=WGS84 +lat_0=90 +lon_0='+str(teta))
                t = ("Bristol", bounds, data, item, label, lat, layer, lon, num_items, aeqd)
                queue.put(t)
        # Signal render threads to exit by sending empty request to queue
        for i in range(num_threads):
            queue.put(None)
        # wait for pending rendering jobs to complete
        queue.join()
        for i in range(num_threads):
            renderers[i].join()
        save_data(data, layers, label, lat, lon, num_items, writer)
def render_images(layers, locations, num_items, num_threads, size, writer):
    # Render every location (all layers, all augmented variants) and stream the
    # serialized examples into `writer`. Reads `save_dir` from module scope.
    # Check if dir exists
    if not os.path.isdir(save_dir):
        print("Directory no exists, exit...")
        exit()
    ntiles = 0
    label = 0  # 1-based location counter, also used as the class label
    total_locations = len(locations)
    total_tiles = num_items * total_locations * len(layers)
    for location in locations:
        start = time.time()
        ntiles += num_items * len(layers)
        label += 1
        print("Rendering location " + str(label) + "/" + str(total_locations))
        render_location(label, layers, location, num_items, num_threads, size, writer)
        end = time.time()
        time_elapsed = end - start
        # ETA in hours, assuming a constant per-location cost
        time_to_finish = time_elapsed * (total_locations - label) / 3600
        print("Images rendered and saved: " + str(ntiles) + "/" + str(total_tiles))
        print("Time to process a location: {} sec. Time remaining {} hours".format(time_elapsed, time_to_finish))
if __name__ == "__main__":
    # Map layers to render per location; 'complete' is the fully-styled map
    layers = ['complete','amenity', 'barriers','bridge','buildings','landcover','landuse','natural','others','roads','text','water']
    #layers = ['complete']
    size = 0.0005  # half-width of each rendered bounding box, in degrees
    road_nodes = "/map_data/road_nodes.pkl"  # pickled list of locations (WKT-style points)
    save_dir = "/images/roads_tfrecords/"
    datasets = ["train_layers", "validation_layers", "test_layers"]
    process = {"train_layers": True, "validation_layers": True, "test_layers": True}
    porcentages = [[0,0.6], [0.6,0.8], [0.8,1]] #in %
    num_items = 10 #Total number of images by location (including rotated and shifted)
    with open(road_nodes, 'rb') as f:
        locations = pickle.load(f)
    print("{} Pointes were found".format(len(locations)))
    rd.shuffle(locations)
    # Divide the dataset and render
    for dataset in datasets:
        if process[dataset] == True:
            print("Creating tfrecords for {} dataset encoding with cv2 and tf".format(dataset))
            filename = save_dir + '/' + dataset + '_locations.tfrecords'
            # slice boundaries for this split, as fractions of the shuffled list
            dataset_init = int(porcentages[datasets.index(dataset)][0]*len(locations))
            dataset_fin = int(porcentages[datasets.index(dataset)][1]*len(locations))
            locations_to_render = locations[dataset_init:dataset_fin]
            print("Num of locations to render: " + str(len(locations_to_render)))
            print("Num of tiles to render: " + str(len(locations_to_render)* len(layers) * num_items))
            writer = tf.python_io.TFRecordWriter(filename)
            render_images(layers, locations_to_render, num_items, NUM_THREADS, size, writer)
            writer.close()
            sys.stdout.flush()
        else:
            print("Nothin to process for the dataset " + dataset)
|
thread_rlock.py | # Import the Thread and RLock classes
from threading import Thread, RLock
# Initialize global variables
g_sum = 0  # total accumulated by all worker threads
# Expected final value: 100 threads * sum(range(100000)) = 100 * 4999950000.
# NOTE(review): this name shadows the builtin `sum`; harmless here, but rename
# it if the builtin is ever needed in this module.
sum = 499995000000
def child_thread(rl):
    """Add 0..99999 to the global g_sum while holding a doubly-acquired reentrant lock."""
    global g_sum
    # Acquire the RLock twice to demonstrate re-entrancy: the same thread may
    # re-acquire it without deadlocking.
    rl.acquire()
    rl.acquire()
    for value in range(100000):
        g_sum += value
    # Release once per acquire; the lock is only free after the final release.
    rl.release()
    rl.release()
if __name__ == "__main__":
    rl = RLock()  # create a reentrant lock
    # 100 worker threads all sharing the same lock and accumulator
    threads = [Thread(target=child_thread, args=(rl,)) for i in range(100)]
    for t in threads:
        t.start()  # start all threads
    for t in threads:
        t.join()  # wait for every thread to finish
    # `sum` here is the module-level expected total (it shadows the builtin)
    print("g_sum should be:%s;g_sum:%s"%(sum,g_sum))
|
trading-bot.py | """
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repositoy should be
construed as investment advice.Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import importlib
# used for directory handling
import glob
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the KuCoin API / websockets / Exception handling
from kucoin.client import Market, Trade, User
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
import random
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
# for colourful logging to the console
class txcolors:
    # ANSI escape sequences used to colour console output
    BUY = '\033[92m'          # bright green
    WARNING = '\033[93m'      # yellow
    SELL_LOSS = '\033[91m'    # bright red
    SELL_PROFIT = '\033[32m'  # green
    DIM = '\033[2m\033[35m'   # dim + magenta (timestamps)
    DEFAULT = '\033[39m'      # reset foreground to the default colour
# tracks profit/loss each session
# NOTE(review): `global` at module level is a no-op; kept as documentation only
global session_profit, unrealised_percent, unrealised_percent_delay
session_profit = 0
unrealised_percent = 0
unrealised_percent_delay = 0
global profit_history
try:
    profit_history  # keep an existing value if the module is re-executed in the same interpreter
except NameError:
    profit_history = 0 # or some other default value.
# print with timestamps
old_out = sys.stdout  # keep the real stdout; the proxy below delegates to it
class St_ampe_dOut:
    """Stamped stdout: prefixes each printed line with the current timestamp."""
    nl = True  # True when the next write starts a new line
    def write(self, x):
        """Write function overloaded."""
        if x == '\n':
            old_out.write(x)
            self.nl = True
        elif self.nl:
            # start of a line: emit a dimmed [timestamp] prefix before the text
            old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
            self.nl = False
        else:
            old_out.write(x)
    def flush(self):
        # no-op: the underlying stream flushes on its own
        pass
sys.stdout = St_ampe_dOut()  # route all print() output through the stamping proxy
def is_fiat():
    """Return True when the configured base pair is a fiat or stablecoin currency."""
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    #list below is in the order that Binance displays them, apologies for not using ASC order
    fiats = ['USDT', 'BUSD', 'AUD', 'BRL', 'EUR', 'GBP', 'RUB', 'TRY', 'TUSD', 'USDC', 'PAX', 'BIDR', 'DAI', 'IDRT', 'UAH', 'NGN', 'VAI', 'BVND']
    return PAIR_WITH in fiats
def decimals():
    """Number of fractional digits for reporting: 2 for fiat pairs, 8 for crypto pairs."""
    return 2 if is_fiat() else 8
def get_price(add_to_historical=True):
    '''Return the current price for all coins on kucoin'''
    global historical_prices, hsp_head
    initial_price = {}
    prices = market.get_all_tickers()['ticker']
    for coin in prices:
        #need symbolName for Kucoin in case the symbol changes (ex. BCHSV to BSV)
        if CUSTOM_LIST:
            # only symbols from the tickers file, and never FIATS-quoted pairs
            if any(item + "-" + PAIR_WITH == coin['symbolName'] for item in tickers) and all(item not in coin['symbolName'] for item in FIATS):
                initial_price[coin['symbolName']] = { 'price': coin['last'], 'time': datetime.now()}
        else:
            if PAIR_WITH in coin['symbolName'] and all(item not in coin['symbolName'] for item in FIATS):
                initial_price[coin['symbolName']] = { 'price': coin['last'], 'time': datetime.now()}
    if add_to_historical:
        # advance the cyclic head and overwrite the oldest snapshot
        # NOTE(review): the buffer is sized TIME_DIFFERENCE * RECHECK_INTERVAL
        # but the head wraps at RECHECK_INTERVAL, so only the first
        # RECHECK_INTERVAL slots are ever reused — confirm intended.
        hsp_head += 1
        if hsp_head == RECHECK_INTERVAL:
            hsp_head = 0
        historical_prices[hsp_head] = initial_price
    return initial_price
def wait_for_price():
    '''calls the initial price and ensures the correct amount of time has passed
    before reading the current price again

    Returns (volatile_coins, count, latest_snapshot): coins whose price rose
    more than CHANGE_IN_PRICE within the window, plus externally-signalled coins.'''
    global historical_prices, hsp_head, volatility_cooloff
    volatile_coins = {}
    externals = {}
    coins_up = 0
    coins_down = 0
    coins_unchanged = 0
    pause_bot()  # block here while the external "paused" signal file exists
    # throttle: wait until TIME_DIFFERENCE / RECHECK_INTERVAL minutes have
    # elapsed since the snapshot currently at the head of the rolling window
    if historical_prices[hsp_head]['KCS-' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
        # sleep for exactly the amount of time required
        time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['KCS-' + PAIR_WITH]['time'])).total_seconds())
    balance_report() # print current profit status
    get_price() # retreive latest prices
    # calculate the difference in prices
    for coin in historical_prices[hsp_head]:
        # snapshots holding the minimum/maximum price of this coin over the
        # window; None entries (not yet filled) are skipped via sentinel keys
        min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
        max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
        # percent change, negated when the max came before the min (falling price)
        threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
        # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than TRADE_SLOTS is not reached.
        if threshold_check > CHANGE_IN_PRICE:
            coins_up +=1
            if coin not in volatility_cooloff:
                volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
            # only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
            if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
                volatility_cooloff[coin] = datetime.now()
                if len(coin_orders) + len(volatile_coins) < TRADE_SLOTS or TRADE_SLOTS == 0:
                    volatile_coins[coin] = round(threshold_check, 3)
                    print(f"{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating {QUANTITY} {PAIR_WITH} value of {coin} for purchase!")
                else:
                    print(f"{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}")
        elif threshold_check < CHANGE_IN_PRICE:
            coins_down +=1
        else:
            coins_unchanged +=1
    # Disabled until fix
    #print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
    # Here goes new code for external signalling
    externals = external_signals()
    exnumber = 0
    for excoin in externals:
        # BUGFIX: the duplicate-position check used to be a bare generator
        # expression, which is always truthy (and would have indexed the order
        # id string had it been evaluated); compare against the symbols of the
        # currently open orders instead.
        if (excoin not in volatile_coins) and all(excoin != order_data['symbol'] for order_data in coin_orders.values()) and (len(coin_orders)+ len(volatile_coins) + exnumber) < TRADE_SLOTS:
            volatile_coins[excoin] = 1
            exnumber +=1
            print(f'External signal received on {excoin}, calculating {QUANTITY} {PAIR_WITH} value of {excoin} for purchase!')
    return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
    '''Collect coin symbols dropped by external signal modules as signals/*.exs files.

    Each file holds one symbol per line; every file is deleted after reading.
    Returns a dict mapping symbol -> symbol (dict for O(1) membership tests).
    '''
    external_list = {}
    # check directory and load pairs from files into external_list
    for filename in glob.glob("signals/*.exs"):
        # close the handle before deleting so removal also works on Windows
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                external_list[symbol] = symbol
        try:
            os.remove(filename)
        except OSError:  # narrowed from bare except: only removal failures expected here
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
    return external_list
def balance_report():
    # Print a one-line summary of slot usage, unrealised/realised P&L and
    # session gain, then refresh the unrealised-percent accumulator.
    global profit_history, unrealised_percent
    INVESTMENT_TOTAL = (QUANTITY * TRADE_SLOTS)  # max capital the bot may deploy
    CURRENT_EXPOSURE = (QUANTITY * len(coin_orders))  # capital currently in open trades
    TOTAL_GAINS = ((QUANTITY * session_profit) / 100)
    NEW_BALANCE = (INVESTMENT_TOTAL + TOTAL_GAINS)  # NOTE(review): computed but unused
    INVESTMENT_GAIN = (TOTAL_GAINS / INVESTMENT_TOTAL) * 100
    PROFIT_HISTORY = profit_history
    # truncating some of the above values to the correct decimal places before printing
    INVESTMENT_TOTAL = round(INVESTMENT_TOTAL, decimals())
    CURRENT_EXPOSURE = round(CURRENT_EXPOSURE, decimals())
    if len(coin_orders) > 0:
        UNREALISED_PERCENT = unrealised_percent/len(coin_orders)  # average over open trades
    else:
        UNREALISED_PERCENT = 0
    print(f'Trade slots: {len(coin_orders)}/{TRADE_SLOTS} ({float(CURRENT_EXPOSURE):g}/{float(INVESTMENT_TOTAL):g}{PAIR_WITH}) | Open trades: {UNREALISED_PERCENT:.2f}% | Closed trades: {session_profit:.2f}% (all time: {PROFIT_HISTORY:.2f}%) | Session profit: {INVESTMENT_GAIN:.2f}% ({TOTAL_GAINS:.{decimals()}f}{PAIR_WITH})')
    unrealised_percent_calc()
    return
def pause_bot():
    '''Pause the script when external indicators detect a bearish trend in the market'''
    global bot_paused, session_profit, hsp_head
    # start counting for how long the bot's been paused
    start_time = time.perf_counter()
    while os.path.isfile("signals/paused.exc"):
        if bot_paused == False:
            print(f'{txcolors.WARNING}Buying paused due to negative market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
            bot_paused = True
        # Sell function needs to work even while paused
        sell_orders = sell_coins()
        remove_from_portfolio(sell_orders)
        get_price(True)
        # pausing here
        if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:{(QUANTITY * session_profit)/100:.{decimals()}f} {PAIR_WITH}')
        time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
    else:
        # while-else: runs once the pause file is gone (including when the
        # loop body never ran at all)
        # stop counting the pause time
        stop_time = time.perf_counter()
        time_elapsed = timedelta(seconds=int(stop_time-start_time))
        # resume the bot and set bot_paused to False
        if bot_paused == True:
            print(f'{txcolors.WARNING}Resuming buying due to positive market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
            bot_paused = False
    return
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to the each coin's volume

    Returns (volume, last_price): per-coin order size rounded to the coin's
    step precision, and the price snapshot used for the conversion.'''
    volatile_coins, number_of_coins, last_price = wait_for_price()
    lot_size = {}
    volume = {}
    for coin in volatile_coins:
        # Find the correct step size for each coin
        # max accuracy for BTC for example is 6 decimal points
        # while XRP is only 1
        try:
            coin_info = list(filter(lambda x:x["symbol"]==coin,full_symbol_list)) # search the master list for correct coin
            step_size = coin_info[0]['baseIncrement']
            lot_size[coin] = step_size.index('1') - 1
            if lot_size[coin] < 0:
                lot_size[coin] = 0
        except (IndexError, KeyError, ValueError):
            # narrowed from bare except: coin missing from the master list,
            # malformed entry, or an increment without a '1' digit — fall
            # back to the default one-decimal precision below
            pass
        # calculate the volume in coin from QUANTITY in USDT (default)
        volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
        # define the volume with the correct step size
        if coin not in lot_size:
            volume[coin] = float('{:.1f}'.format(volume[coin]))
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
    return volume, last_price
def test_order_id():
    """Return a fake 9-digit order id (a random integer — not derived from the clock)."""
    test_order_id_number = random.randint(100000000,999999999)
    return test_order_id_number
def buy():
    '''Place Buy market orders for each volatile coin found

    Returns (orders, last_price, volume) for update_portfolio().
    In TEST_MODE, fabricates order records instead of calling the exchange.
    '''
    # BUGFIX: removed `global test_order_id` — it declared the sibling
    # *function's* name global; nothing ever assigned it, and any assignment
    # would have silently replaced the function.
    volume, last_price = convert_volume()
    orders = {}
    for coin in volume:
        # only buy if the there are no active trades on the coin
        print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
        if TEST_MODE:
            orders[coin] = {
                'symbol': coin,
                'id': test_order_id(),
                'createdAt': datetime.now().timestamp()
            }
            # Log trade
            if LOG_TRADES:
                write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
            continue
        # try to create a real order if the test orders did not raise an exception
        try:
            buy_limit = trader.create_market_order(
                symbol = coin,
                side = 'BUY',
                size = volume[coin]
            )
        # error handling here in case position cannot be placed
        except Exception as e:
            print(e)
        # run the else block if the position has been placed and return order info
        else:
            orders[coin] = trader.get_order_details(buy_limit['orderId'])
            # KuCoin sometimes returns an unfilled order; poll until dealFunds is populated
            while orders[coin]['dealFunds'] == "0":
                print('Kucoin is being slow in returning the order, calling the API again...')
                orders[coin] = trader.get_order_details(buy_limit['orderId'])
                time.sleep(1)
            else:
                print('Order returned, saving order to file')
            # Log trade
            if LOG_TRADES:
                write_log(f"Buy : {volume[coin]} {coin} - {get_order_price(buy_limit['orderId'])}")
    return orders, last_price, volume
def sell_coins():
    '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
    global hsp_head, session_profit, profit_history, coin_order_id
    last_price = get_price(False) # don't populate rolling window
    sell_orders = {}
    for order, order_data in coin_orders.items():
        symbol = order_data['symbol']
        # define stop loss and take profit
        TP = float(order_data['bought_at']) + (float(order_data['bought_at']) * order_data['take_profit']) / 100
        SL = float(order_data['bought_at']) + (float(order_data['bought_at']) * order_data['stop_loss']) / 100
        LastPrice = float(last_price[symbol]['price'])
        sellFee = (order_data['volume'] * LastPrice) * (TRADING_FEE/100)
        BuyPrice = float(order_data['bought_at'])
        buyFee = (order_data['volume'] * BuyPrice) * (TRADING_FEE/100)
        PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
        profit = ((LastPrice - BuyPrice) * order_data['volume']) - (buyFee+sellFee) # adjust for trading fee here
        profit_percent = profit / (order_data['volume'] * BuyPrice) * 100
        # check that the price is above the take profit and readjust SL and TP accordingly if trialing stop loss used
        if LastPrice > TP and USE_TRAILING_STOP_LOSS:
            # increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
            order_data['stop_loss'] = order_data['take_profit'] - TRAILING_STOP_LOSS
            order_data['take_profit'] = PriceChange + TRAILING_TAKE_PROFIT
            if DEBUG: print(f"{symbol} TP reached, adjusting TP {order_data['take_profit']:.{decimals()}f} and SL {order_data['stop_loss']:.{decimals()}f} accordingly to lock-in profit")
            continue
        # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        # precedence: LastPrice < SL or (LastPrice > TP and not USE_TRAILING_STOP_LOSS)
        # i.e. a stop-loss sale fires regardless of the trailing-stop setting
        if LastPrice < SL or LastPrice > TP and not USE_TRAILING_STOP_LOSS:
            print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {order_data['volume']} {symbol} - {float(BuyPrice):g} - {float(LastPrice):g} : {profit_percent:.2f}% Est: {profit:.{decimals()}f} {PAIR_WITH}{txcolors.DEFAULT}")
            # try to create a real order
            try:
                if not TEST_MODE:
                    sell_coins_limit = trader.create_market_order(
                        symbol = symbol,
                        side = 'SELL',
                        size = order_data['volume']
                    )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if coin has been sold and create a dict for each coin sold
            else:
                sell_orders[order] = coin_orders[order]
                if not TEST_MODE:
                    # update LastPrice with actual price of order that was executed
                    LastPrice = float(get_order_price(sell_coins_limit['orderId']))
                    sellFee = (order_data['volume'] * LastPrice) * (TRADING_FEE/100)
                    PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
                # prevent system from buying this coin for the next TIME_DIFFERENCE minutes
                volatility_cooloff[symbol] = datetime.now()
                # Log trade
                if LOG_TRADES:
                    profit = ((LastPrice - BuyPrice) * sell_orders[order]['volume']) - (buyFee+sellFee) # adjust for trading fee here
                    profit_percent = profit / (sell_orders[order]['volume'] * BuyPrice) * 100
                    write_log(f"Sell: {sell_orders[order]['volume']} {symbol} - {BuyPrice} - {LastPrice} Profit: {profit:.{decimals()}f} {PAIR_WITH} ({profit_percent:.2f}%)")
                session_profit = session_profit + profit_percent
                profit_history = profit_history + profit_percent
            continue
        # no action; print once every TIME_DIFFERENCE
        if hsp_head == 1:
            if len(coin_orders) > 0:
                print(f'Holding {symbol} - Price: {BuyPrice}, Now: {LastPrice}, P/L: {txcolors.SELL_PROFIT if profit_percent >= 0. else txcolors.SELL_LOSS}{profit_percent:.2f}% ({profit:.{decimals()}f} {PAIR_WITH}){txcolors.DEFAULT}')
    if hsp_head == 1 and len(coin_orders) == 0: print(f'No trade slots are currently in use')
    return sell_orders
    # return coin_order_id
def update_portfolio(orders, last_price, volume):
    '''add every coin bought to our portfolio for tracking/selling later'''
    global profit_history
    # if DEBUG: print(orders)
    for coin in orders:
        if TEST_MODE:
            price = last_price[coin]['price']  # test orders never fill; use the observed market price
        if not TEST_MODE:
            price = get_order_price(orders[coin]['id'])  # real weighted-average fill price
        coin_orders[orders[coin]['id']] = {
            'symbol': orders[coin]['symbol'],
            'orderid': orders[coin]['id'],
            'timestamp': orders[coin]['createdAt'],
            'bought_at': price,
            'volume': volume[coin],
            'stop_loss': -STOP_LOSS,
            'take_profit': TAKE_PROFIT,
        }
        # save the coins in a json file in the same directory
        with open(coin_orders_file_path, 'w') as file:
            json.dump(coin_orders, file, indent=4)
        #save session info for through session portability
        with open(profit_history_file_path, 'w') as file:
            json.dump(profit_history, file, indent=4)
        print(f'Order for {orders[coin]["symbol"]} with ID {orders[coin]["id"]} placed and saved to file.')
def remove_from_portfolio(sell_orders):
    '''Remove coins sold due to SL or TP from portfolio'''
    for order,data in sell_orders.items():
        order_id = data['orderid']
        for bought_coin, bought_coin_data in coin_orders.items():
            if bought_coin_data['orderid'] == order_id:
                print(f"Sold {bought_coin_data['symbol']}, removed order ID {order_id} from history.")
                # pop + immediate break: breaking right after the pop avoids a
                # RuntimeError from mutating the dict while iterating it
                coin_orders.pop(bought_coin)
                with open(coin_orders_file_path, 'w') as file:
                    json.dump(coin_orders, file, indent=4)
                break
def write_log(logline):
    """Append a timestamped line to LOG_FILE."""
    stamp = datetime.now().strftime("%d/%m %H:%M:%S")
    with open(LOG_FILE, 'a+') as logfile:
        logfile.write(f"{stamp} {logline}\n")
def unrealised_percent_calc():
    # Recompute the summed unrealised P&L (%) across open trades, but only on
    # every 4th call (delay counter 0..3) to limit per-tick work.
    # NOTE(review): reads `last_price` from module scope (set by the main
    # loop) — confirm it is always populated before the first call.
    global unrealised_percent_delay, unrealised_percent
    if (unrealised_percent_delay > 3):
        unrealised_percent = 0
        for order, order_data in coin_orders.items():
            LastPrice = float(last_price[order_data['symbol']]['price'])
            # sell fee below would ofc only apply if transaction was closed at the current LastPrice
            sellFee = (order_data['volume'] * LastPrice) * (TRADING_FEE/100)
            BuyPrice = float(order_data['bought_at'])
            buyFee = (order_data['volume'] * BuyPrice) * (TRADING_FEE/100)
            PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
            if len(coin_orders) > 0:
                # NOTE(review): subtracts absolute fee amounts from a
                # percentage — verify the intended units
                unrealised_percent = unrealised_percent + (PriceChange-(sellFee+buyFee))
        unrealised_percent_delay = 0
    else:
        unrealised_percent_delay = unrealised_percent_delay + 1
    return unrealised_percent
def get_order_price(orderId):
    # Return the volume-weighted average fill price of an order, as a string
    # rounded to the coin's tick precision.
    fills = trader.get_fill_list(orderId=orderId, tradeType='TRADE')['items']
    # busy-wait: KuCoin may report no fills for a short while after placement
    while fills == []: # keep going until kucoin fills
        fills = trader.get_fill_list(orderId=orderId, tradeType='TRADE')['items']
        time.sleep(0.5)
    # get correct decimal places
    coin_info = list(filter(lambda x:x["symbol"]==fills[0]['symbol'],full_symbol_list)) # search the master list for correct coin
    decimal_length = coin_info[0]['baseIncrement'].index('1') - 1
    # weighted_avg = (price * size for each fill) / total order size
    weighted_avg = round(sum(float(fill['price']) * float(fill['size']) for fill in fills)
                         / sum(float(fill['size']) for fill in fills), decimal_length)
    return str(weighted_avg)
if __name__ == '__main__':
    # Load arguments then parse settings
    args = parse_args()
    mymodule = {}  # loaded signalling modules, keyed by module name
    # set to false at Start
    global bot_paused
    bot_paused = False
    DEFAULT_CONFIG_FILE = 'config.yml'
    DEFAULT_CREDS_FILE = 'creds.yml'
    config_file = args.config if args.config else DEFAULT_CONFIG_FILE
    creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
    parsed_config = load_config(config_file)
    parsed_creds = load_config(creds_file)
    # Default no debugging
    DEBUG = False
    # Load system vars
    TEST_MODE = parsed_config['script_options']['TEST_MODE']
    LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
    LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
    DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
    # Load trading vars
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    QUANTITY = parsed_config['trading_options']['QUANTITY']
    TRADE_SLOTS = parsed_config['trading_options']['TRADE_SLOTS']
    FIATS = parsed_config['trading_options']['FIATS']
    TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
    RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
    CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
    STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
    TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
    CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
    TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
    USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
    TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
    TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
    TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
    SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
    if DEBUG_SETTING or args.debug:
        DEBUG = True
    # Load creds for correct environment
    key, secret, passphrase = load_correct_creds(parsed_creds)
    if DEBUG:
        print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
        print(f'Your credentials have been loaded from {creds_file}')
    # Authenticate with the client, Ensure API key is good before continuing
    market = Market(url='https://api.kucoin.com')
    trader = Trade(key, secret, passphrase, is_sandbox=False, url='')
    client = User(key, secret, passphrase, is_sandbox=False, url='')
    api_ready, msg = test_api_key(client)
    if api_ready is not True:
        exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')
    full_symbol_list = market.get_symbol_list() # get master list of symbols
    # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
    if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]
    # try to load all the coins bought by the bot if the file exists and is not empty
    coin_orders = {}
    # path to the saved coin_orders file
    coin_orders_file_path = 'coin_orders.json'
    profit_history_file_path = 'profit_history.json'
    # use separate files for testing and live trading
    if TEST_MODE:
        coin_orders_file_path = 'test_' + coin_orders_file_path
        profit_history_file_path = 'test_' + profit_history_file_path
        LOG_FILE = 'test_' + LOG_FILE
    # profit_history is calculated in %, apparently: "this is inaccurate if QUANTITY is not the same!"
    if os.path.isfile(profit_history_file_path) and os.stat(profit_history_file_path).st_size!= 0:
        json_file=open(profit_history_file_path)
        profit_history=json.load(json_file)
        json_file.close()
    # rolling window of prices; cyclical queue
    historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
    hsp_head = -1
    # prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
    volatility_cooloff = {}
    # if saved coin_orders json file exists and it's not empty then load it
    if os.path.isfile(coin_orders_file_path) and os.stat(coin_orders_file_path).st_size!= 0:
        with open(coin_orders_file_path) as file:
            coin_orders = json.load(file)
    print('Press Ctrl-Q to stop the script')
    if not TEST_MODE:
        if not args.notimeout: # if notimeout skip this (fast for dev tests)
            print('WARNING: test mode is disabled in the configuration, you are using live funds.')
            print('WARNING: Waiting 30 seconds before live trading as a security measure!')
            # NOTE(review): message says 30 seconds but only 10 are slept — confirm intended value
            time.sleep(10)
    # drain any stale external signal files left over from a previous run
    signals = glob.glob("signals/*.exs")
    for filename in signals:
        for line in open(filename):
            try:
                os.remove(filename)
            except:
                if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
    if os.path.isfile("signals/paused.exc"):
        try:
            os.remove("signals/paused.exc")
        except:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
    # load signalling modules
    try:
        if len(SIGNALLING_MODULES) > 0:
            for module in SIGNALLING_MODULES:
                print(f'Starting {module}')
                mymodule[module] = importlib.import_module(module)
                t = threading.Thread(target=mymodule[module].do_work, args=())
                t.daemon = True  # daemon: never block interpreter exit
                t.start()
                time.sleep(2)
        else:
            print(f'No modules to load {SIGNALLING_MODULES}')
    except Exception as e:
        print(e)
    # seed initial prices
    get_price()
    ERROR_COUNT = 0
    # main loop: buy volatile/signalled coins, then evaluate SL/TP sells
    while True:
        try:
            orders, last_price, volume = buy()
            update_portfolio(orders, last_price, volume)
            sell_orders = sell_coins()
            remove_from_portfolio(sell_orders)
        except (ReadTimeout, ConnectionError, ConnectionResetError) as e:
            print(f'{txcolors.WARNING}KuCoin timeout error. Trying again. Current Count: {ERROR_COUNT}\n{e}{txcolors.DEFAULT}')
time.sleep(1) |
nicolive.py | import json
import logging
import re
import threading
import time
from urllib.parse import unquote_plus, urlparse
import websocket
from streamlink.plugin import Plugin, PluginArgument, PluginArguments
from streamlink.plugin.api import useragents
from streamlink.stream import HLSStream
from streamlink.utils.times import hours_minutes_seconds
from streamlink.utils.url import update_qsd
_log = logging.getLogger(__name__)
# Matches live-programme watch URLs, e.g. https://live2.nicovideo.jp/watch/lv123456
_url_re = re.compile(
    r"^https?://(?P<domain>live[0-9]*\.nicovideo\.jp)/watch/lv[0-9]*")
# Niconico account login endpoint and its fixed query parameters.
_login_url = "https://account.nicovideo.jp/login/redirector"
_login_url_params = {
    "show_button_twitter": 1,
    "show_button_facebook": 1,
    "next_url": "/"}
class NicoLive(Plugin):
    """Streamlink plugin for Niconico (nicovideo.jp) live broadcasts.

    Logs in (or reuses a cached session), scrapes the websocket API URL from
    the watch page, then drives the websocket protocol until the server hands
    out an HLS playlist URL.
    """

    arguments = PluginArguments(
        PluginArgument(
            "email",
            argument_name="niconico-email",
            sensitive=True,
            metavar="EMAIL",
            help="The email or phone number associated with your "
                 "Niconico account"),
        PluginArgument(
            "password",
            argument_name="niconico-password",
            sensitive=True,
            metavar="PASSWORD",
            help="The password of your Niconico account"),
        PluginArgument(
            "user-session",
            argument_name="niconico-user-session",
            sensitive=True,
            metavar="VALUE",
            help="Value of the user-session token \n(can be used in "
                 "case you do not want to put your password here)"),
        PluginArgument(
            "purge-credentials",
            argument_name="niconico-purge-credentials",
            action="store_true",
            help="""
            Purge cached Niconico credentials to initiate a new session
            and reauthenticate.
            """),
        PluginArgument(
            "timeshift-offset",
            type=hours_minutes_seconds,
            argument_name="niconico-timeshift-offset",
            metavar="[HH:]MM:SS",
            default=None,
            help="Amount of time to skip from the beginning of a stream. "
                 "Default is 00:00:00."))

    # Set True once the websocket API has delivered an HLS URL.
    is_stream_ready = False
    # Set True when the API reports the broadcast has ended.
    is_stream_ended = False
    # Seconds between keep-alive "watching" commands (server may override).
    watching_interval = 30
    watching_interval_worker_thread = None
    # Reader of the opened HLS stream; closed when the broadcast ends.
    stream_reader = None
    _ws = None
    frontend_id = None

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url) is not None

    def _get_streams(self):
        if self.options.get("purge_credentials"):
            self.clear_cookies()
            _log.info("All credentials were successfully removed")
        # Drop query parameters: the watch page is addressed by path only.
        self.url = self.url.split("?")[0]
        self.session.http.headers.update({
            "User-Agent": useragents.CHROME,
        })
        self.niconico_web_login()
        if not self.get_wss_api_url():
            _log.error(
                "Failed to get wss_api_url. "
                "Please check if the URL is correct, "
                "and make sure your account has access to the video.")
            return None
        self.api_connect(self.wss_api_url)
        # Poll until the websocket handshake yields an HLS URL (60 s timeout).
        i = 0
        while not self.is_stream_ready:
            if i % 10 == 0:
                _log.debug("Waiting for permit...")
            if i == 600:
                _log.error("Waiting for permit timed out.")
                return None
            if self.is_stream_ended:
                return None
            time.sleep(0.1)
            i += 1
        streams = HLSStream.parse_variant_playlist(
            self.session, self.hls_stream_url)
        # Wrap each variant so the plugin can close the reader on disconnect.
        nico_streams = {}
        for s in streams:
            nico_stream = NicoHLSStream(streams[s], self)
            nico_streams[s] = nico_stream
        return nico_streams

    def get_wss_api_url(self):
        """Scrape the websocket API URL (and frontend id) from the watch page.

        The page embeds HTML-escaped JSON, hence the &quot; delimiters below.
        (The previous literals had the entities decoded away, which made them
        invalid Python.)
        """
        _log.debug("Getting video page: {0}".format(self.url))
        resp = self.session.http.get(self.url)
        try:
            self.wss_api_url = extract_text(
                resp.text, "&quot;webSocketUrl&quot;:&quot;", "&quot;")
            if not self.wss_api_url:
                return False
        except Exception as e:
            _log.debug(e)
            _log.debug("Failed to extract wss api url")
            return False
        try:
            self.frontend_id = extract_text(
                resp.text, "&quot;frontendId&quot;:", ",")
        except Exception as e:
            _log.debug(e)
            # Non-fatal: the API URL still works without a frontend id.
            _log.warning("Failed to extract frontend id")
        self.wss_api_url = "{0}&frontend_id={1}".format(self.wss_api_url, self.frontend_id)
        _log.debug("Video page response code: {0}".format(resp.status_code))
        _log.trace("Video page response body: {0}".format(resp.text))
        _log.debug("Got wss_api_url: {0}".format(self.wss_api_url))
        _log.debug("Got frontend_id: {0}".format(self.frontend_id))
        return self.wss_api_url.startswith("wss://")

    def api_on_open(self):
        self.send_playerversion()
        require_new_stream = not self.is_stream_ready
        self.send_getpermit(require_new_stream=require_new_stream)

    def api_on_error(self, ws, error=None):
        if error:
            _log.warning(error)
        _log.warning("wss api disconnected.")
        _log.warning("Attempting to reconnect in 5 secs...")
        time.sleep(5)
        self.api_connect(self.wss_api_url)

    def api_connect(self, url):
        """Open the websocket API connection on a daemon thread."""
        # Proxy support adapted from the UStreamTV plugin (ustreamtv.py)
        proxy_url = self.session.get_option("https-proxy")
        if proxy_url is None:
            proxy_url = self.session.get_option("http-proxy")
        proxy_options = parse_proxy_url(proxy_url)
        if proxy_options.get('http_proxy_host'):
            _log.debug("Using proxy ({0}://{1}:{2})".format(
                proxy_options.get('proxy_type') or "http",
                proxy_options.get('http_proxy_host'),
                proxy_options.get('http_proxy_port') or 80))
        _log.debug("Connecting: {0}".format(url))
        self._ws = websocket.WebSocketApp(
            url,
            header=["User-Agent: {0}".format(useragents.CHROME)],
            on_open=self.api_on_open,
            on_message=self.handle_api_message,
            on_error=self.api_on_error)
        # Bug fix: proxy_options is a dict of keyword arguments for
        # run_forever(); passing it as args= would unpack the dict *keys*
        # as positional arguments. It must go through kwargs=.
        self.ws_worker_thread = threading.Thread(
            target=self._ws.run_forever,
            kwargs=proxy_options)
        self.ws_worker_thread.daemon = True
        self.ws_worker_thread.start()

    def send_message(self, type_, body):
        """Send a typed message with a body over the websocket API."""
        msg = {"type": type_, "body": body}
        msg_json = json.dumps(msg)
        _log.debug(f"Sending: {msg_json}")
        if self._ws and self._ws.sock.connected:
            self._ws.send(msg_json)
        else:
            _log.warning("wss api is not connected.")

    def send_no_body_message(self, type_):
        """Send a typed message without a body over the websocket API."""
        msg = {"type": type_}
        msg_json = json.dumps(msg)
        _log.debug(f"Sending: {msg_json}")
        if self._ws and self._ws.sock.connected:
            self._ws.send(msg_json)
        else:
            _log.warning("wss api is not connected.")

    def send_custom_message(self, msg):
        """Send an arbitrary dict, JSON-encoded, over the websocket API."""
        msg_json = json.dumps(msg)
        _log.debug(f"Sending: {msg_json}")
        if self._ws and self._ws.sock.connected:
            self._ws.send(msg_json)
        else:
            _log.warning("wss api is not connected.")

    def send_playerversion(self):
        body = {
            "type": "startWatching",
            "data": {
                "stream": {
                    "quality": "abr",
                    "protocol": "hls",
                    "latency": "high",
                    "chasePlay": False
                },
                "room": {
                    "protocol": "webSocket",
                    "commentable": True
                },
                "reconnect": False
            }
        }
        self.send_custom_message(body)

    def send_getpermit(self, require_new_stream=True):
        # NOTE(review): require_new_stream is unused; the parameter is kept
        # for compatibility with api_on_open()'s legacy call signature.
        body = {
            "type": "getAkashic",
            "data": {
                "chasePlay": False
            }
        }
        self.send_custom_message(body)

    def send_watching(self):
        body = {
            "command": "watching",
            "params": [self.broadcast_id, "-1", "0"]
        }
        self.send_message("watch", body)

    def send_pong(self):
        self.send_no_body_message("pong")
        self.send_no_body_message("keepSeat")

    def handle_api_message(self, message):
        """Dispatch one incoming websocket API message."""
        _log.debug(f"Received: {message}")
        message_parsed = json.loads(message)
        if message_parsed["type"] == "stream":
            data = message_parsed["data"]
            self.hls_stream_url = data["uri"]
            # load in the offset for timeshift live videos
            offset = self.get_option("timeshift-offset")
            if offset and 'timeshift' in self.wss_api_url:
                self.hls_stream_url = update_qsd(self.hls_stream_url, {"start": offset})
            self.is_stream_ready = True
        if message_parsed["type"] == "watch":
            body = message_parsed["body"]
            command = body["command"]
            if command == "currentstream":
                current_stream = body["currentStream"]
                self.hls_stream_url = current_stream["uri"]
                self.is_stream_ready = True
            elif command == "watchinginterval":
                # Server dictates the keep-alive cadence.
                self.watching_interval = int(body["params"][0])
                _log.debug("Got watching_interval: {0}".format(
                    self.watching_interval))
                if self.watching_interval_worker_thread is None:
                    _log.debug("send_watching_scheduler starting.")
                    self.watching_interval_worker_thread = threading.Thread(
                        target=self.send_watching_scheduler)
                    self.watching_interval_worker_thread.daemon = True
                    self.watching_interval_worker_thread.start()
                else:
                    _log.debug("send_watching_scheduler already running.")
            elif command == "disconnect":
                _log.info("Websocket API closed.")
                _log.info("Stream ended.")
                self.is_stream_ended = True
                if self.stream_reader is not None:
                    self.stream_reader.close()
                    _log.info("Stream reader closed.")
        elif message_parsed["type"] == "ping":
            self.send_pong()

    def send_watching_scheduler(self):
        """
        Periodically send "watching" command to the API.
        This is necessary to keep the session alive.
        """
        while not self.is_stream_ended:
            self.send_watching()
            time.sleep(self.watching_interval)

    def niconico_web_login(self):
        """Authenticate: session token > cached cookie > email/password."""
        user_session = self.get_option("user-session")
        email = self.get_option("email")
        password = self.get_option("password")
        if user_session is not None:
            _log.info("User session cookie is provided. Using it.")
            self.session.http.cookies.set(
                "user_session",
                user_session,
                path="/",
                domain="nicovideo.jp")
            self.save_cookies()
            return True
        elif self.session.http.cookies.get("user_session"):
            _log.info("cached session cookie is provided. Using it.")
            return True
        elif email is not None and password is not None:
            _log.info("Email and password are provided. Attemping login.")
            payload = {"mail_tel": email, "password": password}
            resp = self.session.http.post(_login_url, data=payload,
                                          params=_login_url_params)
            _log.debug("Login response code: {0}".format(resp.status_code))
            _log.trace("Login response body: {0}".format(resp.text))
            _log.debug("Cookies: {0}".format(
                self.session.http.cookies.get_dict()))
            if self.session.http.cookies.get("user_session") is None:
                try:
                    msg = extract_text(
                        resp.text, '<p class="notice__text">', "</p>")
                except Exception as e:
                    _log.debug(e)
                    msg = "unknown reason"
                _log.warning("Login failed. {0}".format(msg))
                return False
            else:
                _log.info("Logged in.")
                self.save_cookies()
                return True
        else:
            return False
class NicoHLSStream(HLSStream):
    """HLSStream variant that hands its reader back to the NicoLive plugin.

    The plugin needs the reader so it can close the stream when the
    websocket API reports that the broadcast has ended.
    """
    def __init__(self, hls_stream, nicolive_plugin):
        # Clone the already-parsed stream; url is already in hls_stream.args.
        super().__init__(
            hls_stream.session,
            force_restart=hls_stream.force_restart,
            start_offset=hls_stream.start_offset,
            duration=hls_stream.duration,
            **hls_stream.args)
        self.nicolive_plugin = nicolive_plugin

    def open(self):
        reader = super().open()
        # Expose the reader so the plugin's "disconnect" handler can close it.
        self.nicolive_plugin.stream_reader = reader
        return reader
def extract_text(text, left, right):
    """Return the unique substring of *text* between literal *left* and *right*.

    Raises Exception if the delimited substring does not occur exactly once.
    """
    # The delimiters are literal text, not regex fragments: escape them.
    # (Previously an unescaped delimiter containing a regex metacharacter
    # silently corrupted the pattern or raised re.error.)
    result = re.findall(
        "{0}(.*?){1}".format(re.escape(left), re.escape(right)), text)
    if len(result) != 1:
        raise Exception("Failed to extract string. "
                        "Expected 1, found {0}".format(len(result)))
    return result[0]
def parse_proxy_url(purl):
    """Parse a proxy URL into websocket-client proxy options.

    Adapted from the UStreamTV plugin (ustreamtv.py). Returns an empty
    dict when no proxy URL is configured.
    """
    if not purl:
        return {}
    parsed = urlparse(purl)
    options = {
        'proxy_type': parsed.scheme,
        'http_proxy_host': parsed.hostname,
    }
    if parsed.port:
        options['http_proxy_port'] = parsed.port
    if parsed.username:
        options['http_proxy_auth'] = (
            unquote_plus(parsed.username),
            unquote_plus(parsed.password or ""))
    return options
# Entry point: streamlink discovers the plugin class via this module-level name.
__plugin__ = NicoLive
|
master_server.py | #!/usr/bin/env python
#
# Copyright 2013 Tanel Alumae
"""
Reads speech data via websocket requests, sends it to Redis, waits for results from Redis and
forwards to client via websocket
"""
import sys
import logging
import json
import codecs
import os.path
import uuid
import time
import threading
import functools
from Queue import Queue
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.gen
import tornado.concurrent
import settings
import common
class Application(tornado.web.Application):
    """Tornado application wiring the client/worker websocket and HTTP routes.

    Tracks the pool of idle decoder workers and the set of websocket status
    listeners, and pushes counter updates to the latter.
    """
    def __init__(self):
        # Renamed from `settings`: the old local shadowed the module-level
        # `import settings` within this method.
        app_settings = dict(
            cookie_secret="43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            template_path=os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates"),
            static_path=os.path.join(os.path.dirname(os.path.dirname(__file__)), "static"),
            xsrf_cookies=False,
            autoescape=None,
        )
        handlers = [
            (r"/", MainHandler),
            (r"/client/ws/speech", DecoderSocketHandler),
            (r"/client/ws/status", StatusSocketHandler),
            (r"/client/dynamic/reference", ReferenceHandler),
            (r"/client/dynamic/recognize", HttpChunkedRecognizeHandler),
            (r"/worker/ws/speech", WorkerSocketHandler),
            (r"/client/static/(.*)", tornado.web.StaticFileHandler, {'path': app_settings["static_path"]}),
        ]
        tornado.web.Application.__init__(self, handlers, **app_settings)
        self.available_workers = set()   # idle WorkerSocketHandler connections
        self.status_listeners = set()    # open StatusSocketHandler connections
        self.num_requests_processed = 0

    def send_status_update_single(self, ws):
        """Send the current worker/request counters to one status listener."""
        status = dict(num_workers_available=len(self.available_workers), num_requests_processed=self.num_requests_processed)
        ws.write_message(json.dumps(status))

    def send_status_update(self):
        """Broadcast the current counters to every connected status listener."""
        for ws in self.status_listeners:
            self.send_status_update_single(ws)

    def save_reference(self, content_id, content):
        """Persist reference text for *content_id* into reference-content.json."""
        refs = {}
        try:
            with open("reference-content.json") as f:
                refs = json.load(f)
        except (IOError, ValueError):
            # Missing or corrupt file: start from an empty mapping.
            # (Narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
        refs[content_id] = content
        with open("reference-content.json", "w") as f:
            json.dump(refs, f, indent=2)
class MainHandler(tornado.web.RequestHandler):
    """Serves the project README as the landing page."""
    def get(self):
        self.render("../README.md")
def run_async(func):
    """Decorator: run *func* on a new thread; the wrapper returns the Thread."""
    @functools.wraps(func)
    def threaded_call(*args, **kwargs):
        worker = threading.Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return threaded_call
def content_type_to_caps(content_type):
    """
    Converts MIME-style raw audio content type specifier to GStreamer CAPS string.

    Raw audio types ("audio/x-raw", "audio/x-raw-int") are normalized and
    merged with default attributes; any other content type passes through
    unchanged.
    """
    default_attributes = {"rate": 16000, "format": "S16LE", "channels": 1, "layout": "interleaved"}
    media_type, _, attr_string = content_type.replace(";", ",").partition(",")
    if media_type in ["audio/x-raw", "audio/x-raw-int"]:
        media_type = "audio/x-raw"
        attributes = default_attributes
        # Bug fix: a content type without parameters used to produce an empty
        # "key=value" pair (trailing ", =") — skip blank fragments.
        # iteritems() was also replaced with items() for Python 2/3 compatibility.
        for (key, _, value) in [p.partition("=") for p in attr_string.split(",") if p.strip()]:
            attributes[key.strip()] = value.strip()
        return "%s, %s" % (media_type, ", ".join(["%s=%s" % (key, value) for (key, value) in attributes.items()]))
    else:
        return content_type
@tornado.web.stream_request_body
class HttpChunkedRecognizeHandler(tornado.web.RequestHandler):
    """
    Provides a HTTP POST/PUT interface supporting chunked transfer requests, similar to that provided by
    http://github.com/alumae/ruby-pocketsphinx-server.
    """
    def prepare(self):
        """Allocate a request id, claim an idle worker and forward header info."""
        self.id = str(uuid.uuid4())
        self.final_hyp = ""
        # Thread-safe handoff of the final hypothesis from the worker callback
        # to the coroutine blocked in end_request().
        self.final_result_queue = Queue()
        self.user_id = self.request.headers.get("device-id", "none")
        self.content_id = self.request.headers.get("content-id", "none")
        logging.info("%s: OPEN: user='%s', content='%s'" % (self.id, self.user_id, self.content_id))
        self.worker = None
        self.error_status = 0
        self.error_message = None
        try:
            self.worker = self.application.available_workers.pop()
            self.application.send_status_update()
            logging.info("%s: Using worker %s" % (self.id, self.__str__()))
            self.worker.set_client_socket(self)
            content_type = self.request.headers.get("Content-Type", None)
            if content_type:
                content_type = content_type_to_caps(content_type)
                logging.info("%s: Using content type: %s" % (self.id, content_type))
            self.worker.write_message(json.dumps(dict(id=self.id, content_type=content_type, user_id=self.user_id, content_id=self.content_id)))
        except KeyError:
            # set.pop() on an empty worker pool raises KeyError.
            logging.warn("%s: No worker available for client request" % self.id)
            self.set_status(503)
            self.finish("No workers available")

    def data_received(self, chunk):
        """Stream each uploaded body chunk straight through to the worker."""
        assert self.worker is not None
        logging.debug("%s: Forwarding client message of length %d to worker" % (self.id, len(chunk)))
        self.worker.write_message(chunk, binary=True)

    def post(self, *args, **kwargs):
        self.end_request(args, kwargs)

    def put(self, *args, **kwargs):
        self.end_request(args, kwargs)

    @run_async
    def get_final_hyp(self, callback=None):
        """Block (on a helper thread) until close() delivers the final result."""
        logging.info("%s: Waiting for final result..." % self.id)
        callback(self.final_result_queue.get(block=True))

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def end_request(self, *args, **kwargs):
        """Signal EOS to the worker, await the final hypothesis and respond."""
        logging.info("%s: Handling the end of chunked recognize request" % self.id)
        assert self.worker is not None
        self.worker.write_message("EOS", binary=True)
        logging.info("%s: yielding..." % self.id)
        hyp = yield tornado.gen.Task(self.get_final_hyp)
        if self.error_status == 0:
            logging.info("%s: Final hyp: %s" % (self.id, hyp))
            response = {"status" : 0, "id": self.id, "hypotheses": [{"utterance" : hyp}]}
            self.write(response)
        else:
            logging.info("%s: Error (status=%d) processing HTTP request: %s" % (self.id, self.error_status, self.error_message))
            response = {"status" : self.error_status, "id": self.id, "message": self.error_message}
            self.write(response)
        self.application.num_requests_processed += 1
        self.application.send_status_update()
        self.worker.set_client_socket(None)
        self.worker.close()
        self.finish()
        logging.info("Everything done")

    def send_event(self, event):
        """Accumulate final hypotheses from worker events; record errors."""
        event_str = str(event)
        if len(event_str) > 100:
            event_str = event_str[:97] + "..."
        logging.info("%s: Receiving event %s from worker" % (self.id, event_str))
        if event["status"] == 0 and ("result" in event):
            try:
                if len(event["result"]["hypotheses"]) > 0 and event["result"]["final"]:
                    if len(self.final_hyp) > 0:
                        self.final_hyp += " "
                    self.final_hyp += event["result"]["hypotheses"][0]["transcript"]
            except Exception as e:
                # Bug fix: the old handler concatenated the exception *class*
                # from sys.exc_info()[0] to a str, which itself raised TypeError.
                logging.warn("Failed to extract hypothesis from recognition result:" + str(e))
        elif event["status"] != 0:
            self.error_status = event["status"]
            self.error_message = event.get("message", "")

    def close(self):
        """Worker closed: release the coroutine waiting on the final result."""
        logging.info("%s: Receiving 'close' from worker" % (self.id))
        self.final_result_queue.put(self.final_hyp)
class ReferenceHandler(tornado.web.RequestHandler):
    """Accepts reference transcripts over HTTP POST; CORS-enabled."""
    def post(self, *args, **kwargs):
        content_id = self.request.headers.get("Content-Id")
        if not content_id:
            # Guard clause: a reference text is useless without an id to file it under.
            self.set_status(400)
            self.finish("No Content-Id specified")
            return
        content = codecs.decode(self.request.body, "utf-8")
        user_id = self.request.headers.get("User-Id", "")
        self.application.save_reference(content_id, dict(content=content, user_id=user_id, time=time.strftime("%Y-%m-%dT%H:%M:%S")))
        logging.info("Received reference text for content %s and user %s" % (content_id, user_id))
        self.set_header('Access-Control-Allow-Origin', '*')

    def options(self, *args, **kwargs):
        """Answer the CORS preflight request."""
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
        self.set_header('Access-Control-Max-Age', 1000)
        # note that '*' is not valid for Access-Control-Allow-Headers
        self.set_header('Access-Control-Allow-Headers', 'origin, x-csrftoken, content-type, accept, User-Id, Content-Id')
class StatusSocketHandler(tornado.websocket.WebSocketHandler):
    """Pushes server status (worker/request counters) to monitoring clients."""
    # needed for Tornado 4.0
    def check_origin(self, origin):
        # Allow cross-origin websocket connections.
        return True

    def open(self):
        logging.info("New status listener")
        self.application.status_listeners.add(self)
        # Send an immediate snapshot so the client need not wait for a change.
        self.application.send_status_update_single(self)

    def on_close(self):
        logging.info("Status listener left")
        self.application.status_listeners.remove(self)
class WorkerSocketHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint for decoder workers.

    An idle worker sits in application.available_workers; once claimed by a
    client handler, recognition events from the worker are relayed to that
    client via send_event().
    """
    def __init__(self, application, request, **kwargs):
        tornado.websocket.WebSocketHandler.__init__(self, application, request, **kwargs)
        # The client handler currently served by this worker (None = idle).
        self.client_socket = None

    # needed for Tornado 4.0
    def check_origin(self, origin):
        # Allow cross-origin websocket connections.
        return True

    def open(self):
        self.client_socket = None
        self.application.available_workers.add(self)
        logging.info("New worker available " + self.__str__())
        self.application.send_status_update()

    def on_close(self):
        logging.info("Worker " + self.__str__() + " leaving")
        self.application.available_workers.discard(self)
        # A worker dying mid-request takes its client connection down too.
        if self.client_socket:
            self.client_socket.close()
        self.application.send_status_update()

    def on_message(self, message):
        assert self.client_socket is not None
        # print >>sys.stderr,"-------------------------->",message
        event = json.loads(message)
        self.client_socket.send_event(event)

    def set_client_socket(self, client_socket):
        self.client_socket = client_socket
class DecoderSocketHandler(tornado.websocket.WebSocketHandler):
    """Client-facing websocket: pairs the client with an idle decoder worker
    and forwards audio toward it and recognition events back."""
    # needed for Tornado 4.0
    def check_origin(self, origin):
        # Allow cross-origin websocket connections.
        return True

    def send_event(self, event):
        """Relay one recognition event (from the worker) to the client."""
        event["id"] = self.id
        event_str = str(event)
        if len(event_str) > 200:
            # Truncate only the log line, not the event sent to the client.
            event_str = event_str[:197] + "..."
        logging.info("%s: Sending event %s to client" % (self.id, event_str))
        self.write_message(json.dumps(event))

    def open(self):
        self.id = str(uuid.uuid4())
        logging.info("%s: OPEN" % (self.id))
        logging.info("%s: Request arguments: %s" % (self.id, " ".join(["%s=\"%s\"" % (a, self.get_argument(a)) for a in self.request.arguments])))
        self.user_id = self.get_argument("user-id", "none", True)
        self.content_id = self.get_argument("content-id", "none", True)
        self.worker = None
        try:
            # set.pop() raises KeyError when no idle worker is available.
            self.worker = self.application.available_workers.pop()
            self.application.send_status_update()
            logging.info("%s: Using worker %s" % (self.id, self.__str__()))
            self.worker.set_client_socket(self)
            content_type = self.get_argument("content-type", None, True)
            if content_type:
                logging.info("%s: Using content type: %s" % (self.id, content_type))
            self.worker.write_message(json.dumps(dict(id=self.id, content_type=content_type, user_id=self.user_id, content_id=self.content_id)))
        except KeyError:
            logging.warn("%s: No worker available for client request" % self.id)
            event = dict(status=common.STATUS_NOT_AVAILABLE, message="No decoder available, try again later")
            self.send_event(event)
            self.close()

    def on_connection_close(self):
        logging.info("%s: Handling on_connection_close()" % self.id)
        self.application.num_requests_processed += 1
        self.application.send_status_update()
        if self.worker:
            try:
                self.worker.set_client_socket(None)
                logging.info("%s: Closing worker connection" % self.id)
                self.worker.close()
            except:
                # Best effort: the worker connection may already be gone.
                pass

    def on_message(self, message):
        """Forward client audio (binary) or control text to the worker."""
        assert self.worker is not None
        logging.info("%s: Forwarding client message (%s) of length %d to worker" % (self.id, type(message), len(message)))
        # Python 2 codebase: text frames arrive as `unicode`, audio as bytes.
        if isinstance(message, unicode):
            self.worker.write_message(message, binary=False)
        else:
            self.worker.write_message(message, binary=True)
def main():
    """Configure logging, parse CLI options and start the Tornado IO loop."""
    logging.basicConfig(level=logging.DEBUG, format="%(levelname)8s %(asctime)s %(message)s ")
    logging.debug('Starting up server')
    from tornado.options import options
    tornado.options.parse_command_line()
    app = Application()
    # The port option is expected to be defined/parsed via tornado.options.
    app.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main()
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum import keystore, simple_config, ecc
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum import constants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet, AddTransactionException, CannotBumpFee
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
class StatusBarButton(QPushButton):
    """Flat 25px icon button for the status bar that invokes *func*.

    Wraps click/key handling so the PyQt5 "checked" argument never
    reaches the callback.
    """
    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.setIconSize(QSize(25, 25))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Activate on Return just like a click.
        if e.key() == Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
    """Build the main wallet window: tabs, shortcuts, network callbacks,
    then load *wallet*. Order matters: widgets must exist before the
    network callbacks that update them are registered."""
    QMainWindow.__init__(self)
    self.gui_object = gui_object
    self.config = config = gui_object.config
    self.setup_exception_hook()
    self.network = gui_object.daemon.network
    self.fx = gui_object.daemon.fx
    self.invoices = wallet.invoices
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    self.cleaned_up = False
    self.is_max = False
    self.payment_request = None
    self.checking_accounts = False
    self.qr_window = None
    self.not_enough_funds = False
    self.pluginsdialog = None
    self.require_fee_update = False
    # Incoming txs queued by the network thread for notify_transactions().
    self.tx_notifications = []
    # Stack of dialogs for top_level_window() parenting.
    self.tl_windows = []
    self.tx_external_keypairs = {}
    self.create_status_bar()
    self.need_update = threading.Event()
    self.decimal_point = config.get('decimal_point', 5)
    self.num_zeros = int(config.get('num_zeros',0))
    self.completions = QStringListModel()
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
    tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
    def add_optional_tab(tabs, tab, icon, description, name):
        # Remember placement metadata so toggle_tab() can re-insert in order.
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), False):
            tabs.addTab(tab, icon, description.replace("&", ""))
    add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
    add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    self.setCentralWidget(tabs)
    if self.config.get("is_maximized"):
        self.showMaximized()
    self.setWindowIcon(QIcon(":icons/electrum.png"))
    self.init_menubar()
    # weakref so the shortcut lambdas below don't keep the tabs alive.
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
    for i in range(wrtabs.count()):
        # i=i default binds the loop variable (avoids late-binding closures).
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.notify_transactions_signal.connect(self.notify_transactions)
    self.history_list.setFocus(True)
    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['updated', 'new_transaction', 'status',
                     'banner', 'verified', 'fee']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        self.network.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
        self.new_fx_quotes_signal.connect(self.on_fx_quotes)
        self.new_fx_history_signal.connect(self.on_fx_history)
    # update fee slider in case we missed the callback
    self.fee_slider.update()
    self.load_wallet(wallet)
    self.connect_slots(gui_object.timer)
    self.fetch_alias()
def on_history(self, b):
    # Network-thread callback: marshal to the GUI thread via the Qt signal.
    self.new_fx_history_signal.emit()
def setup_exception_hook(self):
    # Install the crash-reporter excepthook for this window.
    Exception_Hook(self)
def on_fx_history(self):
    # GUI-thread handler for new_fx_history_signal: refresh fiat columns.
    self.history_list.refresh_headers()
    self.history_list.update()
    self.address_list.update()
def on_quotes(self, b):
    # Network-thread callback: marshal to the GUI thread via the Qt signal.
    self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
    # GUI-thread handler for new fiat exchange-rate quotes.
    self.update_status()
    # Refresh edits with the new rate
    edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
    edit.textEdited.emit(edit.text())
    edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
    edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_list.update()
def toggle_tab(self, tab):
    """Show or hide an optional tab and persist the choice in the config."""
    show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
    self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
    item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
    tab.menu_action.setText(item_text)
    if not show:
        self.tabs.removeTab(self.tabs.indexOf(tab))
        return
    # Find out where to place the tab: before the first existing tab with a
    # larger tab_pos (tabs without a tab_pos attribute are skipped).
    insert_at = len(self.tabs)
    for pos in range(len(self.tabs)):
        try:
            if tab.tab_pos < self.tabs.widget(pos).tab_pos:
                insert_at = pos
                break
        except AttributeError:
            pass
    self.tabs.insertTab(insert_at, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
def push_top_level_window(self, window):
    '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
    parented. This used to be done by explicitly providing the parent
    window, but that isn't something hardware wallet prompts know.'''
    self.tl_windows.append(window)
def pop_top_level_window(self, window):
    # Partner of push_top_level_window(): drop the dialog from the stack.
    self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
    '''Do the right thing in the presence of tx dialog windows'''
    # The most recently pushed dialog, if any, overrides the main window.
    candidate = self.tl_windows[-1] if self.tl_windows else None
    if candidate and test_func and not test_func(candidate):
        candidate = None  # only override if ok for test_func
    return self.top_level_window_recurse(candidate, test_func)
def diagnostic_name(self):
    # "<base-name>/<wallet-basename>"; "None" when no wallet is loaded yet.
    base = PrintError.diagnostic_name(self)
    suffix = self.wallet.basename() if self.wallet else "None"
    return "%s/%s" % (base, suffix)
def is_hidden(self):
    # "Hidden" covers both minimized and explicitly hidden (e.g. to tray).
    return self.isMinimized() or self.isHidden()
def show_or_hide(self):
    """Toggle window visibility (tray-icon click behaviour)."""
    if not self.is_hidden():
        self.hide()
    else:
        self.bring_to_top()
def bring_to_top(self):
    self.show()
    # raise_() lifts the window above its sibling windows.
    self.raise_()
def on_error(self, exc_info):
    # Central error handler for background-task failures; user cancellations
    # are intentional and therefore silent.
    if not isinstance(exc_info[1], UserCancelled):
        try:
            traceback.print_exception(*exc_info)
        except OSError:
            pass  # see #4418; try to at least show popup:
        self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
    # Runs on the network thread: route events toward the GUI thread.
    if event == 'updated':
        # Coalesced via a flag; the GUI timer performs the actual update.
        self.need_update.set()
        self.gui_object.network_updated_signal_obj.network_updated_signal \
            .emit(event, args)
    elif event == 'new_transaction':
        self.tx_notifications.append(args[0])
        self.notify_transactions_signal.emit()
    elif event in ['status', 'banner', 'verified', 'fee']:
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    else:
        self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
    # Handle a network message in the GUI thread.
    # Events arrive here via network_signal, emitted by on_network.
    if event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'verified':
        self.history_list.update_item(*args)
    elif event == 'fee':
        # Fee estimates changed; only relevant when dynamic fees are on.
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.do_update_fee()
    elif event == 'fee_histogram':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.do_update_fee()
        # todo: update only unconfirmed tx
        self.history_list.update()
    else:
        self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
    """Resolve the configured OpenAlias ('alias' config key) in the background.

    Resets alias_info first; when the lookup finishes, alias_info is set
    and alias_received_signal is emitted so the GUI can react.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if alias:
        alias = str(alias)
        def f():
            self.alias_info = self.contacts.resolve_openalias(alias)
            self.alias_received_signal.emit()
        t = threading.Thread(target=f)
        # Don't block interpreter shutdown on a slow DNS lookup.
        # (t.setDaemon(True) is deprecated in favour of the attribute.)
        t.daemon = True
        t.start()
def close_wallet(self):
    """Run plugin close-wallet hooks for the current wallet, if one is open."""
    if not self.wallet:
        return
    self.print_error('close_wallet', self.wallet.storage.path)
    run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
    """Attach *wallet* to this window and (re)initialize all GUI state for it."""
    # Background task thread for wallet operations; errors go to on_error.
    wallet.thread = TaskThread(self, self.on_error)
    self.wallet = wallet
    self.update_recently_visited(wallet.storage.path)
    # address used to create a dummy transaction and estimate transaction fee
    self.history_list.update()
    self.address_list.update()
    self.utxo_list.update()
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    self.notify_transactions()
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.tabs.show()
    self.init_geometry()
    # Optionally start minimized to the system tray.
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
def init_geometry(self):
    """Restore the saved window geometry, falling back to a default.

    The stored "winpos-qt" value is validated against the current screen
    so the window never reappears off-screen (e.g. after a monitor was
    unplugged).
    """
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        assert screen.contains(QRect(*winpos))
        self.setGeometry(*winpos)
    except Exception:
        # winpos may be missing/malformed or off-screen; a bare `except:`
        # here would also have swallowed KeyboardInterrupt/SystemExit.
        self.print_error("using default geometry")
        self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    """Refresh the window title and menu availability after the wallet's
    watching-only status (or the wallet itself) changed."""
    name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
    title = '%s %s - %s' % (name, self.wallet.electrum_version,
                            self.wallet.basename())
    extra = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        self.warn_if_watching_only()
        extra.append(_('watching only'))
    title += ' [%s]'% ', '.join(extra)
    self.setWindowTitle(title)
    # Enable/hide wallet-capability dependent menu entries.
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Pop up an informational warning when the wallet cannot spend."""
    if not self.wallet.is_watching_only():
        return
    msg = ' '.join([
        _("This wallet is watching-only."),
        _("This means you will not be able to spend Bitcoins with it."),
        _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
    ])
    self.show_warning(msg, title=_('Information'))
def open_wallet(self):
    """Ask the user for a wallet file and open it in a new window."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Copy the current wallet file to a location chosen by the user."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    if new_path != path:
        try:
            shutil.copy2(path, new_path)
            self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
        except Exception as reason:
            # Exception, not BaseException: don't swallow
            # KeyboardInterrupt/SystemExit while reporting a copy failure.
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
    """Promote *filename* to most-recently-opened and rebuild the menu.

    Keeps at most five entries in the 'recently_open' config key; the
    menu entries are bound to Ctrl+1..Ctrl+5 shortcuts.
    """
    recent = self.config.get('recently_open', [])
    try:
        # Result discarded on purpose: this probes that the stored value
        # is a sortable list and raises otherwise.
        sorted(recent)
    except Exception:
        # Corrupt/heterogeneous config entry -- start with a fresh list.
        # (Was a bare `except:`, which would also catch KeyboardInterrupt.)
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # Bind k now; a bare lambda would late-bind the loop variable.
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Return the directory that contains the current wallet file."""
    wallet_path = os.path.abspath(self.config.get_wallet_path())
    return os.path.dirname(wallet_path)
def new_wallet(self):
    """Pick the first free default filename (wallet_1, wallet_2, ...) in the
    wallet folder and open the new/restore wizard for it."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    # Scan the directory once instead of re-listing it on every probe.
    existing = set(os.listdir(wallet_folder))
    i = 1
    while True:
        filename = "wallet_%d" % i
        if filename not in existing:
            break
        i += 1
    full_path = os.path.join(wallet_folder, filename)
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the window's menu bar: File / Wallet / View / Tools / Help.

    Several actions are stored on self so that watching_only_changed()
    can later enable/hide them depending on wallet capabilities.
    """
    menubar = QMenuBar()
    # --- File menu ---
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    # --- Wallet menu ---
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()
    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
    # --- View menu: show/hide the optional tabs ---
    def add_toggle_action(view_menu, tab):
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)
    # --- Tools menu ---
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    # Let plugins extend the Tools menu.
    run_hook('init_menubar_tools', self, tools_menu)
    # --- Help menu ---
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open the Send tab pre-filled with the connected server's donation address."""
    donation_address = self.network.get_donation_address()
    if not donation_address:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters()[0]
    self.pay_to_URI('bitcoin:%s?message=donation for %s'%(donation_address, host))
def show_about(self):
    """Display the Help -> About message box."""
    QMessageBox.about(self, "Electrum",
                      (_("Version")+" %s" % self.wallet.electrum_version + "\n\n" +
                       _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                       _("You do not need to perform regular backups, because your wallet can be "
                         "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                       _("Startup times are instant because it operates in conjunction with high-performance "
                         "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                       _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
    """Display the Help -> Report Bug dialog with the issue-tracker link."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
    """Show tray notifications for transactions received while running.

    With three or more pending transactions a single combined summary is
    shown; otherwise one notification per incoming (v > 0) transaction.
    """
    if not self.network or not self.network.is_connected():
        return
    self.print_error("Notifying GUI")
    if len(self.tx_notifications) > 0:
        # Combine the transactions if there are at least three
        num_txns = len(self.tx_notifications)
        if num_txns >= 3:
            total_amount = 0
            for tx in self.tx_notifications:
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if v > 0:
                    total_amount += v
            self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
                        .format(num_txns, self.format_amount_and_units(total_amount)))
            self.tx_notifications = []
        else:
            # Take a snapshot and clear the pending list first: the old
            # code called remove() on the list while iterating it, which
            # skips every other entry and so dropped notifications.
            txns, self.tx_notifications = self.tx_notifications, []
            for tx in txns:
                if not tx:
                    continue
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if v > 0:
                    self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
    """Show *message* as a system-tray balloon notification (20s timeout)."""
    if not self.tray:
        return
    try:
        # The QIcon overload requires Qt 5.9; fall back to the generic
        # information icon on older Qt versions.
        self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
    except TypeError:
        self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
    """Open-file dialog that starts in (and updates) the remembered 'io_dir'."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    chosen, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
    if chosen:
        new_dir = os.path.dirname(chosen)
        if new_dir != directory:
            # Remember the folder for the next file dialog.
            self.config.set_key('io_dir', new_dir, True)
    return chosen
def getSaveFileName(self, title, filename, filter = ""):
    """Save-file dialog that starts in (and updates) the remembered 'io_dir'."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    default_path = os.path.join( directory, filename )
    chosen, __ = QFileDialog.getSaveFileName(self, title, default_path, filter)
    if chosen:
        new_dir = os.path.dirname(chosen)
        if new_dir != directory:
            # Remember the folder for the next file dialog.
            self.config.set_key('io_dir', new_dir, True)
    return chosen
def connect_slots(self, sender):
    """Wire *sender*'s timer signal to this window's periodic timer_actions."""
    sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
    """Periodic housekeeping driven by the main timer signal."""
    # Note this runs in the GUI thread
    # Refresh wallet views when the network thread flagged new data.
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format satoshi amount *x* per the window's unit/zero-padding settings."""
    return format_satoshis(
        x,
        self.num_zeros,
        self.decimal_point,
        is_diff=is_diff,
        whitespaces=whitespaces,
    )
def format_amount_and_units(self, amount):
    """Render *amount* in the base unit, appending the fiat value when available."""
    text = '%s %s' % (self.format_amount(amount), self.base_unit())
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text += ' (%s)' % fiat
    return text
def format_fee_rate(self, fee_rate):
    """Render a fee rate (given per kilobyte) as a 'sat/byte' string."""
    per_byte = fee_rate / 1000
    return format_fee_satoshis(per_byte, self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Return the decimal point currently used for amount display."""
    return self.decimal_point
def base_unit(self):
    # Human-readable unit name derived from the configured decimal point.
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a BTC amount edit and its fiat twin in sync.

    Editing either field recomputes the other from the current exchange
    rate.  The `follows` flag marks programmatic updates so the paired
    textChanged handler does not echo them back (feedback loop guard).
    """
    def edit_changed(edit):
        if edit.follows:
            # Programmatic update triggered by the other field -- ignore.
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # No usable rate or empty input: blank the derived field(s).
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the balance/status text, connection icon and tray tooltip."""
    if not self.wallet:
        return
    if self.network is None or not self.network.is_running():
        text = _("Offline")
        icon = QIcon(":icons/status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            text = _("Synchronizing...")
            icon = QIcon(":icons/status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = QIcon(":icons/status_lagging.png")
        else:
            # Fully synced: show confirmed / unconfirmed / unmatured balances.
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = QIcon(":icons/status_connected.png")
            else:
                icon = QIcon(":icons/status_connected_proxy.png")
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = QIcon(":icons/status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, plus all tab lists once synced (or offline)."""
    self.update_status()
    if self.wallet.up_to_date or not (self.network and self.network.is_connected()):
        self.update_tabs()
def update_tabs(self):
    """Refresh every list view and the payto completions."""
    views = (self.history_list, self.request_list, self.address_list,
             self.utxo_list, self.contact_list, self.invoice_list)
    for view in views:
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab: a HistoryList plus its optional filter toolbar."""
    from .history_list import HistoryList
    self.history_list = l = HistoryList(self)
    # The history list searches itself (no separate searchable widget).
    l.searchable_list = l
    toolbar = l.create_toolbar(self.config)
    # Restore the user's last toolbar visibility choice.
    toolbar_shown = self.config.get('show_toolbar_history', False)
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)
def show_address(self, addr):
    """Open the modal address-details dialog for *addr*."""
    from . import address_dialog
    dialog = address_dialog.AddressDialog(self, addr)
    dialog.exec_()
def show_transaction(self, tx, tx_desc=None):
    """Open the transaction-details dialog for *tx*.

    tx_desc is set only for txs created in the Send tab.
    """
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: request form, QR code preview and request list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    # Row 0: receiving address (read-only, copyable).
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    # Row 1: request description.
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    # Row 2: requested amount (+ optional fiat twin kept in sync below).
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    # Row 3: expiration selector (combo while editing, label when saved).
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    # Row 4: Save / New buttons.
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # QR preview; click toggles the detached QR window.
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the stored payment request for *addr* and refresh the receive tab."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the 'bitcoin:' URI for the stored request at *addr*.

    Optional request fields (time, exp, name+sig) are appended as extra
    URI parameters; the signature is base58-encoded.
    """
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    amount = req['amount']
    URI = util.create_URI(addr, amount, message)
    extras = []
    if req.get('time'):
        extras.append("&time=%d"%req.get('time'))
    if req.get('exp'):
        extras.append("&exp=%d"%req.get('exp'))
    if req.get('name') and req.get('sig'):
        sig = bitcoin.base_encode(bfh(req.get('sig')), base=58)
        extras.append("&name=" + req['name'] + "&sig="+sig)
    return str(URI + ''.join(extras))
def sign_payment_request(self, addr):
    """If the configured OpenAlias resolves to one of our own addresses,
    sign the payment request at *addr* with it (may prompt for password)."""
    alias = self.config.get('alias')
    alias_privkey = None  # NOTE(review): assigned but never used here
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                # Alias resolves to an address we do not own: cannot sign.
                return
def save_payment_request(self):
    """Persist the receive-tab form as a payment request on the wallet.

    Returns False when the form holds neither a message nor an amount;
    otherwise stores the request, optionally signs it with the configured
    OpenAlias, and refreshes the request/address lists in any case.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    i = self.expires_combo.currentIndex()
    # expiration_values holds (label, value) pairs aligned with the combo;
    # index directly instead of materializing a throwaway mapped list.
    expiration = expiration_values[i][1]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR text view and a copy/close button."""
    dialog = WindowModalDialog(self, title)
    layout = QVBoxLayout()
    label = QLabel(msg)
    label.setWordWrap(True)
    layout.addWidget(label)
    pr_e = ShowQRTextEdit(text=data)
    layout.addWidget(pr_e)
    layout.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
    dialog.setLayout(layout)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the payment request for *addr* and save it as a .bip70 file."""
    req = self.wallet.receive_requests.get(addr)
    pr = paymentrequest.serialize_request(req).SerializeToString()
    name = req['id'] + '.bip70'
    fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
    if not fileName:
        return
    with open(fileName, "wb+") as f:
        f.write(util.to_bytes(pr))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Point the receive form at a fresh unused address, creating one if needed."""
    addr = self.wallet.get_unused_address()
    if addr is None:
        # No unused address available.
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        # Deterministic wallet: warn before extending the address gap.
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Point the receive form at *addr*, clearing the message and amount."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive form to the wallet's current receiving address."""
    addr = self.wallet.get_receiving_address() or ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    # Back to editing mode: show the expiry combo, hide the saved label.
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detached QR-code window, preserving its geometry."""
    from . import qrwindow
    if not self.qr_window:
        # First use: create the window and remember its initial geometry.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            # Re-show at the position it was last hidden from.
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            # Remember the current position before hiding.
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    idx = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(idx)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    idx = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(idx)
def receive_at(self, addr):
    """Jump to the Receive tab pre-filled with *addr* (no-op if invalid)."""
    if not bitcoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Regenerate the receive URI/QR code from the current form contents."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # Saving only makes sense once there is an amount or a message.
    self.save_request_button.setEnabled((amount is not None) or (message != ""))
    uri = util.create_URI(addr, amount, message)
    self.receive_qr.setData(uri)
    # Mirror the content into the detached QR window when it is open.
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
    """Store the explanatory text for the current fee-rounding delta."""
    text = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = text.format(num_satoshis_added)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
    """Switch the send tab into 'spend max' mode and recompute the fee."""
    # Plugins get a chance to veto spending before anything changes.
    aborted = run_hook('abort_send', self)
    if aborted:
        return
    self.is_max = True
    self.do_update_fee()
def update_fee(self):
    """Request a deferred fee recalculation (picked up by the main loop)."""
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the entered recipient, or a placeholder output for estimation."""
    recipient = self.payto_e.get_recipient()
    # Fall back to a dummy wallet address so a fee/size estimate is possible.
    return recipient or (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    # '!' is the sentinel make_unsigned_transaction understands as "max".
    amount = '!' if self.is_max else self.amount_e.get_amount()
    if amount is None:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
    else:
        fee_estimator = self.get_send_fee_estimator()
        outputs = self.payto_e.get_outputs(self.is_max)
        if not outputs:
            # No recipient typed yet: use a dummy output so the size/fee
            # estimate is still meaningful.
            _type, addr = self.get_payto_or_dummy()
            outputs = [(_type, addr, amount)]
        is_sweep = bool(self.tx_external_keypairs)
        make_tx = lambda fee_est: \
            self.wallet.make_unsigned_transaction(
                self.get_coins(), outputs, self.config,
                fixed_fee=fee_est, is_sweep=is_sweep)
        try:
            tx = make_tx(fee_estimator)
            self.not_enough_funds = False
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            if not freeze_fee:
                self.fee_e.setAmount(None)
            if not freeze_feerate:
                self.feerate_e.setAmount(None)
            self.feerounding_icon.setVisible(False)
            if isinstance(e, NotEnoughFunds):
                self.not_enough_funds = True
            elif isinstance(e, NoDynamicFeeEstimates):
                # No fee estimates available yet: build with a zero fee so
                # the size field can still be filled in.
                try:
                    tx = make_tx(0)
                    size = tx.estimated_size()
                    self.size_e.setAmount(size)
                except BaseException:
                    pass
            return
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            return
        size = tx.estimated_size()
        self.size_e.setAmount(size)
        fee = tx.get_fee()
        fee = None if self.not_enough_funds else fee
        # Displayed fee/fee_rate values are set according to user input.
        # Due to rounding or dropping dust in CoinChooser,
        # actual fees often differ somewhat.
        if freeze_feerate or self.fee_slider.is_active():
            displayed_feerate = self.feerate_e.get_amount()
            if displayed_feerate:
                displayed_feerate = quantize_feerate(displayed_feerate)
            else:
                # fallback to actual fee
                displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)
            displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
            self.fee_e.setAmount(displayed_fee)
        else:
            if freeze_fee:
                displayed_fee = self.fee_e.get_amount()
            else:
                # fallback to actual fee if nothing is frozen
                displayed_fee = fee
                self.fee_e.setAmount(displayed_fee)
            displayed_fee = displayed_fee if displayed_fee else 0
            displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        # show/hide fee rounding icon
        feerounding = (fee - displayed_fee) if fee else 0
        self.set_feerounding_text(int(feerounding))
        self.feerounding_icon.setToolTip(self.feerounding_text)
        self.feerounding_icon.setVisible(abs(feerounding) >= 1)
        if self.is_max:
            amount = tx.output_value()
            # Plugins (e.g. 2FA) may add their own fee output on top.
            __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
            amount_after_all_fees = amount - x_fee_amount
            self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Remove the given coin row from the 'pay from' list and refresh."""
    idx = self.from_list.indexOfTopLevelItem(item)
    del self.pay_from[idx]
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Context menu for the coin under the cursor in the 'from' list."""
    clicked_item = self.from_list.itemAt(position)
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(clicked_item))
    global_pos = self.from_list.viewport().mapToGlobal(position)
    menu.exec_(global_pos)
def set_pay_from(self, coins):
    """Set the explicit coin selection used as transaction inputs."""
    self.pay_from = [coin for coin in coins]
    self.redraw_from_list()
def redraw_from_list(self):
    """Rebuild the 'pay from' coin list widget from self.pay_from."""
    self.from_list.clear()
    have_coins = bool(self.pay_from)
    self.from_label.setHidden(not have_coins)
    self.from_list.setHidden(not have_coins)

    def describe(coin):
        # Abbreviated outpoint plus the coin's address.
        txid = coin.get('prevout_hash')
        return '{}...{}:{}\t{}'.format(
            txid[0:10], txid[-10:], coin.get('prevout_n'), coin.get('address'))

    for coin in self.pay_from:
        row = QTreeWidgetItem([describe(coin), self.format_amount(coin['value'])])
        self.from_list.addTopLevelItem(row)
def get_contact_payto(self, key):
    """Return a payto string for a contact: 'label <address>' for addresses."""
    contact_type, label = self.contacts.get(key)
    if contact_type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Refresh the payto autocompletion list from the contact book."""
    payto_strings = [self.get_contact_payto(k) for k in self.contacts.keys()]
    self.completions.setStringList(payto_strings)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the password verifies or the user cancels.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """Whether the user has manually pinned an absolute fee (truthy/falsy)."""
    fee_widget = self.fee_e
    if not (fee_widget.isVisible() and fee_widget.isModified()):
        return False
    return fee_widget.text() or fee_widget.hasFocus()
def is_send_feerate_frozen(self):
    """Whether the user has manually pinned a fee rate (truthy/falsy)."""
    rate_widget = self.feerate_e
    if not (rate_widget.isVisible() and rate_widget.isModified()):
        return False
    return rate_widget.text() or rate_widget.hasFocus()
def get_send_fee_estimator(self):
    """Return the fee constraint for tx building.

    An int (absolute fee in satoshis) when the fee field is frozen, a
    callable when the feerate field is frozen, or None for automatic fees.
    """
    if self.is_send_fee_frozen():
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        feerate = self.feerate_e.get_amount()  # sat/byte feerate
        sat_per_kb = 0 if feerate is None else feerate * 1000  # sat/kilobyte feerate
        return partial(simple_config.SimpleConfig.estimate_fee_for_feerate, sat_per_kb)
    return None
def read_send_tab(self):
    """Collect and validate the send tab's inputs.

    Returns (outputs, fee_estimator, label, coins) on success, or None
    when validation fails (an error dialog is shown in that case).
    """
    if self.payment_request and self.payment_request.has_expired():
        self.show_error(_('Payment request has expired'))
        return
    label = self.message_e.text()
    if self.payment_request:
        # BIP70 payment request: the request dictates the outputs.
        outputs = self.payment_request.get_outputs()
    else:
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return
        outputs = self.payto_e.get_outputs(self.is_max)
        if self.payto_e.is_alias and self.payto_e.validated is False:
            # Alias resolved but not DNSSEC-validated: warn before continuing.
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return
    if not outputs:
        self.show_error(_('No outputs'))
        return
    # Per-output sanity checks: address present/valid and amount present.
    for _type, addr, amount in outputs:
        if addr is None:
            self.show_error(_('Bitcoin Address is None'))
            return
        if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
            self.show_error(_('Invalid Bitcoin Address'))
            return
        if amount is None:
            self.show_error(_('Invalid Amount'))
            return
    fee_estimator = self.get_send_fee_estimator()
    coins = self.get_coins()
    return outputs, fee_estimator, label, coins
def do_preview(self):
    """Build the transaction and display it without broadcasting."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Validate the form, build the tx, confirm with the user, sign, broadcast.

    With preview=True, stop after construction and show the tx dialog.
    """
    if run_hook('abort_send', self):
        return
    r = self.read_send_tab()
    if not r:
        return
    outputs, fee_estimator, tx_desc, coins = r
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except NotEnoughFunds:
        self.show_message(_("Insufficient funds"))
        return
    except BaseException as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # Reject fees below the server's relay floor; such txs would be dropped.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > confirm_rate * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        # Password prompt doubles as the confirmation dialog here.
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        if success:
            if not tx.is_complete():
                # Partially signed (e.g. multisig): show for further signing.
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    # @protected prompts for the wallet password and injects it as 'password'.
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # Plugins (e.g. 2FA) may wrap the success path with their own handler.
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast tx on a worker thread; report the outcome in the GUI."""
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        status, msg = self.network.broadcast_transaction(tx)
        if pr and status is True:
            # Mark the BIP70 invoice paid and send the merchant an ack.
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_addresses()[0]
            ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
            if ack_status:
                msg = ack_msg
        return status, msg
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Modal multiple-choice prompt; returns the selected index or None.

    Needed by QtHandler for hardware wallets.
    """
    dialog = WindowModalDialog(self.top_level_window())
    clayout = ChoicesLayout(msg, choices)
    box = QVBoxLayout(dialog)
    box.addLayout(clayout.layout())
    box.addLayout(Buttons(OkButton(dialog)))
    accepted = dialog.exec_()
    return clayout.selected_index() if accepted else None
def lock_amount(self, b):
    """Freeze/unfreeze the amount field; 'Max' is only usable when unfrozen."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Freeze the send tab while a payment request is being fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for widget in [self.payto_e, self.message_e]:
        widget.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Drop the invoice identified by key and refresh the list view."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """A payment request verified OK: populate the send tab from it."""
    pr = self.payment_request
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # Color the payto field by expiry state.
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Report a failed payment request and reset the send tab."""
    error_text = self.payment_request.error
    self.show_message(error_text)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request):
    """Store an incoming payment request and signal the GUI thread."""
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    if verified:
        self.payment_request_ok_signal.emit()
    else:
        self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
    """Populate the send tab from a BIP21 'bitcoin:' URI."""
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except BaseException as e:
        self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
        return
    self.show_send_tab()
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # URI carries a payment request; it is fetched asynchronously and
        # delivered via on_pr, so just freeze the tab and wait.
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        # Trigger the fee recalculation path as if the user typed the amount.
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab to its pristine state."""
    self.is_max = False
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    # Blank and unfreeze every input field.
    for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
              self.fee_e, self.feerate_e]:
        e.setText('')
        e.setFrozen(False)
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
    """Freeze or unfreeze addresses and refresh the dependent views."""
    self.wallet.set_frozen_state(addrs, freeze)
    for view in (self.address_list, self.utxo_list):
        view.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap a list widget (plus optional toolbar) in a searchable tab."""
    tab = QWidget()
    tab.searchable_list = l
    layout = QVBoxLayout()
    tab.setLayout(layout)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return tab
def create_addresses_tab(self):
    """Build the Addresses tab, honoring the saved toolbar preference."""
    from .address_list import AddressList
    self.address_list = address_list = AddressList(self)
    toolbar = address_list.create_toolbar(self.config)
    toolbar_shown = self.config.get('show_toolbar_addresses', False)
    address_list.show_toolbar(toolbar_shown)
    return self.create_list_tab(address_list, toolbar)
def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = utxo_list = UTXOList(self)
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = contact_list = ContactList(self)
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """After confirmation, delete addr from the wallet and refresh views."""
    if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        return
    self.wallet.delete_address(addr)
    self.need_update.set()  # history, addresses, coins
    self.clear_receive_tab()
def get_coins(self):
    """Inputs for the next tx: the explicit selection, else all spendable."""
    if self.pay_from:
        return self.pay_from
    return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
    """Use exactly these coins as inputs and switch to the send tab."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the send tab into multi-output mode and explain the format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    help_lines = [
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.'),
    ]
    self.show_message('\n'.join(help_lines), title=_('Pay to many'))
def payto_contacts(self, labels):
    """Prefill the send tab's payto field from the selected contacts."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        # Single recipient: user still needs to type the amount.
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
    else:
        # Multiple recipients: one 'payto, 0' line each, amounts to be edited.
        self.payto_e.setText("\n".join(payto + ", 0" for payto in paytos))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Store label for address in the contact book; returns False if invalid."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    for view in (self.contact_list, self.history_list):
        view.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """After confirmation, remove the given contacts and refresh views."""
    joined = " + ".join(labels)
    if not self.question(_("Remove {} from your list of contacts?").format(joined)):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up a stored invoice by key, re-verify it, and show its details."""
    pr = self.invoices.get(key)
    if pr is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    # Refresh the verification status before displaying.
    pr.verify(self.contacts)
    self.show_pr_details(pr)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of a BIP70 payment request.

    Offers exporting the raw request to a *.bip70 file and deleting the
    stored invoice.
    """
    key = pr.get_id()
    d = WindowModalDialog(self, _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    def do_export():
        # Save the raw BIP70 payment request bytes to a user-chosen file.
        fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.raw)
        # Fix: translate only the fixed phrase, then append the filename.
        # The old code passed the concatenated string (including the
        # filename) into _(), which can never match a translation msgid.
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.invoice_list.update()
            d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
def do_pay_invoice(self, key):
    """Load a stored invoice into the send tab and (re)verify it."""
    pr = self.invoices.get(key)
    self.payment_request = pr
    self.prepare_for_payment_request()
    pr.error = None  # this forces verify() to re-run
    verified = pr.verify(self.contacts)
    if verified:
        self.payment_request_ok()
    else:
        self.payment_request_error()
def create_console_tab(self):
    """Build the Console tab widget."""
    from .console import Console
    self.console = console = Console()
    return console
def update_console(self):
    """(Re)populate the console namespace with wallet/network objects and commands."""
    console = self.console
    console.history = self.config.get("console-history",[])
    console.history_index = len(console.history)
    console.updateNamespace({'wallet' : self.wallet,
                             'network' : self.network,
                             'plugins' : self.gui_object.plugins,
                             'window': self})
    console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
    c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # Bind the command name now; password_dialog lets commands prompt.
        return lambda *args: f(method, args, self.password_dialog)
    # Expose every public command except the attributes shadowed above.
    for m in dir(c):
        if m[0]=='_' or m in ['network','wallet']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the status bar: balance label, search box, and tool buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    # NOTE(review): qtVersion is assigned but never used — candidate for removal.
    qtVersion = qVersion()
    self.balance_label = QLabel("")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box)
    # lock_icon is swapped by update_lock_icon() once wallet state is known.
    self.lock_icon = QIcon()
    self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
    sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def update_lock_icon(self):
    """Show a locked/unlocked padlock matching the wallet's password state."""
    if self.wallet.has_password():
        icon = QIcon(":icons/lock.png")
    else:
        icon = QIcon(":icons/unlock.png")
    self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
    """Sync seed/password/send button visibility with wallet capabilities."""
    wallet = self.wallet
    self.seed_button.setVisible(wallet.has_seed())
    self.password_button.setVisible(wallet.may_have_password())
    # Watching-only wallets cannot sign, so hide the Send button.
    self.send_button.setVisible(not wallet.is_watching_only())
def change_password_dialog(self):
    """Change or disable the wallet password (hardware or software path)."""
    from electrum.storage import STO_EV_XPUB_PW
    if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
        # Hardware wallet: storage encryption uses a device-derived key,
        # so the device supplies the 'password'.
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            return
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            self.show_error(str(e))
            return
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        # Software wallet: the dialog collects old and new passwords.
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        traceback.print_exc(file=sys.stdout)
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Show/hide the status-bar search box; clear the filter when hiding."""
    tab = self.tabs.currentWidget()
    #if hasattr(tab, 'searchable_list'):
    #    tab.searchable_list.toggle_toolbar()
    #return
    becoming_visible = self.search_box.isHidden()
    self.search_box.setHidden(not becoming_visible)
    if becoming_visible:
        self.search_box.setFocus(1)
    else:
        self.do_search('')
def do_search(self, t):
    """Apply filter text to the current tab's list, if it supports search."""
    tab = self.tabs.currentWidget()
    searchable = getattr(tab, 'searchable_list', None)
    if searchable is not None:
        searchable.filter(t)
def new_contact_dialog(self):
    """Modal dialog asking for an address and a name; saves the contact."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()
    line1.setFixedWidth(280)
    line2 = QLineEdit()
    line2.setFixedWidth(280)
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact(label, address) validates the address itself.
        self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
    """'Wallet Information' dialog: name, type, and master public keys."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    mpk_list = self.wallet.get_master_public_keys()
    vbox = QVBoxLayout()
    wallet_type = self.wallet.storage.get('wallet_type', '')
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    vbox.addLayout(grid)
    if self.wallet.is_deterministic():
        mpk_text = ShowQRTextEdit()
        mpk_text.setMaximumHeight(150)
        mpk_text.addCopyButton(self.app)
        def show_mpk(index):
            mpk_text.setText(mpk_list[index])
        # only show the combobox in case multiple accounts are available
        if len(mpk_list) > 1:
            def label(key):
                if isinstance(self.wallet, Multisig_Wallet):
                    return _("cosigner") + ' ' + str(key+1)
                return ''
            labels = [label(i) for i in range(len(mpk_list))]
            on_click = lambda clayout: show_mpk(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        else:
            vbox.addWidget(QLabel(_("Master Public Key")))
        show_mpk(0)
        vbox.addWidget(mpk_text)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the wallet file from disk."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s"%self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.')])
    if self.question(prompt):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    # 'password' is injected by @protected; requesting it gates deletion
    # behind wallet authentication even though the value is unused here.
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    # Stop the daemon's handle on the file before unlinking it.
    self.gui_object.daemon.stop_wallet(wallet_path)
    self.close()
    os.unlink(wallet_path)
    self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Show the wallet seed (and passphrase); password supplied by @protected."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(str(e))
        return
    from .seed_dialog import SeedDialog
    d = SeedDialog(self, seed, passphrase)
    d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
    """Display data as a QR code in a modal dialog; no-op on empty data."""
    if not data:
        return
    dialog = QRDialog(data, parent or self, title)
    dialog.exec_()
@protected
def show_private_key(self, address, password):
    """Show the WIF private key (and redeem script) for one address."""
    if not address:
        return
    try:
        pk, redeem_script = self.wallet.export_private_key(address, password)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return
    # First element of the deserialized key is the script type.
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    if redeem_script:
        vbox.addWidget(QLabel(_("Redeem Script") + ':'))
        rds_e = ShowQRTextEdit(text=redeem_script)
        rds_e.addCopyButton(self.app)
        vbox.addWidget(rds_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Explanation shown by do_sign() when the chosen address type does not
# correspond to a unique public key, making message signing undefined.
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign the message with the address's key; fill the signature widget."""
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    # Only address types with a unique pubkey support message signing.
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    # Sign off the GUI thread; update the widget on success.
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a base64 signature for the message against the given address."""
    address = address.text().strip()
    message = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    verified = False
    try:
        # b64decode raises on malformed input; treat that as a bad signature.
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(address, sig, message)
    except Exception:
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the modal Sign/Verify-message dialog.

    If *address* is given, pre-fill the address field with it.
    """
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    # do_sign is @protected and handles the password itself.
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext in *encrypted_e* with the wallet key for *pubkey_e*.

    *password* is injected by the @protected wrapper. The decrypted UTF-8
    text is written into *message_e* on success.
    """
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)

    def setText(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass
    # Decrypt off the GUI thread; update the widget on success.
    self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the text in *message_e* to the public key in *pubkey_e*.

    The base64-ish ASCII ciphertext is written into *encrypted_e*.
    """
    message = message_e.toPlainText()
    message = message.encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException as e:
        # Bad hex / not a valid point: report and keep the dialog open.
        traceback.print_exc(file=sys.stdout)
        self.show_warning(_('Invalid Public key'))
        return
    encrypted = public_key.encrypt_message(message)
    encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
    """Open the modal Encrypt/Decrypt-message dialog.

    If *address* is given, pre-fill the public-key field with that
    address's wallet public key.
    """
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    # do_decrypt is @protected and handles the password itself.
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Show a password prompt and return the result of PasswordDialog.run().

    *parent* defaults to this window when not supplied.
    """
    from .password_dialog import PasswordDialog
    if parent is None:
        parent = self
    return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
    """Parse raw transaction text into a Transaction object.

    Returns None (after showing a critical error dialog) if parsing fails.
    """
    from electrum.transaction import tx_from_str
    try:
        tx = tx_from_str(txt)
        return Transaction(tx)
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code and act on it.

    A "bitcoin:" URI is handed to the payment flow; anything else is
    treated as a raw transaction and opened in the transaction dialog.
    """
    from electrum import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(str(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if str(data).startswith("bitcoin:"):
        self.pay_to_URI(data)
        return
    # else if the user scanned an offline signed tx
    try:
        # Transaction payloads in QR codes are base43-encoded (see base_decode).
        data = bh2u(bitcoin.base_decode(data, length=None, base=43))
    except BaseException as e:
        self.show_error((_('Could not decode QR code')+':\n{}').format(e))
        return
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self):
    """Prompt for a .txn file and parse it; return a Transaction or None."""
    fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not fileName:
        return
    try:
        with open(fileName, "r") as f:
            file_content = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
        return
    # tx_from_text shows its own error dialog on parse failure.
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Prompt for a raw transaction string and display it in the tx dialog."""
    raw = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not raw:
        return
    tx = self.tx_from_text(raw)
    if not tx:
        return
    self.show_transaction(tx)
def do_process_from_file(self):
    """Load a transaction from a .txn file and, if parsing succeeded, display it."""
    tx = self.read_tx_from_file()
    if not tx:
        return
    self.show_transaction(tx)
def do_process_from_txid(self):
    """Ask for a txid, fetch the transaction from the network, and display it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if ok and txid:
        txid = str(txid).strip()
        try:
            r = self.network.get_transaction(txid)
        except BaseException as e:
            self.show_message(str(e))
            return
        tx = transaction.Transaction(r)
        self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Dialog that exports all wallet private keys to a CSV/JSON file.

    Keys are derived in a background thread (one per address, throttled)
    and streamed into the dialog via two Qt signals; the `done`/`cancelled`
    nonlocals coordinate thread shutdown with dialog lifetime. *password*
    is injected by the @protected wrapper.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        # Warn but continue: a multisig wallet is not restorable from these keys alone.
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    # Disabled until the background thread has produced all keys.
    b.setEnabled(False)
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False

    def privkeys_thread():
        # Worker: export one key per address; bail out if the dialog closed.
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()

    def show_privkeys():
        # GUI thread: render the finished key list and enable Export.
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True

    def on_dialog_closed(*args):
        # Disconnect both signals if the user closed the dialog mid-export.
        nonlocal done
        nonlocal cancelled
        if not done:
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write *pklist* (a mapping address -> private key) to *fileName*.

    When *is_csv* is true, a two-column CSV with a header row is written
    (addresses right-padded to 34 chars); otherwise indented JSON.
    """
    with open(fileName, "w+") as f:
        if not is_csv:
            import json
            f.write(json.dumps(pklist, indent=4))
            return
        writer = csv.writer(f)
        writer.writerow(["address", "private_key"])
        for addr, pk in pklist.items():
            writer.writerow(["%34s" % addr, pk])
def do_import_labels(self):
    """Import wallet labels from a user-chosen file (GUI wrapper around import_meta)."""
    def import_labels(path):
        def _validate(data):
            return data # TODO
        def import_labels_assign(data):
            for key, value in data.items():
                self.wallet.set_label(key, value)
        import_meta(path, _validate, import_labels_assign)

    def on_import():
        # Request a GUI refresh once the labels have been applied.
        self.need_update.set()
    import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
    """Export this wallet's labels to a user-chosen file (GUI wrapper)."""
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(self.wallet.labels, filename))
def sweep_key_dialog(self):
    """Dialog to sweep funds from external private keys into a wallet address.

    On OK, prepares a max-value send in the Send tab with frozen payto and
    amount fields, using the swept coins as external inputs.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # Prefer fresh unused addresses; fall back for wallet types that
    # lack get_receiving_addresses.
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    # Enabled only when both a valid address and parseable keys are present.
    button.setEnabled(False)

    def get_address():
        # Returns the destination address, or None if invalid.
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr

    def get_pk():
        # Returns the parsed private keys, or a falsy value if unparseable.
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text)
    f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(f)
    address_e.textChanged.connect(f)
    address_e.textChanged.connect(on_address)
    if not d.exec_():
        return
    from electrum.wallet import sweep_preparations
    try:
        self.do_clear()
        coins, keypairs = sweep_preparations(get_pk(), self.network)
        self.tx_external_keypairs = keypairs
        self.spend_coins(coins)
        self.payto_e.setText(get_address())
        self.spend_max()
        self.payto_e.setFrozen(True)
        self.amount_e.setFrozen(True)
    except BaseException as e:
        self.show_message(str(e))
        return
    self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Run *func* over each whitespace-separated key entered by the user.

    Collects successes and failures separately, reports both, then
    refreshes the address and history lists.
    """
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    good, bad = [], []
    for key in str(text).split():
        try:
            good.append(func(key))
        except BaseException:
            bad.append(key)
    if good:
        self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
    if bad:
        self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Prompt for watch-only addresses and add them to an importing wallet."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'), _("Enter addresses") + ':',
                    self.wallet.import_address)
@protected
def do_import_privkey(self, password):
    """Prompt for private keys (WIF text) and import them into the wallet.

    *password* is injected by the @protected wrapper and forwarded to
    wallet.import_private_key for each entered key.
    """
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
    """Show/hide fiat amount widgets and refresh lists after a fiat-config change."""
    b = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(b)
    self.fiat_receive_e.setVisible(b)
    # Header sets change when fiat columns appear/disappear.
    self.history_list.refresh_headers()
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Open the modal Preferences dialog.

    Builds the Fees / Transactions / Appearance / Fiat / Identity tabs,
    each as a list of (label, widget) rows; all handlers are closures that
    write straight into self.config / self.wallet. Sets self.need_restart
    for options that only take effect after restart.
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    gui_widgets = []
    fee_widgets = []
    tx_widgets = []
    id_widgets = []
    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(list(languages.values()))
    lang_keys = list(languages.keys())
    lang_cur_setting = self.config.get("language", '')
    try:
        index = lang_keys.index(lang_cur_setting)
    except ValueError:  # not in list
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = list(languages.keys())[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))
    # number of zeros after the decimal point
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))
    # fee estimation mode
    msg = '\n'.join([
        _('Time based: fee rate is based on average confirmation time estimates'),
        _('Mempool based: fee rate is targeting a depth in the memory pool')
        ]
    )
    fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
    fee_type_combo = QComboBox()
    fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
    # Combo index encodes the two config flags: 0=static, 1=ETA, 2=mempool.
    fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
    def on_fee_type(x):
        self.config.set_key('mempool_fees', x==2)
        self.config.set_key('dynamic_fees', x>0)
        self.fee_slider.update()
    fee_type_combo.currentIndexChanged.connect(on_fee_type)
    fee_widgets.append((fee_type_label, fee_type_combo))
    feebox_cb = QCheckBox(_('Edit fees manually'))
    feebox_cb.setChecked(self.config.get('show_fee', False))
    feebox_cb.setToolTip(_("Show fee edit box in send tab."))
    def on_feebox(x):
        self.config.set_key('show_fee', x == Qt.Checked)
        self.fee_adv_controls.setVisible(bool(x))
    feebox_cb.stateChanged.connect(on_feebox)
    fee_widgets.append((feebox_cb, None))
    use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
    use_rbf_cb.setChecked(self.config.get('use_rbf', True))
    use_rbf_cb.setToolTip(
        _('If you check this box, your transactions will be marked as non-final,') + '\n' + \
        _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
        _('Note that some merchants do not accept non-final transactions until they are confirmed.'))
    def on_use_rbf(x):
        self.config.set_key('use_rbf', x == Qt.Checked)
    use_rbf_cb.stateChanged.connect(on_use_rbf)
    fee_widgets.append((use_rbf_cb, None))
    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see https://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # Green = resolved and validated, red = failed; no style while unset.
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
        else:
            alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            self.fetch_alias()
    set_alias_color()
    self.alias_received_signal.connect(set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))
    # base unit
    units = base_units_list
    msg = (_('Base unit of your wallet.')
           + '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
           + _('This setting affects the Send tab, and all balance related fields.'))
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x, nz):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # Preserve the entered amounts across the decimal-point change.
        edits = self.amount_e, self.fee_e, self.receive_amount_e
        amounts = [edit.get_amount() for edit in edits]
        self.decimal_point = base_unit_name_to_decimal_point(unit_result)
        self.config.set_key('decimal_point', self.decimal_point, True)
        nz.setMaximum(self.decimal_point)
        self.history_list.update()
        self.request_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
    gui_widgets.append((unit_label, unit_combo))
    # block explorer
    block_explorers = sorted(util.block_explorer_info().keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))
    # video device for QR scanning
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    # Scanning is only possible when libzbar is available.
    qr_combo.setEnabled(qrscanner.libzbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))
    # color theme (requires restart)
    colortheme_combo = QComboBox()
    colortheme_combo.addItem(_('Light'), 'default')
    colortheme_combo.addItem(_('Dark'), 'dark')
    index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
    colortheme_combo.setCurrentIndex(index)
    colortheme_label = QLabel(_('Color theme') + ':')
    def on_colortheme(x):
        self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
        self.need_restart = True
    colortheme_combo.currentIndexChanged.connect(on_colortheme)
    gui_widgets.append((colortheme_label, colortheme_combo))
    # change-address policy
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            # "multiple change" only makes sense when change is used at all.
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    tx_widgets.append((usechange_cb, None))
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((multiple_cb, None))
    # coin chooser (only offered when more than one implementation exists)
    def fmt_docs(key, klass):
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    if len(choosers) > 1:
        chooser_name = coinchooser.get_name(self.config)
        msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
        msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
        chooser_label = HelpLabel(_('Coin selection') + ':', msg)
        chooser_combo = QComboBox()
        chooser_combo.addItems(choosers)
        i = choosers.index(chooser_name) if chooser_name in choosers else 0
        chooser_combo.setCurrentIndex(i)
        def on_chooser(x):
            chooser_name = choosers[chooser_combo.currentIndex()]
            self.config.set_key('coin_chooser', chooser_name)
        chooser_combo.currentIndexChanged.connect(on_chooser)
        tx_widgets.append((chooser_label, chooser_combo))
    def on_unconf(x):
        self.config.set_key('confirmed_only', bool(x))
    conf_only = self.config.get('confirmed_only', False)
    unconf_cb = QCheckBox(_('Spend only confirmed coins'))
    unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
    unconf_cb.setChecked(conf_only)
    unconf_cb.stateChanged.connect(on_unconf)
    tx_widgets.append((unconf_cb, None))
    def on_outrounding(x):
        self.config.set_key('coin_chooser_output_rounding', bool(x))
    enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
    outrounding_cb = QCheckBox(_('Enable output value rounding'))
    outrounding_cb.setToolTip(
        _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
        _('This might improve your privacy somewhat.') + '\n' +
        _('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
    outrounding_cb.setChecked(enable_outrounding)
    outrounding_cb.stateChanged.connect(on_outrounding)
    tx_widgets.append((outrounding_cb, None))
    # Fiat Currency
    hist_checkbox = QCheckBox()
    hist_capgains_checkbox = QCheckBox()
    fiat_address_checkbox = QCheckBox()
    ccy_combo = QComboBox()
    ex_combo = QComboBox()
    def update_currencies():
        if not self.fx: return
        currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
        ccy_combo.clear()
        ccy_combo.addItems([_('None')] + currencies)
        if self.fx.is_enabled():
            ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
    def update_history_cb():
        if not self.fx: return
        hist_checkbox.setChecked(self.fx.get_history_config())
        hist_checkbox.setEnabled(self.fx.is_enabled())
    def update_fiat_address_cb():
        if not self.fx: return
        fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
    def update_history_capgains_cb():
        if not self.fx: return
        hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
        # Capital gains only make sense when history rates are shown.
        hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
    def update_exchanges():
        if not self.fx: return
        b = self.fx.is_enabled()
        ex_combo.setEnabled(b)
        if b:
            h = self.fx.get_history_config()
            c = self.fx.get_currency()
            exchanges = self.fx.get_exchanges_by_ccy(c, h)
        else:
            exchanges = self.fx.get_exchanges_by_ccy('USD', False)
        ex_combo.clear()
        ex_combo.addItems(sorted(exchanges))
        ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
    def on_currency(hh):
        if not self.fx: return
        # Index 0 is the "None" entry -> fiat display disabled.
        b = bool(ccy_combo.currentIndex())
        ccy = str(ccy_combo.currentText()) if b else None
        self.fx.set_enabled(b)
        if b and ccy != self.fx.ccy:
            self.fx.set_currency(ccy)
        update_history_cb()
        update_exchanges()
        self.update_fiat()
    def on_exchange(idx):
        exchange = str(ex_combo.currentText())
        if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
            self.fx.set_exchange(exchange)
    def on_history(checked):
        if not self.fx: return
        self.fx.set_history_config(checked)
        update_exchanges()
        self.history_list.refresh_headers()
        if self.fx.is_enabled() and checked:
            # reset timeout to get historical rates
            self.fx.timeout = 0
        update_history_capgains_cb()
    def on_history_capgains(checked):
        if not self.fx: return
        self.fx.set_history_capital_gains_config(checked)
        self.history_list.refresh_headers()
    def on_fiat_address(checked):
        if not self.fx: return
        self.fx.set_fiat_address_config(checked)
        self.address_list.refresh_headers()
        self.address_list.update()
    # Initialize the fiat widgets before wiring signals, so the initial
    # population does not trigger the handlers.
    update_currencies()
    update_history_cb()
    update_history_capgains_cb()
    update_fiat_address_cb()
    update_exchanges()
    ccy_combo.currentIndexChanged.connect(on_currency)
    hist_checkbox.stateChanged.connect(on_history)
    hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
    fiat_address_checkbox.stateChanged.connect(on_fiat_address)
    ex_combo.currentIndexChanged.connect(on_exchange)
    fiat_widgets = []
    fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
    fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
    fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
    fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
    fiat_widgets.append((QLabel(_('Source')), ex_combo))
    # Assemble the tabs from the (label, widget) row lists.
    tabs_info = [
        (fee_widgets, _('Fees')),
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('Appearance')),
        (fiat_widgets, _('Fiat')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                # Single-widget rows (checkboxes) span both columns.
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    # run the dialog
    d.exec_()
    if self.fx:
        # Force an immediate exchange-rate refresh after closing.
        self.fx.timeout = 0
    self.alias_received_signal.disconnect(set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler: run cleanup exactly once, then accept the close."""
    # It seems in some rare cases this closeEvent() is called twice
    if not self.cleaned_up:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop the wallet thread, persist geometry and
    console history, close auxiliary windows, and release the wallet."""
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # Only a non-maximized geometry is worth restoring next session.
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # Keep only the last 50 console entries.
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Open the modal plugin manager: one checkbox row per available plugin,
    with an optional per-plugin settings widget and a help button."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}

    def enable_settings_widget(p, name, i):
        # Lazily create the plugin's settings widget; enable it only while
        # the plugin is loaded and enabled.
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))

    def do_toggle(cb, name, i):
        # Toggle the plugin on/off and sync the row's widgets.
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        name = descr['__name__']
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # Keystore plugins (hardware wallets) are managed elsewhere.
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # A broken plugin description must not break the whole dialog.
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stdout)
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Build a Child-Pays-For-Parent transaction for *parent_tx*.

    *new_tx* is a template child spending an unconfirmed output back to
    the wallet; the dialog lets the user pick the child's fee, then the
    finalized child transaction is opened in the transaction dialog.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
        "fee/kB settings, applied to the total size of both child and "
        "parent transactions. After you broadcast a CPFP transaction, "
        "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    def f(x):
        # BUGFIX: get_amount() yields None for an empty fee box; guard the
        # subtraction so this textChanged handler cannot raise TypeError.
        amount = fee_e.get_amount()
        a = max_fee - amount if amount is not None else None
        output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
    fee_e.textChanged.connect(f)
    fee = self.config.fee_per_kb() * total_size / 1000
    fee_e.setAmount(fee)
    # BUGFIX: translate the msgid 'Fee' and then append ':'.  The previous
    # _('Fee' + ':') looked up the literal "Fee:" which is not a catalog msgid,
    # so the label was never translated (every other label here uses _('X') + ':').
    grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * total_size / 1000
        fee = min(max_fee, fee)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        # BUGFIX: an empty fee box returns None; 'None > int' would raise
        # TypeError below.  Treat it as a cancel.
        return
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Let the user replace *tx* (RBF) with a higher-fee version.

    Shows the current fee, asks for a new absolute fee (slider or manual),
    and opens the resulting replacement transaction in the tx dialog.
    """
    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    # BUGFIX: translate the msgid 'New fee' and then append ':'.  The previous
    # _('New fee' + ':') looked up the literal "New fee:" which is not a
    # catalog msgid, so the label was never translated.
    vbox.addWidget(QLabel(_('New fee') + ':'))
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # Propose 1.5x the original fee as a starting point.
    fee_e.setAmount(fee * 1.5)
    vbox.addWidget(fee_e)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * tx_size / 1000
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee = fee_e.get_amount()
    if new_fee is None:
        # BUGFIX: an empty fee box returns None; 'None - int' below would
        # raise TypeError.  Report it as an invalid entry instead.
        self.show_error(_('Invalid amount'))
        return
    delta = new_fee - fee
    if delta < 0:
        self.show_error("fee too low")
        return
    try:
        new_tx = self.wallet.bump_fee(tx, delta)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        # The replacement itself is marked final (no further RBF).
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
|
Rerequester.py | # Written by Bram Cohen
# modified for multitracker operation by John Hoffman
# see LICENSE.txt for license information
from .. zurllib import urlopen, quote
from urlparse import urlparse, urlunparse
from socket import gethostbyname
from btformats import check_peers
from .. bencode import bdecode
from threading import Thread, Lock
from cStringIO import StringIO
from traceback import print_exc
from socket import error, gethostbyname
from random import shuffle
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from time import time
try:
from os import getpid
except ImportError:
def getpid():
return 1
try:
True
except:
True = 1
False = 0
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
keys = {}
basekeydata = str(getpid()) + repr(time()) + 'tracker'
def add_key(tracker):
    """Derive a per-tracker 'key' value and cache it in the module `keys` dict.

    The key is 6 characters from `mapbase64`, selected by the low 6 bits of
    the last 6 bytes of SHA-1(pid + start-time + tracker URL): stable for the
    life of this process, different between clients.
    (Python 2: iterating a digest yields 1-char strings, hence ord().)"""
    key = ''
    for i in sha(basekeydata+tracker).digest()[-6:]:
        key += mapbase64[ord(i) & 0x3F]
    keys[tracker] = key
def get_key(tracker):
    """Return the '&key=...' URL fragment for *tracker*, creating and
    caching the key on first use (see add_key)."""
    try:
        return "&key="+keys[tracker]
    except KeyError:
        # Fix: was a bare `except:`, which would also mask unrelated errors
        # (e.g. a TypeError from an unhashable argument).  Only a missing
        # cache entry should trigger key generation.
        add_key(tracker)
        return "&key="+keys[tracker]
class fakeflag:
    """Minimal stand-in for a threading.Event.

    The state is fixed at construction time; wait() returns immediately
    instead of blocking."""
    def __init__(self, state=False):
        self.state = state
    def isSet(self):
        """Report the fixed state supplied at construction."""
        return self.state
    def wait(self):
        """No-op: a fake flag never blocks the caller."""
        pass
class Rerequester:
    """Announces to a (multi-tier) tracker list on a schedule and hands the
    returned peers to the connecter.

    All tracker HTTP I/O runs on short-lived worker threads; results are
    marshalled back through externalsched, and a SuccessLock serializes
    concurrent attempts so only the first response of a cycle wins."""
    def __init__(self, trackerlist, interval, sched, howmany, minpeers,
            connect, externalsched, amount_left, up, down,
            port, ip, myid, infohash, timeout, errorfunc, excfunc,
            maxpeers, doneflag, upratefunc, downratefunc,
            unpauseflag = fakeflag(True),
            seed_id = '', seededfunc = None, force_rapid_update = False ):
        self.excfunc = excfunc
        # Shuffle within each tier so equal-priority trackers share load
        # (multitracker extension).
        newtrackerlist = []
        for tier in trackerlist:
            if len(tier)>1:
                shuffle(tier)
            newtrackerlist += [tier]
        self.trackerlist = newtrackerlist
        self.lastsuccessful = ''
        self.rejectedmessage = 'rejected by tracker - '
        # Base query string shared by every announce to any tracker.
        self.url = ('?info_hash=%s&peer_id=%s&port=%s' %
            (quote(infohash), quote(myid), str(port)))
        self.ip = ip
        self.interval = interval
        self.last = None
        self.trackerid = None
        self.announce_interval = 30 * 60
        self.sched = sched
        self.howmany = howmany
        self.minpeers = minpeers
        self.connect = connect
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.timeout = timeout
        self.errorfunc = errorfunc
        self.maxpeers = maxpeers
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.unpauseflag = unpauseflag
        if seed_id:
            self.url += '&seed_id='+quote(seed_id)
        self.seededfunc = seededfunc
        if seededfunc:
            self.url += '&check_seeded=1'
        self.force_rapid_update = force_rapid_update
        self.last_failed = True
        self.never_succeeded = True
        self.errorcodes = {}
        self.lock = SuccessLock()
        self.special = None
        self.stopped = False
    def start(self):
        """Kick off the quick-check cycle (c) and the full announce cycle (d)."""
        self.sched(self.c, self.interval/2)
        self.d(0)
    def c(self):
        """Quick periodic tick: announce when short of peers, else resched."""
        if self.stopped:
            return
        if not self.unpauseflag.isSet() and (
            self.howmany() < self.minpeers or self.force_rapid_update ):
            self.announce(3, self._c)
        else:
            self._c()
    def _c(self):
        self.sched(self.c, self.interval)
    def d(self, event = 3):
        """Full announce with an optional event code; reschedules via _d."""
        if self.stopped:
            return
        if not self.unpauseflag.isSet():
            self._d()
            return
        self.announce(event, self._d)
    def _d(self):
        if self.never_succeeded:
            self.sched(self.d, 60) # retry in 60 seconds
        elif self.force_rapid_update:
            return
        else:
            self.sched(self.d, self.announce_interval)
    def hit(self, event = 3):
        """Opportunistic announce when the peer count is low."""
        if not self.unpauseflag.isSet() and (
            self.howmany() < self.minpeers or self.force_rapid_update ):
            self.announce(event)
    def announce(self, event = 3, callback = lambda: None, specialurl = None):
        """Build the announce query string and start a request cycle.

        event: 0=started, 1=completed, 2=stopped, 3=none.  specialurl
        targets a single tracker with zeroed statistics."""
        if specialurl is not None:
            s = self.url+'&uploaded=0&downloaded=0&left=1' # don't add to statistics
            if self.howmany() >= self.maxpeers:
                s += '&numwant=0'
            else:
                s += '&no_peer_id=1&compact=1'
            self.last_failed = True # force true, so will display an error
            self.special = specialurl
            self.rerequest(s, callback)
            return
        else:
            s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
                (self.url, str(self.up()), str(self.down()),
                 str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.maxpeers:
            s += '&numwant=0'
        else:
            s += '&no_peer_id=1&compact=1'
        if event != 3:
            s += '&event=' + ['started', 'completed', 'stopped'][event]
        if event == 2:
            self.stopped = True
        self.rerequest(s, callback)
    def snoop(self, peers, callback = lambda: None): # tracker call support
        """Query the tracker for *peers* peers without joining the swarm."""
        self.rerequest(self.url
            +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
            +str(peers), callback)
    def rerequest(self, s, callback):
        """Start one request cycle, deferring if the previous cycle is live."""
        if not self.lock.isfinished(): # still waiting for prior cycle to complete??
            def retry(self = self, s = s, callback = callback):
                self.rerequest(s, callback)
            self.sched(retry,5) # retry in 5 seconds
            return
        self.lock.reset()
        rq = Thread(target = self._rerequest, args = [s, callback])
        rq.setDaemon(False)
        rq.start()
    def _rerequest(self, s, callback):
        """Worker: try each tracker tier in order until one succeeds."""
        try:
            def fail (self = self, callback = callback):
                self._fail(callback)
            if self.ip:
                try:
                    s += '&ip=' + gethostbyname(self.ip)
                except:
                    self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
                    self.externalsched(fail)
            self.errorcodes = {}
            if self.special is None:
                for t in range(len(self.trackerlist)):
                    for tr in range(len(self.trackerlist[t])):
                        tracker = self.trackerlist[t][tr]
                        if self.rerequest_single(tracker, s, callback):
                            # Promote a newly-working tracker to the front
                            # of its tier for subsequent announces.
                            if not self.last_failed and tr != 0:
                                del self.trackerlist[t][tr]
                                self.trackerlist[t] = [tracker] + self.trackerlist[t]
                            return
            else:
                tracker = self.special
                self.special = None
                if self.rerequest_single(tracker, s, callback):
                    return
            # no success from any tracker
            self.externalsched(fail)
        except:
            self.exception(callback)
    def _fail(self, callback):
        """Record a failed cycle and fire the continuation callback."""
        # NOTE(review): the early `return` below exits before
        # self.errorfunc(r) on the branch that actually computed `r`, and
        # `r` is unbound on the path that reaches errorfunc - this looks
        # like a merge artifact; confirm against upstream BitTornado, where
        # errorfunc(r) sits inside the if-block and there is no return.
        if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
             or not self.amount_left() ):
            for f in ['rejected', 'bad_data', 'troublecode']:
                if self.errorcodes.has_key(f):
                    r = self.errorcodes[f]
                    break
            else:
                r = 'Problem connecting to tracker - unspecified error'
            return
        self.errorfunc(r)
        self.last_failed = True
        self.lock.give_up()
        self.externalsched(callback)
    def rerequest_single(self, t, s, callback):
        """Try one tracker on a worker thread; block until it resolves."""
        l = self.lock.set()
        rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
        rq.setDaemon(False)
        rq.start()
        self.lock.wait()
        if self.lock.success:
            self.lastsuccessful = t
            self.last_failed = False
            self.never_succeeded = False
            return True
        if not self.last_failed and self.lastsuccessful == t:
            # if the last tracker hit was successful, and you've just tried the tracker
            # you'd contacted before, don't go any further, just fail silently.
            self.last_failed = True
            self.externalsched(callback)
            self.lock.give_up()
            return True
        return False # returns true if it wants rerequest() to exit
    def _rerequest_single(self, t, s, l, callback):
        """Worker: perform one HTTP announce, bdecode and validate the reply."""
        try:
            closer = [None]
            def timedout(self = self, l = l, closer = closer):
                # Scheduled watchdog: abort this attempt after self.timeout.
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
                    self.lock.unwait(l)
                try:
                    closer[0]()
                except:
                    pass
            self.externalsched(timedout, self.timeout)
            err = None
            try:
                h = urlopen(t+s)
                closer[0] = h.close
                data = h.read()
            except (IOError, error), e:
                err = 'Problem connecting to tracker - ' + str(e)
            except:
                err = 'Problem connecting to tracker'
            try:
                h.close()
            except:
                pass
            if err:
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = err
                    self.lock.unwait(l)
                return
            if data == '':
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = 'no data from tracker'
                    self.lock.unwait(l)
                return
            try:
                r = bdecode(data, sloppy=1)
                check_peers(r)
            except ValueError, e:
                if self.lock.trip(l):
                    self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
                    self.lock.unwait(l)
                return
            if r.has_key('failure reason'):
                if self.lock.trip(l):
                    self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
                    self.lock.unwait(l)
                return
            if self.lock.trip(l, True): # success!
                self.lock.unwait(l)
            else:
                callback = lambda: None # attempt timed out, don't do a callback
            # even if the attempt timed out, go ahead and process data
            def add(self = self, r = r, callback = callback):
                self.postrequest(r, callback)
            self.externalsched(add)
        except:
            self.exception(callback)
    def postrequest(self, r, callback):
        """Apply the decoded tracker response: update intervals/ids and feed
        the peer list (compact string or dict form) to the connecter."""
        if r.has_key('warning message'):
            self.errorfunc('warning from tracker - ' + r['warning message'])
        self.announce_interval = r.get('interval', self.announce_interval)
        self.interval = r.get('min interval', self.interval)
        self.trackerid = r.get('tracker id', self.trackerid)
        self.last = r.get('last')
        # ps = len(r['peers']) + self.howmany()
        p = r['peers']
        peers = []
        if type(p) == type(''):
            # Compact form: 6 bytes per peer (4 IP + 2 big-endian port).
            for x in xrange(0, len(p), 6):
                ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
                port = (ord(p[x+4]) << 8) | ord(p[x+5])
                peers.append(((ip, port), 0))
        else:
            for x in p:
                peers.append(((x['ip'].strip(), x['port']), x.get('peer id',0)))
        ps = len(peers) + self.howmany()
        if ps < self.maxpeers:
            if self.doneflag.isSet():
                if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
                    self.last = None
            else:
                if r.get('num peers', 1000) > ps * 1.2:
                    self.last = None
        if self.seededfunc and r.get('seeded'):
            self.seededfunc()
        elif peers:
            shuffle(peers)
            self.connect(peers)
        callback()
    def exception(self, callback):
        """Capture the current traceback and report it on the main thread."""
        data = StringIO()
        print_exc(file = data)
        def r(s = data.getvalue(), callback = callback):
            if self.excfunc:
                self.excfunc(s)
            else:
                print s
            callback()
        self.externalsched(r)
class SuccessLock:
    """Coordinates concurrent tracker request attempts for one cycle.

    Each attempt registers with set() and gets a unique code; the first
    attempt to trip() its code wins.  wait()/unwait() park the requesting
    thread until an attempt finishes or times out.
    (Python 2 source: uses long literals.)"""
    def __init__(self):
        self.lock = Lock()   # guards code/first/success/finished
        self.pause = Lock()  # parks the caller of wait()
        self.code = 0L       # id of the current attempt
        self.success = False
        self.finished = True
    def reset(self):
        """Begin a new request cycle."""
        self.success = False
        self.finished = False
    def set(self):
        """Register a new attempt; returns its unique code."""
        self.lock.acquire()
        if not self.pause.locked():
            self.pause.acquire()
        self.first = True
        self.code += 1L
        self.lock.release()
        return self.code
    def trip(self, code, s = False):
        """Report a result for attempt *code*.

        Returns True only for the first report of the current attempt;
        s=True additionally marks the cycle as successfully finished."""
        self.lock.acquire()
        try:
            if code == self.code and not self.finished:
                r = self.first
                self.first = False
                if s:
                    self.finished = True
                    self.success = True
                return r
        finally:
            self.lock.release()
    def give_up(self):
        """Mark the cycle failed and finished."""
        self.lock.acquire()
        self.success = False
        self.finished = True
        self.lock.release()
    def wait(self):
        """Block until unwait() releases the current attempt."""
        self.pause.acquire()
    def unwait(self, code):
        """Release the thread blocked in wait(), if *code* is still current."""
        if code == self.code and self.pause.locked():
            self.pause.release()
    def isfinished(self):
        """True once the current cycle has completed (success or failure)."""
        self.lock.acquire()
        x = self.finished
        self.lock.release()
        return x
|
testSocketRelay.py | from socket import *
import struct
from time import sleep, time
import serial
import subprocess
from threading import Thread
import sys
import os
from deepstream import get
from autonomousCore import *
from leds import writeToBus
# Module-wide state shared between the worker threads below.
# NOTE(review): `global` at module level is a no-op in Python; these lines
# only document which names the threads treat as shared state.
global myDriver
global storedPoints
global cmdBuffer
global currentGpsLoc
global ghzConnection
global mhzConnection
global ghzCountdown
global mhzCountdown
global countdown
global requestStop
global toggleSuspend
storedPoints = []   # GPS breadcrumbs for autonomous return, newest last
cmdBuffer = []      # pending drive commands; sendToArduino drains this
currentGpsLoc = (0.00, 0.00) # GPS tuple (lat, lon)
countdown = 10
ghzCountdown = 5    # seconds until the GHz link is declared dead
mhzCountdown = 5    # same for the MHz link
requestStop = False
toggleSuspend = False
ghzConnection = False
mhzConnection = False
payload_size = 20 #size of payload in bytes 10i (10 x 2byte shorts) for full command, 2b (2 signed bytes) for mobility over mhz connection
# LED Strip colors
ledOff = 6 # off
ghzLed = 1 # green
mhzLed = 3 # purple
drivingMode = 2 # blue
# Autonomous module object
#myDriver = Driver()
# NOTE(review): myDriver is never initialized (Driver() is commented out)
# but is dereferenced by connectionLost/stopDrv/toggleDrvPause - confirm.
# Arduino address and connection
try:
    ardConnectData = ('192.168.1.10', 5000)
    ardSocket = socket(AF_INET, SOCK_DGRAM)
    ardSocket.settimeout(0.5)
except:
    print("Arduino init failed...")
# MHz initialization
try:
    ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=None)
    mhzConnection = True
except:
    print("Failed socketRelay MHz init")
# GHz address and connection
baseConnData = ('192.168.1.121', 5001) # 192.168.1.8 for base station
ghzSocket = socket(AF_INET, SOCK_DGRAM)
ghzSocket.settimeout(0.5)
ghzConnection = True
ghzSocket.bind(('', 5002))
def packGPS():
    """Serialize the current GPS fix as two packed little-format floats
    (latitude, longitude) for transmission over the MHz radio."""
    global currentGpsLoc
    lat, lon = currentGpsLoc
    return struct.pack("2f", lat, lon)
def putRF(rf_uart, data): #arguments to make function more self-contained and function-like
    """Transmit *data* over the radio UART, framed as b's' + data + b'f'.

    getRF on the other end strips this framing."""
    rf_uart.setDTR(True) #if the extra pins on the ttl usb are connected to m0 & m1 on the ebyte module
    rf_uart.setRTS(True) #then these two lines will send low logic to both which puts the module in transmit mode 0
    rf_uart.write(b's' + data + b'f') #start byte
    #rf_uart.write(data) #payload
    #rf_uart.write(b'f') #end byte
    rf_uart.flush() #waits until all data is written
def getRF(rf_uart, size_of_payload): #added argument to make it more function-like
    """Read one framed payload (b's' + payload + b'f') from the radio UART.

    Returns the payload bytes, or -1 if the stop byte is missing (framing
    out of sync).  Counterpart of putRF."""
    rf_uart.setDTR(True) #if the extra pins on the ttl usb are connected to m0 & m1 on the ebyte module
    rf_uart.setRTS(True) #then these two lines will send low logic to both which puts the module in transmit mode 0
    while True:
        # Fix: read inside the loop.  The original read one byte before the
        # loop and never re-read when it was not b's', so any stray leading
        # byte spun this thread forever without consuming input.
        n = rf_uart.read(1) #throw away bytes until start byte is encountered
        if n == b's':
            data = rf_uart.read(size_of_payload) #read fixed number of bytes
            n = rf_uart.read(1) #the following byte should be the stop byte
            if n == b'f':
                return data
            # Fix: the old message ("return bytes successful") wrongly
            # reported success on this failure path.
            print("payload framing error - missing stop byte")
            return -1
def trackGhzConnection():
    """Watchdog thread: tick once per second; if ghzCountdown (refreshed by
    the main loop on every received packet) reaches zero, declare the GHz
    link down so the MHz fallback takes over."""
    global ghzCountdown, ghzConnection
    while True:
        if ghzConnection:
            ghzCountdown -= 1
            if ghzCountdown <= 0:
                ghzConnection = False
        sleep(1)
def reconnect():
    """Recovery thread: while the GHz link is down, ping the base station
    every 5 seconds and mark the link back up on a successful ping."""
    global ghzConnection
    while True:
        if not ghzConnection:
            # os.system returns the shell exit status; 0 means ping succeeded.
            resp = os.system("ping -c 10 " + "192.168.1.8")
            if resp == 0:
                ghzConnection = True
        sleep(5) # Try to check connection again every 5 seconds - autonomous mode active during this time
def connectionLost():
    """Failsafe thread: when both radio links are down, retrace the stored
    GPS breadcrumbs (newest first) and set the LED strip to the failsafe
    color after each waypoint.

    NOTE(review): myDriver is never initialized in this file (Driver() is
    commented out above), so goTo would raise here - confirm."""
    global storedPoints, ghzConnection, mhzConnection, myDriver
    while True:
        while len(storedPoints) > 0 and not ghzConnection and not mhzConnection:
            myDriver.goTo(storedPoints.pop())
            writeToBus(4, 4)
def dist(origin, dest):
    """Great-circle (haversine) distance between two (lat, lon) degree
    pairs, computed on a 6371 km sphere and scaled by 1e5 (km * 100000)."""
    lat1, lon1 = origin
    lat2, lon2 = dest
    earth_radius_km = 6371
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    half_chord = (math.sin(dlat/2) * math.sin(dlat/2)
                  + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
                  * math.sin(dlon/2) * math.sin(dlon/2))
    arc = 2 * math.atan2(math.sqrt(half_chord), math.sqrt(1 - half_chord))
    return earth_radius_km * arc * 100000
def collectPoints():
    """Breadcrumb thread: every 5 s while the GHz link is up, read the GPS
    record from deepstream and append the fix to storedPoints whenever it
    has moved more than 500 units (see dist) from the last stored point."""
    global storedPoints, ghzConnection, currentGpsLoc
    prevPoint = (0.00, 0.00)
    while True:
        if ghzConnection:
            gps = get('gps')
            currentGpsLoc = (gps['lat'], gps['lon'])
            if dist(prevPoint, currentGpsLoc) > 500:
                storedPoints.append(currentGpsLoc)
                prevPoint = currentGpsLoc
        sleep(5)
def sendToArduino():
    """Consumer thread: forward the newest queued command to the Arduino
    over UDP, wait for its "r" acknowledgement, then update the LED strip.

    NOTE(review): the `continue` on an empty buffer busy-spins a CPU core;
    a short sleep would be kinder - confirm before changing timing."""
    global cmdBuffer
    while True:
        try:
            if cmdBuffer == []:
                continue
            else:
                outString = cmdBuffer[-1]  # newest command wins; older ones are dropped
                print(outString[0])
                if outString[1] == ghzLed:
                    # GHz commands: trailing 2 characters are stripped before sending.
                    ardSocket.sendto(bytes(outString[0][:-2],'utf-8'), ardConnectData)
                elif outString[1] == mhzLed:
                    #for i in range(2):
                    ardSocket.sendto(bytes(outString[0], 'utf-8'), ardConnectData)
                cmdBuffer = []
                re_data = ardSocket.recvfrom(512)
                # Drain replies until the ack byte "r" arrives.
                while bytes.decode(re_data[0]) != "r":
                    re_data = ardSocket.recvfrom(512)
                #print("after reading r: ", re_data)
                try:
                    # Write to LED lights bus
                    writeToBus(int(outString[0][-1]), ghzLed) if int(outString[1]) == ghzLed else writeToBus(drivingMode, mhzLed)
                except:
                    print("LED error")
        except:
            # Any socket error (including the 0.5 s recv timeout) clears the
            # queue and retries.
            cmdBuffer = []
            #print("Ard fail")
            pass
def stopDrv():
    """Emergency-stop thread: poll the deepstream "stop" record and forward
    a stop request to the driver when it is set."""
    global myDriver, requestStop
    while True:
        try:
            stopRecord = get("stop")
        except:
            print("stopDrv: DS get failed")
            continue
        if stopRecord == True:
            myDriver.setStop()
            requestStop = False
def toggleDrvPause():
    """Pause thread: poll the deepstream "pause" record and toggle the
    driver's pause state whenever the record flips."""
    global myDriver, toggleSuspend
    while True:
        try:
            pauseRecord = get("pause")
        except:
            print("toggleDrvPause: DS get failed")
            continue
        if not pauseRecord == toggleSuspend:
            myDriver.setPause()
            toggleSuspend = not toggleSuspend
def storeCmd(cmd, freq):
    """Decode a packed command payload and queue it on cmdBuffer as a
    (comma-separated string, link id) pair for sendToArduino."""
    global cmdBuffer
    # GHz link (freq == ghzLed == 1) carries a full 9-short command;
    # the MHz fallback carries only 2 signed bytes of mobility data.
    fmt = '9h' if freq == 1 else '2b'
    fields = struct.unpack(fmt, cmd)
    cmdBuffer.append((','.join(str(v) for v in fields), freq))
def readMhz():
    """Fallback-link thread: while the GHz link is down, read framed 2-byte
    drive commands from the MHz serial radio, queue them, and answer each
    with the current GPS fix."""
    global storedPoints, mhzConnection, ghzConnection, mhzCountdown
    while True:
        if not ghzConnection:
            try:
                mhzData = getRF(ser, 2)
                #print(mhzData)
                if mhzData == -1:
                    # Framing error from getRF: drop and retry.
                    continue
                storeCmd(mhzData, mhzLed)
                mhzCountdown = 10
                try:
                    #Send back current GPS
                    putRF(ser, packGPS())
                except:
                    print("put rf failed")
            except:
                print("failed mhz")
# Spin up the background worker threads.
Thread(target = collectPoints).start()
Thread(target = trackGhzConnection).start()
Thread(target = reconnect).start()
Thread(target = stopDrv).start()
Thread(target = toggleDrvPause).start()
Thread(target = readMhz).start()
# Fix: was `Thread(target = connectionLost),start()` - the comma made this a
# tuple expression calling an undefined name `start`, so the script crashed
# with NameError at import time and the failsafe thread never ran.
Thread(target = connectionLost).start()
sleep(3) # Allow thread/port initializations
# Send a neutral all-stop command so the Arduino starts from a known state.
ardSocket.sendto(bytes('0,0,0,0,0,0,0,0','utf-8'), ardConnectData)
Thread(target=sendToArduino).start()
# Main loop: receive packed drive commands from the base station over the
# GHz link, ack each packet, and refresh the link watchdog.
while True:
    if ghzConnection:
        try:
            data = ghzSocket.recvfrom(512)[0]
            # NOTE(review): 'xff' is sent as the three ASCII chars x,f,f;
            # if a 0xff byte was intended this should be b'\xff' - confirm
            # against the base-station protocol before changing.
            ghzSocket.sendto(bytes('xff', 'utf-8'), baseConnData)
            storeCmd(data, ghzLed)
            ghzCountdown = 10
        except (KeyboardInterrupt, SystemExit):
            ghzSocket.close()
            raise
        except:
            print("GHz failed - trying MHz")
            pass
test_decimal.py | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import inspect
try:
import threading
except ImportError:
threading = None
# Import both implementations of the decimal module: C (_decimal accelerator)
# and pure Python.  C is None when the accelerator is unavailable.
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
  C: tuple(C.getcontext().flags.keys()) if C else None,
  P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
  C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
      C.Overflow, C.DivisionByZero, C.InvalidOperation,
      C.FloatOperation] if C else None,
  P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
      P.Overflow, P.DivisionByZero, P.InvalidOperation,
      P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
    """Assert that exactly the signals listed in *expected* are set in the
    mapping found at context.<attr> (and all others are clear)."""
    flags = getattr(context, attr)
    cls.assertTrue(all(bool(flags[sig]) == (sig in expected) for sig in flags))
# Shorthand names for the rounding constants (identical strings in both
# implementations, so the Python module's values are fine for either).
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
# All rounding modes, for tests that iterate over every mode.
RoundingModes = [
  ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
  ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
  ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
  C: C.getcontext().copy() if C else None,
  P: P.getcontext().copy()
}
def init(m):
    """Install the default test context (prec=9, half-even, no traps) into
    module *m*; no-op when m is None (C accelerator absent)."""
    if not m: return
    DefaultTestContext = m.Context(
        prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
    )
    m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
# Locate the .decTest data directory next to this file; the IBM suite is
# skipped entirely when it is not present.
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
  EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
  EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
    """Class which tests the Decimal class against the IBM test cases."""
    def setUp(self):
        """Create fresh contexts and the lookup tables that drive the
        .decTest file parser below."""
        self.context = self.decimal.Context()
        self.readcontext = self.decimal.Context()
        self.ignore_list = ['#']
        # List of individual .decTest test ids that correspond to tests that
        # we're skipping for one reason or another.
        self.skipped_test_ids = set([
            # Skip implementation-specific scaleb tests.
            'scbx164',
            'scbx165',
            # For some operations (currently exp, ln, log10, power), the decNumber
            # reference implementation imposes additional restrictions on the context
            # and operands. These restrictions are not part of the specification;
            # however, the effect of these restrictions does show up in some of the
            # testcases. We skip testcases that violate these restrictions, since
            # Decimal behaves differently from decNumber for these testcases so these
            # testcases would otherwise fail.
            'expx901',
            'expx902',
            'expx903',
            'expx905',
            'lnx901',
            'lnx902',
            'lnx903',
            'lnx905',
            'logx901',
            'logx902',
            'logx903',
            'logx905',
            'powx1183',
            'powx1184',
            'powx4001',
            'powx4002',
            'powx4003',
            'powx4005',
            'powx4008',
            'powx4010',
            'powx4012',
            'powx4014',
            ])
        if self.decimal == C:
            # status has additional Subnormal, Underflow
            self.skipped_test_ids.add('pwsx803')
            self.skipped_test_ids.add('pwsx805')
            # Correct rounding (skipped for decNumber, too)
            self.skipped_test_ids.add('powx4302')
            self.skipped_test_ids.add('powx4303')
            self.skipped_test_ids.add('powx4342')
            self.skipped_test_ids.add('powx4343')
            # http://bugs.python.org/issue7049
            self.skipped_test_ids.add('pwmx325')
            self.skipped_test_ids.add('pwmx326')
        # Map test directives to setter functions.
        self.ChangeDict = {'precision' : self.change_precision,
                           'rounding' : self.change_rounding_method,
                           'maxexponent' : self.change_max_exponent,
                           'minexponent' : self.change_min_exponent,
                           'clamp' : self.change_clamp}
        # Name adapter to be able to change the Decimal and Context
        # interface without changing the test files from Cowlishaw.
        self.NameAdapter = {'and':'logical_and',
                            'apply':'_apply',
                            'class':'number_class',
                            'comparesig':'compare_signal',
                            'comparetotal':'compare_total',
                            'comparetotmag':'compare_total_mag',
                            'copy':'copy_decimal',
                            'copyabs':'copy_abs',
                            'copynegate':'copy_negate',
                            'copysign':'copy_sign',
                            'divideint':'divide_int',
                            'invert':'logical_invert',
                            'iscanonical':'is_canonical',
                            'isfinite':'is_finite',
                            'isinfinite':'is_infinite',
                            'isnan':'is_nan',
                            'isnormal':'is_normal',
                            'isqnan':'is_qnan',
                            'issigned':'is_signed',
                            'issnan':'is_snan',
                            'issubnormal':'is_subnormal',
                            'iszero':'is_zero',
                            'maxmag':'max_mag',
                            'minmag':'min_mag',
                            'nextminus':'next_minus',
                            'nextplus':'next_plus',
                            'nexttoward':'next_toward',
                            'or':'logical_or',
                            'reduce':'normalize',
                            'remaindernear':'remainder_near',
                            'samequantum':'same_quantum',
                            'squareroot':'sqrt',
                            'toeng':'to_eng_string',
                            'tointegral':'to_integral_value',
                            'tointegralx':'to_integral_exact',
                            'tosci':'to_sci_string',
                            'xor':'logical_xor'}
        # Map test-case names to roundings.
        self.RoundingDict = {'ceiling' : ROUND_CEILING,
                             'down' : ROUND_DOWN,
                             'floor' : ROUND_FLOOR,
                             'half_down' : ROUND_HALF_DOWN,
                             'half_even' : ROUND_HALF_EVEN,
                             'half_up' : ROUND_HALF_UP,
                             'up' : ROUND_UP,
                             '05up' : ROUND_05UP}
        # Map the test cases' error names to the actual errors.
        self.ErrorNames = {'clamped' : self.decimal.Clamped,
                           'conversion_syntax' : self.decimal.InvalidOperation,
                           'division_by_zero' : self.decimal.DivisionByZero,
                           'division_impossible' : self.decimal.InvalidOperation,
                           'division_undefined' : self.decimal.InvalidOperation,
                           'inexact' : self.decimal.Inexact,
                           'invalid_context' : self.decimal.InvalidOperation,
                           'invalid_operation' : self.decimal.InvalidOperation,
                           'overflow' : self.decimal.Overflow,
                           'rounded' : self.decimal.Rounded,
                           'subnormal' : self.decimal.Subnormal,
                           'underflow' : self.decimal.Underflow}
        # The following functions return True/False rather than a
        # Decimal instance.
        self.LogicalFunctions = ('is_canonical',
                                 'is_finite',
                                 'is_infinite',
                                 'is_nan',
                                 'is_normal',
                                 'is_qnan',
                                 'is_signed',
                                 'is_snan',
                                 'is_subnormal',
                                 'is_zero',
                                 'same_quantum')
    def read_unlimited(self, v, context):
        """Work around the limitations of the 32-bit _decimal version. The
        guaranteed maximum values for prec, Emax etc. are 425000000,
        but higher values usually work, except for rare corner cases.
        In particular, all of the IBM tests pass with maximum values
        of 1070000000."""
        if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
            self.readcontext._unsafe_setprec(1070000000)
            self.readcontext._unsafe_setemax(1070000000)
            self.readcontext._unsafe_setemin(-1070000000)
            return self.readcontext.create_decimal(v)
        else:
            return self.decimal.Decimal(v, context)
    def eval_file(self, file):
        """Run every line of one .decTest file through eval_line."""
        global skip_expected
        if skip_expected:
            raise unittest.SkipTest
        with open(file) as f:
            for line in f:
                line = line.replace('\r\n', '').replace('\n', '')
                #print line
                try:
                    t = self.eval_line(line)
                except self.decimal.DecimalException as exception:
                    #Exception raised where there shouldn't have been one.
                    self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
    def eval_line(self, s):
        """Strip '--' comments from one line and dispatch it: directives
        (containing ':') go to eval_directive, test cases to eval_equation."""
        if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
            s = (s.split('->')[0] + '->' +
                 s.split('->')[1].split('--')[0]).strip()
        else:
            s = s.split('--')[0].strip()
        for ignore in self.ignore_list:
            if s.find(ignore) >= 0:
                #print s.split()[0], 'NotImplemented--', ignore
                return
        if not s:
            return
        elif ':' in s:
            return self.eval_directive(s)
        else:
            return self.eval_equation(s)
    def eval_directive(self, s):
        """Apply one 'name: value' context directive via ChangeDict."""
        funct, value = (x.strip().lower() for x in s.split(':'))
        if funct == 'rounding':
            value = self.RoundingDict[value]
        else:
            try:
                value = int(value)
            except ValueError:
                pass
        funct = self.ChangeDict.get(funct, (lambda *args: None))
        funct(value)
    def eval_equation(self, s):
        """Parse and execute one 'id op operands -> answer flags' test case,
        checking the result string and the signals that were raised."""
        if not TEST_ALL and random.random() < 0.90:
            return
        self.context.clear_flags()
        try:
            Sides = s.split('->')
            L = Sides[0].strip().split()
            id = L[0]
            if DEBUG:
                print("Test ", id, end=" ")
            funct = L[1].lower()
            valstemp = L[2:]
            L = Sides[1].strip().split()
            ans = L[0]
            exceptions = L[1:]
        except (TypeError, AttributeError, IndexError):
            raise self.decimal.InvalidOperation
        def FixQuotes(val):
            # Unquote operand strings while preserving escaped '' and "".
            val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
            val = val.replace("'", '').replace('"', '')
            val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
            return val
        if id in self.skipped_test_ids:
            return
        fname = self.NameAdapter.get(funct, funct)
        if fname == 'rescale':
            return
        funct = getattr(self.context, fname)
        vals = []
        conglomerate = ''
        quote = 0
        theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
        for exception in Signals[self.decimal]:
            self.context.traps[exception] = 1 #Catch these bugs...
        for exception in theirexceptions:
            self.context.traps[exception] = 0
        for i, val in enumerate(valstemp):
            if val.count("'") % 2 == 1:
                quote = 1 - quote
            if quote:
                conglomerate = conglomerate + ' ' + val
                continue
            else:
                val = conglomerate + val
                conglomerate = ''
                v = FixQuotes(val)
            if fname in ('to_sci_string', 'to_eng_string'):
                if EXTENDEDERRORTEST:
                    for error in theirexceptions:
                        self.context.traps[error] = 1
                        try:
                            funct(self.context.create_decimal(v))
                        except error:
                            pass
                        except Signals[self.decimal] as e:
                            self.fail("Raised %s in %s when %s disabled" % \
                                      (e, s, error))
                        else:
                            self.fail("Did not raise %s in %s" % (error, s))
                        self.context.traps[error] = 0
                v = self.context.create_decimal(v)
            else:
                v = self.read_unlimited(v, self.context)
            vals.append(v)
        ans = FixQuotes(ans)
        if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
            for error in theirexceptions:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s when %s disabled" % \
                              (e, s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
                self.context.traps[error] = 0
            # as above, but add traps cumulatively, to check precedence
            ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
            for error in ordered_errors:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s; expected %s" %
                              (type(e), s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
            # reset traps
            for error in ordered_errors:
                self.context.traps[error] = 0
        if DEBUG:
            print("--", self.context)
        try:
            result = str(funct(*vals))
            if fname in self.LogicalFunctions:
                result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
        except Signals[self.decimal] as error:
            self.fail("Raised %s in %s" % (error, s))
        except: #Catch any error long enough to state the test case.
            print("ERROR:", s)
            raise
        myexceptions = self.getexceptions()
        myexceptions.sort(key=repr)
        theirexceptions.sort(key=repr)
        self.assertEqual(result, ans,
                         'Incorrect answer for ' + s + ' -- got ' + result)
        self.assertEqual(myexceptions, theirexceptions,
                         'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
    def getexceptions(self):
        """Return the signals currently set in self.context.flags."""
        return [e for e in Signals[self.decimal] if self.context.flags[e]]
    def change_precision(self, prec):
        """Set context precision (unsafe setter on 32-bit C builds)."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setprec(prec)
        else:
            self.context.prec = prec
    def change_rounding_method(self, rounding):
        """Set the context rounding mode."""
        self.context.rounding = rounding
    def change_min_exponent(self, exp):
        """Set context Emin (unsafe setter on 32-bit C builds)."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemin(exp)
        else:
            self.context.Emin = exp
    def change_max_exponent(self, exp):
        """Set context Emax (unsafe setter on 32-bit C builds)."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemax(exp)
        else:
            self.context.Emax = exp
    def change_clamp(self, clamp):
        """Set the context clamp flag."""
        self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
    # Run the IBM suite against the C accelerator module (_decimal).
    decimal = C
class PyIBMTestCases(IBMTestCases):
    # Run the IBM suite against the pure-Python decimal implementation.
    decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Explicit Construction cases of Decimal.'''

    def test_explicit_empty(self):
        """Decimal() with no arguments equals Decimal("0")."""
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(), Decimal("0"))

    def test_explicit_from_None(self):
        """Decimal(None) raises TypeError (it is not parsed as NaN)."""
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, Decimal, None)

    def test_explicit_from_int(self):
        """Construction from ints of various signs and magnitudes."""
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        self.assertEqual(str(d), '45')

        #very large positive
        d = Decimal(500000123)
        self.assertEqual(str(d), '500000123')

        #negative
        d = Decimal(-45)
        self.assertEqual(str(d), '-45')

        #zero
        d = Decimal(0)
        self.assertEqual(str(d), '0')

        # single word longs: values straddling each power of two up to 2**31
        for n in range(0, 32):
            for sign in (-1, 1):
                for x in range(-5, 5):
                    i = sign * (2**n + x)
                    d = Decimal(i)
                    self.assertEqual(str(d), str(i))

    def test_explicit_from_string(self):
        """Construction from strings: valid forms, whitespace, underscores,
        and strings that must produce NaN or raise InvalidOperation."""
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        localcontext = self.decimal.localcontext
        #empty
        self.assertEqual(str(Decimal('')), 'NaN')
        #int
        self.assertEqual(str(Decimal('45')), '45')
        #float
        self.assertEqual(str(Decimal('45.34')), '45.34')
        #engineer notation
        self.assertEqual(str(Decimal('45e2')), '4.5E+3')
        #just not a number
        self.assertEqual(str(Decimal('ugly')), 'NaN')
        #leading and trailing whitespace permitted
        self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
        self.assertEqual(str(Decimal(' -7.89')), '-7.89')
        self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')

        # underscores are allowed as digit group separators
        self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
        self.assertEqual(str(Decimal('1_0_0_0')), '1000')

        # unicode whitespace is stripped like ASCII whitespace
        for lead in ["", ' ', '\u00a0', '\u205f']:
            for trail in ["", ' ', '\u00a0', '\u205f']:
                self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
                                 '9.311E+28')

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            # Invalid string
            self.assertRaises(InvalidOperation, Decimal, "xyz")
            # Two arguments max
            self.assertRaises(TypeError, Decimal, "1234", "x", "y")

            # space within the numeric part
            self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
            self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")

            # unicode whitespace alone is not a number
            self.assertRaises(InvalidOperation, Decimal, "\u00a0")
            self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")

            # embedded NUL
            self.assertRaises(InvalidOperation, Decimal, "12\u00003")

            # underscores don't prevent errors
            self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")

    @cpython_only
    def test_from_legacy_strings(self):
        """Construction from legacy (non-canonical) unicode representations."""
        import _testcapi
        Decimal = self.decimal.Decimal
        context = self.decimal.Context()

        s = _testcapi.unicode_legacy_string('9.999999')
        self.assertEqual(str(Decimal(s)), '9.999999')
        self.assertEqual(str(context.create_decimal(s)), '9.999999')

    def test_explicit_from_tuples(self):
        """Construction from (sign, digits, exponent) tuples, including the
        malformed tuples that must raise ValueError."""
        Decimal = self.decimal.Decimal

        #zero
        d = Decimal( (0, (0,), 0) )
        self.assertEqual(str(d), '0')

        #int
        d = Decimal( (1, (4, 5), 0) )
        self.assertEqual(str(d), '-45')

        #float
        d = Decimal( (0, (4, 5, 3, 4), -2) )
        self.assertEqual(str(d), '45.34')

        #weird
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')

        #inf
        d = Decimal( (0, (), "F") )
        self.assertEqual(str(d), 'Infinity')

        #wrong number of items
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )

        #bad sign
        self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))

        #bad exp
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )

        #bad coefficients
        self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )

    def test_explicit_from_list(self):
        """Lists (and mixed list/tuple nesting) are accepted like tuples."""
        Decimal = self.decimal.Decimal

        d = Decimal([0, [0], 0])
        self.assertEqual(str(d), '0')

        d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
        self.assertEqual(str(d), '-4.34913534E-17')

    def test_explicit_from_bool(self):
        """bool values construct as 0/1; Decimals are truthy except zero."""
        Decimal = self.decimal.Decimal

        self.assertIs(bool(Decimal(0)), False)
        self.assertIs(bool(Decimal(1)), True)
        self.assertEqual(Decimal(False), Decimal(0))
        self.assertEqual(Decimal(True), Decimal(1))

    def test_explicit_from_Decimal(self):
        """Copy-construction from another Decimal preserves the value."""
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')

        #very large positive
        d = Decimal(500000123)
        e = Decimal(d)
        self.assertEqual(str(e), '500000123')

        #negative
        d = Decimal(-45)
        e = Decimal(d)
        self.assertEqual(str(e), '-45')

        #zero
        d = Decimal(0)
        e = Decimal(d)
        self.assertEqual(str(e), '0')

    @requires_IEEE_754
    def test_explicit_from_float(self):
        """Construction from float is exact (no rounding to context precision)
        and handles nan/inf/-0.0; float -> Decimal -> float round-trips."""
        Decimal = self.decimal.Decimal

        r = Decimal(0.1)
        self.assertEqual(type(r), Decimal)
        # exact binary value of the double closest to 0.1
        self.assertEqual(str(r),
                '0.1000000000000000055511151231257827021181583404541015625')
        self.assertTrue(Decimal(float('nan')).is_qnan())
        self.assertTrue(Decimal(float('inf')).is_infinite())
        self.assertTrue(Decimal(float('-inf')).is_infinite())
        self.assertEqual(str(Decimal(float('nan'))),
                         str(Decimal('NaN')))
        self.assertEqual(str(Decimal(float('inf'))),
                         str(Decimal('Infinity')))
        self.assertEqual(str(Decimal(float('-inf'))),
                         str(Decimal('-Infinity')))
        self.assertEqual(str(Decimal(float('-0.0'))),
                         str(Decimal('-0')))
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(Decimal(x))) # roundtrip

    def test_explicit_context_create_decimal(self):
        """Context.create_decimal rounds to the context precision (unlike the
        Decimal constructor) and rejects whitespace/underscores."""
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        Rounded = self.decimal.Rounded

        nc = copy.copy(self.decimal.getcontext())
        nc.prec = 3

        # empty
        d = Decimal()
        self.assertEqual(str(d), '0')
        d = nc.create_decimal()
        self.assertEqual(str(d), '0')

        # from None
        self.assertRaises(TypeError, nc.create_decimal, None)

        # from int
        d = nc.create_decimal(456)
        self.assertIsInstance(d, Decimal)
        self.assertEqual(nc.create_decimal(45678),
                         nc.create_decimal('457E+2'))

        # from string
        d = Decimal('456789')
        self.assertEqual(str(d), '456789')
        d = nc.create_decimal('456789')
        self.assertEqual(str(d), '4.57E+5')
        # leading and trailing whitespace should result in a NaN;
        # spaces are already checked in Cowlishaw's test-suite, so
        # here we just check that a trailing newline results in a NaN
        self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')

        # from tuples
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')
        d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.35E-17')

        # from Decimal
        prevdec = Decimal(500000123)
        d = Decimal(prevdec)
        self.assertEqual(str(d), '500000123')
        d = nc.create_decimal(prevdec)
        self.assertEqual(str(d), '5.00E+8')

        # more integers
        nc.prec = 28
        nc.traps[InvalidOperation] = True

        for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
                  2**31-1, 2**31, 2**63-1, 2**63]:
            d = nc.create_decimal(v)
            self.assertTrue(isinstance(d, Decimal))
            self.assertEqual(int(d), v)

        nc.prec = 3
        nc.traps[Rounded] = True
        self.assertRaises(Rounded, nc.create_decimal, 1234)

        # from string
        nc.prec = 28
        self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
        self.assertEqual(str(nc.create_decimal('45')), '45')
        self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
        self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')

        # invalid arguments
        self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
        self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
        self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")

        # no whitespace and underscore stripping is done with this method
        self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
        self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")

        # too many NaN payload digits for a 3-digit context
        nc.prec = 3
        self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
        self.assertRaises(InvalidOperation, nc.create_decimal,
                          Decimal('NaN12345'))

        # untrapped, the oversized payload collapses to a plain NaN + flag
        nc.traps[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

        nc.flags[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

    def test_explicit_context_create_from_float(self):
        """Context.create_decimal from float rounds to context precision and
        handles the special values; round-trips at prec=100."""
        Decimal = self.decimal.Decimal

        nc = self.decimal.Context()
        r = nc.create_decimal(0.1)
        self.assertEqual(type(r), Decimal)
        self.assertEqual(str(r), '0.1000000000000000055511151231')
        self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
        self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
        self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
        self.assertEqual(str(nc.create_decimal(float('nan'))),
                         str(nc.create_decimal('NaN')))
        self.assertEqual(str(nc.create_decimal(float('inf'))),
                         str(nc.create_decimal('Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-inf'))),
                         str(nc.create_decimal('-Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-0.0'))),
                         str(nc.create_decimal('-0')))
        nc.prec = 100
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip

    def test_unicode_digits(self):
        """Non-ASCII decimal digits are accepted in construction."""
        Decimal = self.decimal.Decimal

        test_values = {
            '\uff11': '1',
            '\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
            '-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
            }
        for input, expected in test_values.items():
            self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
    # Run the explicit-construction tests against the C implementation.
    decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
    # Run the explicit-construction tests against the pure-Python implementation.
    decimal = P
class ImplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Implicit Construction cases of Decimal.'''

    def test_implicit_from_None(self):
        """Mixed arithmetic with None raises TypeError."""
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())

    def test_implicit_from_int(self):
        """ints are implicitly converted in mixed arithmetic with Decimal."""
        Decimal = self.decimal.Decimal

        #normal
        self.assertEqual(str(Decimal(5) + 45), '50')
        #exceeding precision
        self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))

    def test_implicit_from_string(self):
        """Strings are not implicitly parsed in mixed arithmetic."""
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())

    def test_implicit_from_float(self):
        """floats are not implicitly converted in mixed arithmetic."""
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())

    def test_implicit_from_Decimal(self):
        """Decimal + Decimal works directly."""
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))

    def test_rop(self):
        """A foreign class that defines (reflected) operator methods takes
        precedence over Decimal's operators in mixed expressions."""
        Decimal = self.decimal.Decimal

        # Allow other classes to be trained to interact with Decimals
        class E:
            def __divmod__(self, other):
                return 'divmod ' + str(other)
            def __rdivmod__(self, other):
                return str(other) + ' rdivmod'
            def __lt__(self, other):
                return 'lt ' + str(other)
            def __gt__(self, other):
                return 'gt ' + str(other)
            def __le__(self, other):
                return 'le ' + str(other)
            def __ge__(self, other):
                return 'ge ' + str(other)
            def __eq__(self, other):
                return 'eq ' + str(other)
            def __ne__(self, other):
                return 'ne ' + str(other)

        self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
        self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
        # comparisons are reflected: Decimal < E dispatches to E.__gt__, etc.
        self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
        self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
        self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
        self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
        self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
        self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')

        # insert operator methods and then exercise them
        oplist = [
            ('+', '__add__', '__radd__'),
            ('-', '__sub__', '__rsub__'),
            ('*', '__mul__', '__rmul__'),
            ('/', '__truediv__', '__rtruediv__'),
            ('%', '__mod__', '__rmod__'),
            ('//', '__floordiv__', '__rfloordiv__'),
            ('**', '__pow__', '__rpow__')
        ]

        # NOTE: each lambda closes over the loop variables (lop/rop), but it
        # is installed and exercised within the same iteration, so the
        # late-binding of the closure is harmless here.
        for sym, lop, rop in oplist:
            setattr(E, lop, lambda self, other: 'str' + lop + str(other))
            setattr(E, rop, lambda self, other: str(other) + rop + 'str')
            self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
                             'str' + lop + '10')
            self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
                             '10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
    # Run the implicit-construction tests against the C implementation.
    decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
    # Run the implicit-construction tests against the pure-Python implementation.
    decimal = P
class FormatTest(unittest.TestCase):
    '''Unit tests for the format function.'''

    def test_formatting(self):
        """Exhaustive format-spec table: each triple is (format spec,
        Decimal input, expected output of format())."""
        Decimal = self.decimal.Decimal

        # triples giving a format, a Decimal, and the expected result
        test_values = [
            ('e', '0E-15', '0e-15'),
            ('e', '2.3E-15', '2.3e-15'),
            ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
            ('e', '2.30000E-15', '2.30000e-15'),
            ('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
            ('e', '1.5', '1.5e+0'),
            ('e', '0.15', '1.5e-1'),
            ('e', '0.015', '1.5e-2'),
            ('e', '0.0000000000015', '1.5e-12'),
            ('e', '15.0', '1.50e+1'),
            ('e', '-15', '-1.5e+1'),
            ('e', '0', '0e+0'),
            ('e', '0E1', '0e+1'),
            ('e', '0.0', '0e-1'),
            ('e', '0.00', '0e-2'),
            ('.6e', '0E-15', '0.000000e-9'),
            ('.6e', '0', '0.000000e+6'),
            ('.6e', '9.999999', '9.999999e+0'),
            ('.6e', '9.9999999', '1.000000e+1'),
            ('.6e', '-1.23e5', '-1.230000e+5'),
            ('.6e', '1.23456789e-3', '1.234568e-3'),
            ('f', '0', '0'),
            ('f', '0.0', '0.0'),
            ('f', '0E-2', '0.00'),
            ('f', '0.00E-8', '0.0000000000'),
            ('f', '0E1', '0'), # loses exponent information
            ('f', '3.2E1', '32'),
            ('f', '3.2E2', '320'),
            ('f', '3.20E2', '320'),
            ('f', '3.200E2', '320.0'),
            ('f', '3.2E-6', '0.0000032'),
            ('.6f', '0E-15', '0.000000'), # all zeros treated equally
            ('.6f', '0E1', '0.000000'),
            ('.6f', '0', '0.000000'),
            ('.0f', '0', '0'), # no decimal point
            ('.0f', '0e-2', '0'),
            ('.0f', '3.14159265', '3'),
            ('.1f', '3.14159265', '3.1'),
            ('.4f', '3.14159265', '3.1416'),
            ('.6f', '3.14159265', '3.141593'),
            ('.7f', '3.14159265', '3.1415926'), # round-half-even!
            ('.8f', '3.14159265', '3.14159265'),
            ('.9f', '3.14159265', '3.141592650'),
            ('g', '0', '0'),
            ('g', '0.0', '0.0'),
            ('g', '0E1', '0e+1'),
            ('G', '0E1', '0E+1'),
            ('g', '0E-5', '0.00000'),
            ('g', '0E-6', '0.000000'),
            ('g', '0E-7', '0e-7'),
            ('g', '-0E2', '-0e+2'),
            ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
            ('.0n', '3.14159265', '3'), # same for 'n'
            ('.1g', '3.14159265', '3'),
            ('.2g', '3.14159265', '3.1'),
            ('.5g', '3.14159265', '3.1416'),
            ('.7g', '3.14159265', '3.141593'),
            ('.8g', '3.14159265', '3.1415926'), # round-half-even!
            ('.9g', '3.14159265', '3.14159265'),
            ('.10g', '3.14159265', '3.14159265'), # don't pad
            ('%', '0E1', '0%'),
            ('%', '0E0', '0%'),
            ('%', '0E-1', '0%'),
            ('%', '0E-2', '0%'),
            ('%', '0E-3', '0.0%'),
            ('%', '0E-4', '0.00%'),
            ('.3%', '0', '0.000%'), # all zeros treated equally
            ('.3%', '0E10', '0.000%'),
            ('.3%', '0E-10', '0.000%'),
            ('.3%', '2.34', '234.000%'),
            ('.3%', '1.234567', '123.457%'),
            ('.0%', '1.23', '123%'),
            ('e', 'NaN', 'NaN'),
            ('f', '-NaN123', '-NaN123'),
            ('+g', 'NaN456', '+NaN456'),
            ('.3e', 'Inf', 'Infinity'),
            ('.16f', '-Inf', '-Infinity'),
            ('.0g', '-sNaN', '-sNaN'),
            ('', '1.00', '1.00'),
            # test alignment and padding
            ('6', '123', '   123'),
            ('<6', '123', '123   '),
            ('>6', '123', '   123'),
            ('^6', '123', ' 123  '),
            ('=+6', '123', '+  123'),
            ('#<10', 'NaN', 'NaN#######'),
            ('#<10', '-4.3', '-4.3######'),
            ('#<+10', '0.0130', '+0.0130###'),
            ('#< 10', '0.0130', ' 0.0130###'),
            ('@>10', '-Inf', '@-Infinity'),
            ('#>5', '-Inf', '-Infinity'),
            ('?^5', '123', '?123?'),
            ('%^6', '123', '%123%%'),
            (' ^6', '-45.6', '-45.6 '),
            ('/=10', '-45.6', '-/////45.6'),
            ('/=+10', '45.6', '+/////45.6'),
            ('/= 10', '45.6', ' /////45.6'),
            ('\x00=10', '-inf', '-\x00Infinity'),
            ('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
            ('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
            ('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
            # thousands separator
            (',', '1234567', '1,234,567'),
            (',', '123456', '123,456'),
            (',', '12345', '12,345'),
            (',', '1234', '1,234'),
            (',', '123', '123'),
            (',', '12', '12'),
            (',', '1', '1'),
            (',', '0', '0'),
            (',', '-1234567', '-1,234,567'),
            (',', '-123456', '-123,456'),
            ('7,', '123456', '123,456'),
            ('8,', '123456', ' 123,456'),
            ('08,', '123456', '0,123,456'), # special case: extra 0 needed
            ('+08,', '123456', '+123,456'), # but not if there's a sign
            (' 08,', '123456', ' 123,456'),
            ('08,', '-123456', '-123,456'),
            ('+09,', '123456', '+0,123,456'),
            # ... with fractional part...
            ('07,', '1234.56', '1,234.56'),
            ('08,', '1234.56', '1,234.56'),
            ('09,', '1234.56', '01,234.56'),
            ('010,', '1234.56', '001,234.56'),
            ('011,', '1234.56', '0,001,234.56'),
            ('012,', '1234.56', '0,001,234.56'),
            ('08,.1f', '1234.5', '01,234.5'),
            # no thousands separators in fraction part
            (',', '1.23456789', '1.23456789'),
            (',%', '123.456789', '12,345.6789%'),
            (',e', '123456', '1.23456e+5'),
            (',E', '123456', '1.23456E+5'),
            # issue 6850
            ('a=-7.0', '0.12345', 'aaaa0.1'),
            # issue 22090
            ('<^+15.20%', 'inf', '<<+Infinity%<<<'),
            ('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
            ('=10.10%', 'NaN123', '   NaN123%'),
            ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)

        # bytes format argument is rejected
        self.assertRaises(TypeError, Decimal(1).__format__, b'-020')

    def test_n_format(self):
        """'n' formatting with explicitly supplied localeconv-style dicts.

        The C implementation takes the override via __format__'s second
        positional argument; the Python implementation via the private
        _localeconv keyword.
        """
        Decimal = self.decimal.Decimal

        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            # C expects grouping as a bytes-like string of group sizes,
            # the Python version takes the list directly.
            return ''.join([chr(x) for x in lst]) if self.decimal == C else lst

        def get_fmt(x, override=None, fmt='n'):
            if self.decimal == C:
                return Decimal(x).__format__(fmt, override)
            else:
                return Decimal(x).__format__(fmt, _localeconv=override)

        # Set up some localeconv-like dictionaries
        en_US = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
            }

        fr_FR = {
            'decimal_point' : ',',
            'grouping' : make_grouping([CHAR_MAX]),
            'thousands_sep' : ''
            }

        ru_RU = {
            'decimal_point' : ',',
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : ' '
            }

        crazy = {
            'decimal_point' : '&',
            'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
            'thousands_sep' : '-'
            }

        dotsep_wide = {
            'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
            }

        self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
        self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')

        self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
        self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
        self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
        self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')

        self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
        self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')

        # zero padding
        self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
        self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')

        self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
        self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')

        self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')

        # wide char separator and decimal point
        self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
                         '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')

    @run_with_locale('LC_ALL', 'ps_AF')
    def test_wide_char_separator_decimal_point(self):
        """'n' formatting under a real locale with multi-byte separator and
        decimal point; skipped if the locale doesn't provide them."""
        # locale with wide char separator and decimal point
        import locale
        Decimal = self.decimal.Decimal

        decimal_point = locale.localeconv()['decimal_point']
        thousands_sep = locale.localeconv()['thousands_sep']
        if decimal_point != '\u066b':
            self.skipTest('inappropriate decimal point separator'
                          '({!a} not {!a})'.format(decimal_point, '\u066b'))
        if thousands_sep != '\u066c':
            self.skipTest('inappropriate thousands separator'
                          '({!a} not {!a})'.format(thousands_sep, '\u066c'))

        self.assertEqual(format(Decimal('100000000.123'), 'n'),
                         '100\u066c000\u066c000\u066b123')

    def test_decimal_from_float_argument_type(self):
        """from_float passes a plain Decimal (not the subclass) to __init__."""
        class A(self.decimal.Decimal):
            def __init__(self, a):
                self.a_type = type(a)
        a = A.from_float(42.5)
        self.assertEqual(self.decimal.Decimal, a.a_type)

        a = A.from_float(42)
        self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
    # Run the format tests against the C implementation.
    decimal = C
class PyFormatTest(FormatTest):
    # Run the format tests against the pure-Python implementation.
    decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
    '''Unit tests for all arithmetic operators, binary and unary.'''

    def test_addition(self):
        """+ with Decimal/Decimal, Decimal/int (both sides), and in-place."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-11.1')
        d2 = Decimal('22.2')

        #two Decimals
        self.assertEqual(d1+d2, Decimal('11.1'))
        self.assertEqual(d2+d1, Decimal('11.1'))

        #with other type, left
        c = d1 + 5
        self.assertEqual(c, Decimal('-6.1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 + d1
        self.assertEqual(c, Decimal('-6.1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 += d2
        self.assertEqual(d1, Decimal('11.1'))

        #inline with other type
        d1 += 5
        self.assertEqual(d1, Decimal('16.1'))

    def test_subtraction(self):
        """- with Decimal/Decimal, Decimal/int (both sides), and in-place."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-11.1')
        d2 = Decimal('22.2')

        #two Decimals
        self.assertEqual(d1-d2, Decimal('-33.3'))
        self.assertEqual(d2-d1, Decimal('33.3'))

        #with other type, left
        c = d1 - 5
        self.assertEqual(c, Decimal('-16.1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 - d1
        self.assertEqual(c, Decimal('16.1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 -= d2
        self.assertEqual(d1, Decimal('-33.3'))

        #inline with other type
        d1 -= 5
        self.assertEqual(d1, Decimal('-38.3'))

    def test_multiplication(self):
        """* with Decimal/Decimal, Decimal/int (both sides), and in-place."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-5')
        d2 = Decimal('3')

        #two Decimals
        self.assertEqual(d1*d2, Decimal('-15'))
        self.assertEqual(d2*d1, Decimal('-15'))

        #with other type, left
        c = d1 * 5
        self.assertEqual(c, Decimal('-25'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 * d1
        self.assertEqual(c, Decimal('-25'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 *= d2
        self.assertEqual(d1, Decimal('-15'))

        #inline with other type
        d1 *= 5
        self.assertEqual(d1, Decimal('-75'))

    def test_division(self):
        """/ with Decimal/Decimal, Decimal/int (both sides), and in-place."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1/d2, Decimal('-2.5'))
        self.assertEqual(d2/d1, Decimal('-0.4'))

        #with other type, left
        c = d1 / 4
        self.assertEqual(c, Decimal('-1.25'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 4 / d1
        self.assertEqual(c, Decimal('-0.8'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 /= d2
        self.assertEqual(d1, Decimal('-2.5'))

        #inline with other type
        d1 /= 4
        self.assertEqual(d1, Decimal('-0.625'))

    def test_floor_division(self):
        """// with Decimal/Decimal, Decimal/int (both sides), and in-place."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1//d2, Decimal('2'))
        self.assertEqual(d2//d1, Decimal('0'))

        #with other type, left
        c = d1 // 4
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 // d1
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 //= d2
        self.assertEqual(d1, Decimal('2'))

        #inline with other type
        d1 //= 2
        self.assertEqual(d1, Decimal('1'))

    def test_powering(self):
        """** with Decimal/Decimal, Decimal/int (both sides), and in-place."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1**d2, Decimal('25'))
        self.assertEqual(d2**d1, Decimal('32'))

        #with other type, left
        c = d1 ** 4
        self.assertEqual(c, Decimal('625'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 ** d1
        self.assertEqual(c, Decimal('16807'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 **= d2
        self.assertEqual(d1, Decimal('25'))

        #inline with other type
        d1 **= 4
        self.assertEqual(d1, Decimal('390625'))

    def test_module(self):
        """% (modulo) with Decimal/Decimal, Decimal/int, and in-place."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1%d2, Decimal('1'))
        self.assertEqual(d2%d1, Decimal('2'))

        #with other type, left
        c = d1 % 4
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 % d1
        self.assertEqual(c, Decimal('2'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 %= d2
        self.assertEqual(d1, Decimal('1'))

        #inline with other type
        d1 %= 4
        self.assertEqual(d1, Decimal('1'))

    def test_floor_div_module(self):
        """divmod() with Decimal/Decimal and mixed Decimal/int operands."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        (p, q) = divmod(d1, d2)
        self.assertEqual(p, Decimal('2'))
        self.assertEqual(q, Decimal('1'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

        #with other type, left
        (p, q) = divmod(d1, 4)
        self.assertEqual(p, Decimal('1'))
        self.assertEqual(q, Decimal('1'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

        #with other type, right
        (p, q) = divmod(7, d1)
        self.assertEqual(p, Decimal('1'))
        self.assertEqual(q, Decimal('2'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

    def test_unary_operators(self):
        """Unary +, -, and abs()."""
        Decimal = self.decimal.Decimal

        self.assertEqual(+Decimal(45), Decimal(+45))           #  +
        self.assertEqual(-Decimal(45), Decimal(-45))           #  -
        self.assertEqual(abs(Decimal(45)), abs(Decimal(-45)))  # abs

    def test_nan_comparisons(self):
        # comparisons involving signaling nans signal InvalidOperation

        # order comparisons (<, <=, >, >=) involving only quiet nans
        # also signal InvalidOperation

        # equality comparisons (==, !=) involving only quiet nans
        # don't signal, but return False or True respectively.
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        localcontext = self.decimal.localcontext

        n = Decimal('NaN')
        s = Decimal('sNaN')
        i = Decimal('Inf')
        f = Decimal('2')

        qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
        snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
        order_ops = operator.lt, operator.le, operator.gt, operator.ge
        equality_ops = operator.eq, operator.ne

        # results when InvalidOperation is not trapped
        for x, y in qnan_pairs + snan_pairs:
            for op in order_ops + equality_ops:
                got = op(x, y)
                expected = True if op is operator.ne else False
                self.assertIs(expected, got,
                              "expected {0!r} for operator.{1}({2!r}, {3!r}); "
                              "got {4!r}".format(
                                  expected, op.__name__, x, y, got))

        # repeat the above, but this time trap the InvalidOperation
        with localcontext() as ctx:
            ctx.traps[InvalidOperation] = 1

            for x, y in qnan_pairs:
                for op in equality_ops:
                    got = op(x, y)
                    expected = True if op is operator.ne else False
                    self.assertIs(expected, got,
                                  "expected {0!r} for "
                                  "operator.{1}({2!r}, {3!r}); "
                                  "got {4!r}".format(
                                      expected, op.__name__, x, y, got))

            for x, y in snan_pairs:
                for op in equality_ops:
                    self.assertRaises(InvalidOperation, operator.eq, x, y)
                    self.assertRaises(InvalidOperation, operator.ne, x, y)

            for x, y in qnan_pairs + snan_pairs:
                for op in order_ops:
                    self.assertRaises(InvalidOperation, op, x, y)

    def test_copy_sign(self):
        """copy_sign accepts Decimal or int for the sign source, not str."""
        Decimal = self.decimal.Decimal

        d = Decimal(1).copy_sign(Decimal(-2))
        self.assertEqual(Decimal(1).copy_sign(-2), d)
        self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the arithmetic-operator tests against the C implementation.
    decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the arithmetic-operator tests against the pure-Python implementation.
    decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
    """Body of the first worker thread in ThreadingTest.test_threading.

    Verifies that this thread's context is isolated from the context changes
    made by thfunc2.  Protocol: compute test1, signal thfunc2 via
    cls.finish1, wait on cls.synchro until thfunc2 has changed *its*
    thread-local precision to 18, then compute test2.  Both results must
    still show 24 digits (the DefaultContext precision set by the test),
    proving the other thread's change did not leak into this one.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3                 # before thfunc2 changes its context

    cls.finish1.set()             # let thfunc2 proceed
    cls.synchro.wait()            # wait until thfunc2 has set prec=18

    test2 = d1/d3                 # after: must still use this thread's prec
    with localcontext() as c2:
        # a local context starts as a copy of the current thread context,
        # so the Inexact flag from the division above is carried over
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
        cls.assertTrue(c2.flags[DivisionByZero])
        with localcontext() as c3:
            # nested local context inherits c2's accumulated flags
            cls.assertTrue(c3.flags[Inexact])
            cls.assertTrue(c3.flags[DivisionByZero])
            cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
            cls.assertTrue(c3.flags[InvalidOperation])
            del c3
        # flags set in the nested context do not propagate back out
        cls.assertFalse(c2.flags[InvalidOperation])
        del c2

    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333333333'))

    c1 = getcontext()
    cls.assertTrue(c1.flags[Inexact])

    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
    """Body of the second worker thread in ThreadingTest.test_threading.

    Changes this thread's context precision to 18 and verifies the change
    is visible in its own subsequent computation (test2 has 18 digits)
    while thfunc1's context remains untouched.  Also checks that
    localcontext(ctx) copies the given context and that trap changes in
    nested local contexts do not propagate outward.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3                 # at the DefaultContext precision (24)

    thiscontext = getcontext()
    thiscontext.prec = 18         # thread-local change; thfunc1 must not see it
    test2 = d1/d3

    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
        cls.assertTrue(c2.flags[Overflow])
        with localcontext(thiscontext) as c3:
            # explicit source context: copies thiscontext, not c2
            cls.assertTrue(c3.flags[Inexact])
            cls.assertFalse(c3.flags[Overflow])
            c3.traps[Underflow] = True
            cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
            cls.assertTrue(c3.flags[Underflow])
            del c3
        # the Underflow flag/trap set in c3 must not leak into c2
        cls.assertFalse(c2.flags[Underflow])
        cls.assertFalse(c2.traps[Underflow])
        del c2

    cls.synchro.set()             # let thfunc1 run its second division
    cls.finish2.set()             # tell the main thread we are done

    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333'))

    cls.assertFalse(thiscontext.traps[Underflow])
    cls.assertTrue(thiscontext.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
    '''Unit tests for thread local contexts in Decimal.'''

    # Take care executing this test from IDLE, there's an issue in threading
    # that hangs IDLE and I couldn't find it

    def test_threading(self):
        """Run thfunc1 and thfunc2 concurrently to verify that each thread
        gets its own context (seeded from DefaultContext) and that neither
        thread's context changes affect the other or DefaultContext itself.
        """
        DefaultContext = self.decimal.DefaultContext

        if self.decimal == C and not self.decimal.HAVE_THREADS:
            self.skipTest("compiled without threading")
        # Test the "threading isolation" of a Context. Also test changing
        # the DefaultContext, which acts as a template for the thread-local
        # contexts.
        save_prec = DefaultContext.prec
        save_emax = DefaultContext.Emax
        save_emin = DefaultContext.Emin
        DefaultContext.prec = 24
        DefaultContext.Emax = 425000000
        DefaultContext.Emin = -425000000

        # Events coordinating the two workers (see thfunc1/thfunc2 protocol)
        self.synchro = threading.Event()
        self.finish1 = threading.Event()
        self.finish2 = threading.Event()

        th1 = threading.Thread(target=thfunc1, args=(self,))
        th2 = threading.Thread(target=thfunc2, args=(self,))

        th1.start()
        th2.start()

        self.finish1.wait()
        self.finish2.wait()

        # worker-thread activity must not have set any flag on the template
        for sig in Signals[self.decimal]:
            self.assertFalse(DefaultContext.flags[sig])

        # restore the saved DefaultContext settings for subsequent tests
        DefaultContext.prec = save_prec
        DefaultContext.Emax = save_emax
        DefaultContext.Emin = save_emin
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
    # Run the threading tests against the C implementation.
    decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
    # Run the threading tests against the pure-Python implementation.
    decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
    def test_comparison_operators(self):
        """Rich comparisons between Decimals, with ints, with uncomparable
        objects, and sortability of a shuffled list of Decimals."""
        Decimal = self.decimal.Decimal

        da = Decimal('23.42')
        db = Decimal('23.42')
        dc = Decimal('45')

        #two Decimals
        self.assertGreater(dc, da)
        self.assertGreaterEqual(dc, da)
        self.assertLess(da, dc)
        self.assertLessEqual(da, dc)
        self.assertEqual(da, db)
        self.assertNotEqual(da, dc)
        self.assertLessEqual(da, db)
        self.assertGreaterEqual(da, db)

        #a Decimal and an int
        self.assertGreater(dc, 23)
        self.assertLess(23, dc)
        self.assertEqual(dc, 45)

        #a Decimal and uncomparable
        self.assertNotEqual(da, 'ugly')
        self.assertNotEqual(da, 32.7)
        self.assertNotEqual(da, object())
        self.assertNotEqual(da, object)

        # sortable
        a = list(map(Decimal, range(100)))
        b = a[:]
        random.shuffle(a)
        a.sort()
        self.assertEqual(a, b)
    def test_decimal_float_comparison(self):
        """Mixed Decimal/float comparisons compare by exact numeric value:
        Decimal('0.1') != 0.1 because the float is not exactly 1/10."""
        Decimal = self.decimal.Decimal

        da = Decimal('0.25')
        db = Decimal('3.0')
        self.assertLess(da, 3.0)
        self.assertLessEqual(da, 3.0)
        self.assertGreater(db, 0.25)
        self.assertGreaterEqual(db, 0.25)
        self.assertNotEqual(da, 1.5)
        self.assertEqual(da, 0.25)
        self.assertGreater(3.0, da)
        self.assertGreaterEqual(3.0, da)
        self.assertLess(0.25, db)
        self.assertLessEqual(0.25, db)
        self.assertNotEqual(0.25, db)
        self.assertEqual(3.0, db)
        # 0.1 as a float is not exactly 1/10, so it differs from Decimal('0.1')
        self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
    """Exact Decimal/Fraction comparison, including extreme exponents.

    NOTE(review): ``fractions[self.decimal]`` pairs each decimal
    implementation with a matching fractions module, and ``C`` is the
    optional C implementation — both are module-level names.
    """
    D = self.decimal.Decimal
    F = fractions[self.decimal].Fraction
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation

    # Use the widest exponent range the C build supports; fall back to
    # fixed limits on a pure-Python-only build.
    emax = C.MAX_EMAX if C else 999999999
    emin = C.MIN_EMIN if C else -999999999
    etiny = C.MIN_ETINY if C else -1999999997
    c = Context(Emax=emax, Emin=emin)

    with localcontext(c):
        c.prec = emax
        # Tiny fractions straddle zero and the smallest subnormal.
        self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
        self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
        self.assertLess(F(0,1), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,1))
        self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))

        self.assertEqual(D("0.1"), F(1,10))
        self.assertEqual(F(1,10), D("0.1"))

        c.prec = 300
        # 1/3 is inexact in Decimal, so it cannot equal Fraction(1,3).
        self.assertNotEqual(D(1)/3, F(1,3))
        self.assertNotEqual(F(1,3), D(1)/3)

        self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
        self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))

        self.assertGreater(D('inf'), F(99999999999,123))
        self.assertGreater(D('inf'), F(-99999999999,123))
        self.assertLess(D('-inf'), F(99999999999,123))
        self.assertLess(D('-inf'), F(-99999999999,123))
        # Ordering a quiet NaN signals InvalidOperation; Fraction's own
        # comparison defers with NotImplemented; equality simply fails.
        self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
        self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
        self.assertNotEqual(D('nan'), F(-9,123))
        self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
    """hash(Decimal) matches hash of the numeric value and ignores context."""
    Decimal = self.decimal.Decimal
    localcontext = self.decimal.localcontext

    def hashit(d):
        # hash() and __hash__() must agree.
        a = hash(d)
        b = d.__hash__()
        self.assertEqual(a, b)
        return a

    #just that it's hashable
    hashit(Decimal(23))
    hashit(Decimal('Infinity'))
    hashit(Decimal('-Infinity'))
    hashit(Decimal('nan123'))
    hashit(Decimal('-NaN'))

    # Integral values near powers of two exercise word-size boundaries
    # of the integer hash algorithm.
    test_values = [Decimal(sign*(2**m + n))
                   for m in [0, 14, 15, 16, 17, 30, 31,
                             32, 33, 61, 62, 63, 64, 65, 66]
                   for n in range(-10, 10)
                   for sign in [-1, 1]]
    test_values.extend([
            Decimal("-1"), # ==> -2
            Decimal("-0"), # zeros
            Decimal("0.00"),
            Decimal("-0.000"),
            Decimal("0E10"),
            Decimal("-0E12"),
            Decimal("10.0"), # negative exponent
            Decimal("-23.00000"),
            Decimal("1230E100"), # positive exponent
            Decimal("-4.5678E50"),
            # a value for which hash(n) != hash(n % (2**64-1))
            # in Python pre-2.6
            Decimal(2**64 + 2**32 - 1),
            # selection of values which fail with the old (before
            # version 2.6) long.__hash__
            Decimal("1.634E100"),
            Decimal("90.697E100"),
            Decimal("188.83E100"),
            Decimal("1652.9E100"),
            Decimal("56531E100"),
            ])

    # check that hash(d) == hash(int(d)) for integral values
    for value in test_values:
        self.assertEqual(hashit(value), hashit(int(value)))

    #the same hash that to an int
    self.assertEqual(hashit(Decimal(23)), hashit(23))
    # Signaling NaNs are unhashable; quiet NaNs/infinities hash fine.
    self.assertRaises(TypeError, hash, Decimal('sNaN'))
    self.assertTrue(hashit(Decimal('Inf')))
    self.assertTrue(hashit(Decimal('-Inf')))

    # check that the hashes of a Decimal float match when they
    # represent exactly the same values
    test_strings = ['inf', '-Inf', '0.0', '-.0e1',
                    '34.0', '2.5', '112390.625', '-0.515625']
    for s in test_strings:
        f = float(s)
        d = Decimal(s)
        self.assertEqual(hashit(f), hashit(d))

    with localcontext() as c:
        # check that the value of the hash doesn't depend on the
        # current context (issue #1757)
        x = Decimal("123456789.1")

        c.prec = 6
        h1 = hashit(x)
        c.prec = 10
        h2 = hashit(x)
        c.prec = 16
        h3 = hashit(x)

        self.assertEqual(h1, h2)
        self.assertEqual(h1, h3)

        # Large integral value round-trips through Decimal with the
        # same hash as the int itself.
        c.prec = 10000
        x = 1100 ** 1248
        self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
    """int(), float(), math.floor/ceil and round() on Decimal values."""
    #Test float and int methods.
    Decimal = self.decimal.Decimal

    d1 = Decimal('66')
    d2 = Decimal('15.32')

    #int — truncates toward zero
    self.assertEqual(int(d1), 66)
    self.assertEqual(int(d2), 15)

    #float
    self.assertEqual(float(d1), 66)
    self.assertEqual(float(d2), 15.32)

    #floor — rounds toward negative infinity
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 3),
        ('3.899', 3),
        ('-2.3', -3),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812736),
        ]
    for d, i in test_pairs:
        self.assertEqual(math.floor(Decimal(d)), i)
    # NaNs cannot be floored; infinities overflow.
    self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
    self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
    self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
    self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))

    #ceiling — rounds toward positive infinity
    test_pairs = [
        ('123.00', 123),
        ('3.2', 4),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812737),
        ]
    for d, i in test_pairs:
        self.assertEqual(math.ceil(Decimal(d)), i)
    self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
    self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))

    #round, single argument — banker's rounding (ties to even)
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('-3.5', -4),
        ('-2.5', -2),
        ('-1.5', -2),
        ('-0.5', 0),
        ('0.5', 0),
        ('1.5', 2),
        ('2.5', 2),
        ('3.5', 4),
        ]
    for d, i in test_pairs:
        self.assertEqual(round(Decimal(d)), i)
    self.assertRaises(ValueError, round, Decimal('-NaN'))
    self.assertRaises(ValueError, round, Decimal('sNaN'))
    self.assertRaises(ValueError, round, Decimal('NaN123'))
    self.assertRaises(OverflowError, round, Decimal('Inf'))
    self.assertRaises(OverflowError, round, Decimal('-Inf'))

    #round, two arguments; this is essentially equivalent
    #to quantize, which is already extensively tested
    test_triples = [
        ('123.456', -4, '0E+4'),
        ('123.456', -3, '0E+3'),
        ('123.456', -2, '1E+2'),
        ('123.456', -1, '1.2E+2'),
        ('123.456', 0, '123'),
        ('123.456', 1, '123.5'),
        ('123.456', 2, '123.46'),
        ('123.456', 3, '123.456'),
        ('123.456', 4, '123.4560'),
        ('123.455', 2, '123.46'),
        ('123.445', 2, '123.44'),
        ('Inf', 4, 'NaN'),
        ('-Inf', -23, 'NaN'),
        ('sNaN314', 3, 'NaN314'),
        ]
    for d, n, r in test_triples:
        self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
    """as_tuple() returns (sign, digit-tuple, exponent) in canonical form."""
    Decimal = self.decimal.Decimal

    #with zero
    d = Decimal(0)
    self.assertEqual(d.as_tuple(), (0, (0,), 0) )

    #int
    d = Decimal(-45)
    self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )

    #complicated string
    d = Decimal("-4.34913534E-17")
    self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )

    # The '0' coefficient is implementation specific to decimal.py.
    # It has no meaning in the C-version and is ignored there.
    d = Decimal("Infinity")
    self.assertEqual(d.as_tuple(), (0, (0,), 'F') )

    #leading zeros in coefficient should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
    d = Decimal( (1, (0, 0, 0), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))
    d = Decimal( (1, (), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))

    #leading zeros in NaN diagnostic info should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
    d = Decimal( (1, (0, 0, 0), 'N') )
    self.assertEqual(d.as_tuple(), (1, (), 'N') )
    d = Decimal( (1, (), 'n') )
    self.assertEqual(d.as_tuple(), (1, (), 'n') )

    # For infinities, decimal.py has always silently accepted any
    # coefficient tuple.
    d = Decimal( (0, (0,), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (0, (4, 5, 3, 4), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (1, (0, 2, 7, 1), 'F') )
    self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
    """as_integer_ratio() yields a normalized (p, q) pair of ints."""
    Decimal = self.decimal.Decimal

    # exceptional cases — infinities overflow, NaNs are invalid
    self.assertRaises(OverflowError,
                      Decimal.as_integer_ratio, Decimal('inf'))
    self.assertRaises(OverflowError,
                      Decimal.as_integer_ratio, Decimal('-inf'))
    self.assertRaises(ValueError,
                      Decimal.as_integer_ratio, Decimal('-nan'))
    self.assertRaises(ValueError,
                      Decimal.as_integer_ratio, Decimal('snan123'))

    for exp in range(-4, 2):
        for coeff in range(1000):
            for sign in '+', '-':
                d = Decimal('%s%dE%d' % (sign, coeff, exp))
                pq = d.as_integer_ratio()
                p, q = pq

                # check return type
                self.assertIsInstance(pq, tuple)
                self.assertIsInstance(p, int)
                self.assertIsInstance(q, int)

                # check normalization:  q should be positive;
                # p should be relatively prime to q.
                self.assertGreater(q, 0)
                self.assertEqual(math.gcd(p, q), 1)

                # check that p/q actually gives the correct value
                self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
    # Different behaviours when subclassing Decimal
    """Arithmetic returns plain Decimal; copying/constructing preserves
    the subclass but not instance state."""
    Decimal = self.decimal.Decimal

    class MyDecimal(Decimal):
        y = None

    d1 = MyDecimal(1)
    d2 = MyDecimal(2)
    # Arithmetic and method results drop back to the base class.
    d = d1 + d2
    self.assertIs(type(d), Decimal)

    d = d1.max(d2)
    self.assertIs(type(d), Decimal)

    # Copies keep the subclass type.
    d = copy.copy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    d = copy.deepcopy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    # Decimal(Decimal)
    d = Decimal('1.0')
    x = Decimal(d)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(Decimal)
    m = MyDecimal(d)
    self.assertIs(type(m), MyDecimal)
    self.assertEqual(m, d)
    self.assertIs(m.y, None)

    # Decimal(MyDecimal)
    x = Decimal(m)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(MyDecimal) — instance attributes are not copied.
    m.y = 9
    x = MyDecimal(m)
    self.assertIs(type(x), MyDecimal)
    self.assertEqual(x, d)
    self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
    """context=None (and rounding=None) must mean 'use the current
    context/rounding' for every Decimal method, including the flags the
    operation sets on that current context."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation
    DivisionByZero = self.decimal.DivisionByZero
    Overflow = self.decimal.Overflow
    Underflow = self.decimal.Underflow
    Subnormal = self.decimal.Subnormal
    Inexact = self.decimal.Inexact
    Rounded = self.decimal.Rounded
    Clamped = self.decimal.Clamped

    with localcontext(Context()) as c:
        c.prec = 7
        c.Emax = 999
        c.Emin = -999

        x = Decimal("111")
        y = Decimal("1e9999")
        z = Decimal("1e-9999")

        ##### Unary functions
        c.clear_flags()
        self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(Overflow, y.exp, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertIs(z.is_normal(context=None), False)
        self.assertIs(z.is_subnormal(context=None), True)

        c.clear_flags()
        self.assertEqual(str(x.ln(context=None)), '4.709530')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.log10(context=None)), '2.045323')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.logb(context=None)), '2')
        self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
        self.assertTrue(c.flags[DivisionByZero])

        c.clear_flags()
        self.assertEqual(str(x.logical_invert(context=None)), '1111000')
        self.assertRaises(InvalidOperation, y.logical_invert, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(z.normalize(context=None)), '0')
        self.assertRaises(Overflow, y.normalize, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertEqual(str(z.number_class(context=None)), '+Subnormal')

        # sqrt of a subnormal raises the full set of underflow signals.
        c.clear_flags()
        self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
        self.assertTrue(c.flags[Clamped])
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        self.assertTrue(c.flags[Subnormal])
        self.assertTrue(c.flags[Underflow])
        c.clear_flags()
        self.assertRaises(Overflow, y.sqrt, context=None)
        self.assertTrue(c.flags[Overflow])

        # to_eng_string honours the current context's capitals setting.
        c.capitals = 0
        self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
        c.capitals = 1


        ##### Binary functions
        c.clear_flags()
        ans = str(x.compare(Decimal('Nan891287828'), context=None))
        self.assertEqual(ans, 'NaN1287828')
        self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.compare_signal(8224, context=None))
        self.assertEqual(ans, '-1')
        self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_and(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_or(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_xor(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max_mag(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min_mag(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.remainder_near(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.rotate(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.scaleb(7, context=None))
        self.assertEqual(ans, '1.11E+9')
        self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.shift(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])


        ##### Ternary functions
        c.clear_flags()
        ans = str(x.fma(2, 3, context=None))
        self.assertEqual(ans, '225')
        self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
        self.assertTrue(c.flags[Overflow])


        ##### Special cases
        # rounding=None picks up the current context's rounding mode.
        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_UP
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.501')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.500')
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '1.501')
        c.clear_flags()
        self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
        self.assertTrue(c.flags[InvalidOperation])

    # localcontext(ctx=None) must clone the current context.
    with localcontext(Context()) as context:
        context.prec = 7
        context.Emax = 999
        context.Emin = -999
        with localcontext(ctx=None) as c:
            self.assertEqual(c.prec, 7)
            self.assertEqual(c.Emax, 999)
            self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
    # Check that methods taking a second Decimal argument will
    # always accept an integer in place of a Decimal.
    Decimal = self.decimal.Decimal

    self.assertEqual(Decimal(4).compare(3),
                     Decimal(4).compare(Decimal(3)))
    self.assertEqual(Decimal(4).compare_signal(3),
                     Decimal(4).compare_signal(Decimal(3)))
    self.assertEqual(Decimal(4).compare_total(3),
                     Decimal(4).compare_total(Decimal(3)))
    self.assertEqual(Decimal(4).compare_total_mag(3),
                     Decimal(4).compare_total_mag(Decimal(3)))
    self.assertEqual(Decimal(10101).logical_and(1001),
                     Decimal(10101).logical_and(Decimal(1001)))
    self.assertEqual(Decimal(10101).logical_or(1001),
                     Decimal(10101).logical_or(Decimal(1001)))
    self.assertEqual(Decimal(10101).logical_xor(1001),
                     Decimal(10101).logical_xor(Decimal(1001)))
    self.assertEqual(Decimal(567).max(123),
                     Decimal(567).max(Decimal(123)))
    self.assertEqual(Decimal(567).max_mag(123),
                     Decimal(567).max_mag(Decimal(123)))
    self.assertEqual(Decimal(567).min(123),
                     Decimal(567).min(Decimal(123)))
    self.assertEqual(Decimal(567).min_mag(123),
                     Decimal(567).min_mag(Decimal(123)))
    self.assertEqual(Decimal(567).next_toward(123),
                     Decimal(567).next_toward(Decimal(123)))
    self.assertEqual(Decimal(1234).quantize(100),
                     Decimal(1234).quantize(Decimal(100)))
    self.assertEqual(Decimal(768).remainder_near(1234),
                     Decimal(768).remainder_near(Decimal(1234)))
    self.assertEqual(Decimal(123).rotate(1),
                     Decimal(123).rotate(Decimal(1)))
    self.assertEqual(Decimal(1234).same_quantum(1000),
                     Decimal(1234).same_quantum(Decimal(1000)))
    self.assertEqual(Decimal('9.123').scaleb(-100),
                     Decimal('9.123').scaleb(Decimal(-100)))
    self.assertEqual(Decimal(456).shift(-1),
                     Decimal(456).shift(Decimal(-1)))

    # fma() accepts ints in either (or both) trailing positions.
    self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
                     Decimal(-12).fma(Decimal(45), Decimal(67)))
    self.assertEqual(Decimal(-12).fma(45, 67),
                     Decimal(-12).fma(Decimal(45), Decimal(67)))
    self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
                     Decimal(-12).fma(Decimal(45), Decimal(67)))
# Bind the shared usability tests to each implementation.
class CUsabilityTest(UsabilityTest):
    decimal = C

class PyUsabilityTest(UsabilityTest):
    decimal = P
class PythonAPItests(unittest.TestCase):
    """Python-level API tests: ABC registration, pickling, int/float
    conversion, quantize and named parameters.  Subclasses bind
    ``decimal`` to the C or pure-Python implementation."""
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
    """Decimal and DecimalTuple pickle round-trips for every protocol,
    including interchangeability between the C and Python modules
    (achieved by swapping sys.modules['decimal'] around dumps/loads)."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        Decimal = self.decimal.Decimal

        savedecimal = sys.modules['decimal']

        # Round trip
        sys.modules['decimal'] = self.decimal
        d = Decimal('-3.141590000')
        p = pickle.dumps(d, proto)
        e = pickle.loads(p)
        self.assertEqual(d, e)

        if C:
            # Test interchangeability
            x = C.Decimal('-3.123e81723')
            y = P.Decimal('-3.123e81723')

            sys.modules['decimal'] = C
            sx = pickle.dumps(x, proto)
            sys.modules['decimal'] = P
            r = pickle.loads(sx)
            self.assertIsInstance(r, P.Decimal)
            self.assertEqual(r, y)

            sys.modules['decimal'] = P
            sy = pickle.dumps(y, proto)
            sys.modules['decimal'] = C
            r = pickle.loads(sy)
            self.assertIsInstance(r, C.Decimal)
            self.assertEqual(r, x)

            x = C.Decimal('-3.123e81723').as_tuple()
            y = P.Decimal('-3.123e81723').as_tuple()

            sys.modules['decimal'] = C
            sx = pickle.dumps(x, proto)
            sys.modules['decimal'] = P
            r = pickle.loads(sx)
            self.assertIsInstance(r, P.DecimalTuple)
            self.assertEqual(r, y)

            sys.modules['decimal'] = P
            sy = pickle.dumps(y, proto)
            sys.modules['decimal'] = C
            r = pickle.loads(sy)
            self.assertIsInstance(r, C.DecimalTuple)
            self.assertEqual(r, x)

        # Restore the real module on every path.
        sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
    """Decimal.from_float: exactness, subclass support and specials."""
    Decimal = self.decimal.Decimal

    class MyDecimal(Decimal):
        def __init__(self, _):
            self.x = 'y'

    self.assertTrue(issubclass(MyDecimal, Decimal))

    # from_float preserves the subclass and runs its __init__.
    r = MyDecimal.from_float(0.1)
    self.assertEqual(type(r), MyDecimal)
    # 0.1 converts exactly to its full binary expansion.
    self.assertEqual(str(r),
            '0.1000000000000000055511151231257827021181583404541015625')
    self.assertEqual(r.x, 'y')

    bigint = 12345678901234567890123456789
    self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
    # Specials map to the corresponding Decimal specials.
    self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
    self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
    self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
    self.assertEqual(str(MyDecimal.from_float(float('nan'))),
                     str(Decimal('NaN')))
    self.assertEqual(str(MyDecimal.from_float(float('inf'))),
                     str(Decimal('Infinity')))
    self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
                     str(Decimal('-Infinity')))
    self.assertRaises(TypeError, MyDecimal.from_float, 'abc')

    # Random floats must round-trip exactly through from_float.
    for i in range(200):
        x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
        self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
    """Context.create_decimal_from_float honours the context's precision,
    rounding mode and traps."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    Inexact = self.decimal.Inexact

    context = Context(prec=5, rounding=ROUND_DOWN)
    self.assertEqual(
        context.create_decimal_from_float(math.pi),
        Decimal('3.1415')  # round down
    )
    context = Context(prec=5, rounding=ROUND_UP)
    self.assertEqual(
        context.create_decimal_from_float(math.pi),
        Decimal('3.1416')  # round up
    )
    # Trapped Inexact raises instead of rounding silently.
    context = Context(prec=5, traps=[Inexact])
    self.assertRaises(
        Inexact,
        context.create_decimal_from_float,
        math.pi
    )
    # Sign of negative zero and int inputs are preserved.
    self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
                     "Decimal('-0')")
    self.assertEqual(repr(context.create_decimal_from_float(1.0)),
                     "Decimal('1')")
    self.assertEqual(repr(context.create_decimal_from_float(10)),
                     "Decimal('10')")
def test_quantize(self):
    """quantize(): default rounding, explicit rounding, exponent limits
    and keyword-argument form."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    InvalidOperation = self.decimal.InvalidOperation

    c = Context(Emax=99999, Emin=-99999)
    # Default rounding is the context's ROUND_HALF_EVEN.
    self.assertEqual(
        Decimal('7.335').quantize(Decimal('.01')),
        Decimal('7.34')
    )
    self.assertEqual(
        Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
        Decimal('7.33')
    )
    # Target exponent beyond Emax is an invalid operation.
    self.assertRaises(
        InvalidOperation,
        Decimal("10e99999").quantize, Decimal('1e100000'), context=c
    )

    c = Context()
    d = Decimal("0.871831e800")
    # All arguments may be passed by keyword, in any order.
    x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
    self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
    """Decimal methods accept their arguments by keyword, and an explicit
    context=xc receives the flags instead of the current context."""
    D = self.decimal.Decimal
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation
    Overflow = self.decimal.Overflow

    # A deliberately tiny context so most operations round or signal.
    xc = Context()
    xc.prec = 1
    xc.Emax = 1
    xc.Emin = -1

    with localcontext() as c:
        c.clear_flags()

        self.assertEqual(D(9, xc), 9)
        self.assertEqual(D(9, context=xc), 9)
        self.assertEqual(D(context=xc, value=9), 9)
        self.assertEqual(D(context=xc), 0)
        xc.clear_flags()
        self.assertRaises(InvalidOperation, D, "xyz", context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        # The current context must stay untouched.
        self.assertFalse(c.flags[InvalidOperation])

        xc.clear_flags()
        self.assertEqual(D(2).exp(context=xc), 7)
        self.assertRaises(Overflow, D(8).exp, context=xc)
        self.assertTrue(xc.flags[Overflow])
        self.assertFalse(c.flags[Overflow])

        xc.clear_flags()
        self.assertEqual(D(2).ln(context=xc), D('0.7'))
        self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        self.assertFalse(c.flags[InvalidOperation])

        self.assertEqual(D(0).log10(context=xc), D('-inf'))
        self.assertEqual(D(-1).next_minus(context=xc), -2)
        self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
        self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
        self.assertEqual(D("9999").to_integral(context=xc), 9999)
        self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
        self.assertEqual(D("123").to_integral_value(context=xc), 123)
        self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))

        self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
        xc.clear_flags()
        self.assertRaises(InvalidOperation,
                          D("0").compare_signal, D('nan'), context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        self.assertFalse(c.flags[InvalidOperation])
        self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
        self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
        self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
                         D('-0.3'))
        self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
        self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
                         D('0.0'))
        self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
        xc.clear_flags()
        self.assertRaises(InvalidOperation,
                          D("0.2").quantize, D('1e10'), context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        self.assertFalse(c.flags[InvalidOperation])
        self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
                         D('-0.5'))

        self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
                         D('7E+1'))

        # Predicates that never depend on a context must reject one.
        self.assertRaises(TypeError, D(1).is_canonical, context=xc)
        self.assertRaises(TypeError, D(1).is_finite, context=xc)
        self.assertRaises(TypeError, D(1).is_infinite, context=xc)
        self.assertRaises(TypeError, D(1).is_nan, context=xc)
        self.assertRaises(TypeError, D(1).is_qnan, context=xc)
        self.assertRaises(TypeError, D(1).is_snan, context=xc)
        self.assertRaises(TypeError, D(1).is_signed, context=xc)
        self.assertRaises(TypeError, D(1).is_zero, context=xc)

        self.assertFalse(D("0.01").is_normal(context=xc))
        self.assertTrue(D("0.01").is_subnormal(context=xc))

        self.assertRaises(TypeError, D(1).adjusted, context=xc)
        self.assertRaises(TypeError, D(1).conjugate, context=xc)
        self.assertRaises(TypeError, D(1).radix, context=xc)

        self.assertEqual(D(-111).logb(context=xc), 2)
        self.assertEqual(D(0).logical_invert(context=xc), 1)
        self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
        self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')

        self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
        self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
        self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
        self.assertEqual(D('23').rotate(1, context=xc), 3)
        self.assertEqual(D('23').rotate(1, context=xc), 3)
        xc.clear_flags()
        self.assertRaises(Overflow,
                          D('23').scaleb, 1, context=xc)
        self.assertTrue(xc.flags[Overflow])
        self.assertFalse(c.flags[Overflow])
        self.assertEqual(D('23').shift(-1, context=xc), 0)

        self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
        self.assertRaises(TypeError, D(0).as_tuple, context=xc)

        self.assertEqual(D(1).canonical(), 1)
        self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
        self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
        self.assertRaises(TypeError, D(1).canonical, context="x")
        self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
    # Repeat the PythonAPItests suite with `decimal` bound to the C version.
    decimal = C
class PyPythonAPItests(PythonAPItests):
    # Repeat the PythonAPItests suite with `decimal` bound to the pure-Python version.
    decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
    # Repeat the ContextAPItests suite with `decimal` bound to the C version.
    decimal = C
class PyContextAPItests(ContextAPItests):
    # Repeat the ContextAPItests suite with `decimal` bound to the pure-Python version.
    decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
    # Repeat the ContextWithStatement suite with `decimal` bound to the C version.
    decimal = C
class PyContextWithStatement(ContextWithStatement):
    # Repeat the ContextWithStatement suite with `decimal` bound to the pure-Python version.
    decimal = P
class ContextFlags(unittest.TestCase):
    def test_flags_irrelevant(self):
        # check that the result (numeric result + flags raised) of an
        # arithmetic operation doesn't depend on the current flags
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped
        Subnormal = self.decimal.Subnormal
        def raise_error(context, flag):
            # The C implementation takes the manual path here: set the flag
            # directly and raise if it is trapped; the Python implementation
            # goes through its _raise_error() helper.
            if self.decimal == C:
                context.flags[flag] = True
                if context.traps[flag]:
                    raise flag
            else:
                context._raise_error(flag)
        # traps=[] so flags accumulate silently instead of raising.
        context = Context(prec=9, Emin = -425000000, Emax = 425000000,
                          rounding=ROUND_HALF_EVEN, traps=[], flags=[])
        # operations that raise various flags, in the form (function, arglist)
        operations = [
            (context._apply, [Decimal("100E-425000010")]),
            (context.sqrt, [Decimal(2)]),
            (context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
            ]
        # try various flags individually, then a whole lot at once
        flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
                    [Inexact, Rounded, Underflow, Clamped, Subnormal]]
        for fn, args in operations:
            # find answer and flags raised using a clean context
            context.clear_flags()
            ans = fn(*args)
            flags = [k for k, v in context.flags.items() if v]
            for extra_flags in flagsets:
                # set flags, before calling operation
                context.clear_flags()
                for flag in extra_flags:
                    raise_error(context, flag)
                new_ans = fn(*args)
                # flags that we expect to be set after the operation
                expected_flags = list(flags)
                for flag in extra_flags:
                    if flag not in expected_flags:
                        expected_flags.append(flag)
                # sort both lists by id() to get a common, comparable order
                expected_flags.sort(key=id)
                # flags we actually got
                new_flags = [k for k,v in context.flags.items() if v]
                new_flags.sort(key=id)
                self.assertEqual(ans, new_ans,
                                 "operation produces different answers depending on flags set: " +
                                 "expected %s, got %s." % (ans, new_ans))
                self.assertEqual(new_flags, expected_flags,
                                  "operation raises different flags depending on flags set: " +
                                  "expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
    @requires_IEEE_754
    def test_float_operation(self):
        """FloatOperation: implicit float->Decimal conversion and mixed
        comparison set the flag (and raise once the trap is set), while the
        explicit from_float()/create_decimal_from_float() constructors never
        set the flag, even with the trap enabled."""
        Decimal = self.decimal.Decimal
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext
        with localcontext() as c:
            ##### trap is off by default
            self.assertFalse(c.traps[FloatOperation])
            # implicit conversion sets the flag
            c.clear_flags()
            self.assertEqual(Decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])
            c.clear_flags()
            self.assertEqual(c.create_decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])
            # explicit conversion does not set the flag
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            # comparison sets the flag
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])
            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            # the context-level explicit constructor is silent too ...
            self.assertFalse(c.flags[FloatOperation])
            # ... but comparing its result against a float still signals
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])
            ##### set the trap
            c.traps[FloatOperation] = True
            # implicit conversion raises
            c.clear_flags()
            self.assertRaises(FloatOperation, Decimal, 7.5)
            # the flag is set even though the trap fired
            self.assertTrue(c.flags[FloatOperation])
            c.clear_flags()
            self.assertRaises(FloatOperation, c.create_decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])
            # explicit conversion is silent
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
    def test_float_comparison(self):
        """Mixed Decimal/float comparisons must signal FloatOperation: with
        the trap unset the comparison succeeds and sets the flag; with the
        trap set, ordering comparisons (and sorting) raise, while equality
        and inequality still only set the flag."""
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext
        def assert_attr(a, b, attr, context, signal=None):
            # Evaluate a.<attr>(b); expect `signal` to be raised if given,
            # otherwise a True result.  The flag must be set either way.
            context.clear_flags()
            f = getattr(a, attr)
            if signal == FloatOperation:
                self.assertRaises(signal, f, b)
            else:
                self.assertIs(f(b), True)
            self.assertTrue(context.flags[FloatOperation])
        small_d = Decimal('0.25')
        big_d = Decimal('3.0')
        small_f = 0.25
        big_f = 3.0
        zero_d = Decimal('0.0')
        neg_zero_d = Decimal('-0.0')
        zero_f = 0.0
        neg_zero_f = -0.0
        inf_d = Decimal('Infinity')
        neg_inf_d = Decimal('-Infinity')
        inf_f = float('inf')
        neg_inf_f = float('-inf')
        def doit(c, signal=None):
            # Order
            for attr in '__lt__', '__le__':
                assert_attr(small_d, big_f, attr, c, signal)
            for attr in '__gt__', '__ge__':
                assert_attr(big_d, small_f, attr, c, signal)
            # Equality
            # (equality/inequality never raise, so signal is None throughout)
            assert_attr(small_d, small_f, '__eq__', c, None)
            assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
            assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(zero_d, zero_f, '__eq__', c, None)
            assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
            assert_attr(inf_d, inf_f, '__eq__', c, None)
            # Inequality
            assert_attr(small_d, big_f, '__ne__', c, None)
            assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
            assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
            assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
            assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
        def test_containers(c, signal=None):
            # Container operations compare elements, so they signal too.
            c.clear_flags()
            s = set([100.0, Decimal('100.0')])
            self.assertEqual(len(s), 1)
            self.assertTrue(c.flags[FloatOperation])
            c.clear_flags()
            # sorting performs ordering comparisons, so it raises when trapped
            if signal:
                self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
            else:
                s = sorted([10.0, Decimal('10.0')])
            self.assertTrue(c.flags[FloatOperation])
            c.clear_flags()
            b = 10.0 in [Decimal('10.0'), 1.0]
            self.assertTrue(c.flags[FloatOperation])
            c.clear_flags()
            b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
            self.assertTrue(c.flags[FloatOperation])
        nc = Context()
        with localcontext(nc) as c:
            self.assertFalse(c.traps[FloatOperation])
            doit(c, signal=None)
            test_containers(c, signal=None)
            c.traps[FloatOperation] = True
            doit(c, signal=FloatOperation)
            test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation= self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
    # Run the ContextFlags tests against the C implementation (_decimal).
    decimal = C
class PyContextFlags(ContextFlags):
    # Run the ContextFlags tests against the Python implementation.
    decimal = P
class SpecialContexts(unittest.TestCase):
    """Test the context templates."""

    def test_context_templates(self):
        # setcontext(template) must install a *copy* of the template, and
        # attribute changes on the shared template must be picked up by
        # subsequent setcontext() calls.
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped

        assert_signals(self, BasicContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
        )

        savecontext = getcontext().copy()
        basic_context_prec = BasicContext.prec
        extended_context_prec = ExtendedContext.prec

        ex = None
        try:
            BasicContext.prec = ExtendedContext.prec = 441
            for template in BasicContext, ExtendedContext:
                setcontext(template)
                c = getcontext()
                self.assertIsNot(c, template)  # must be a copy, not the template
                self.assertEqual(c.prec, 441)
        except Exception as e:
            ex = e.__class__
        finally:
            # Restore the shared templates and the thread's context even if
            # an assertion failed, then re-raise any recorded failure.
            BasicContext.prec = basic_context_prec
            ExtendedContext.prec = extended_context_prec
            setcontext(savecontext)
            if ex:
                raise ex

    def test_default_context(self):
        # Changes to DefaultContext must not affect the current context,
        # but must be visible after setcontext(DefaultContext).
        DefaultContext = self.decimal.DefaultContext
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow

        self.assertEqual(BasicContext.prec, 9)
        self.assertEqual(ExtendedContext.prec, 9)

        assert_signals(self, DefaultContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow]
        )

        savecontext = getcontext().copy()
        default_context_prec = DefaultContext.prec

        ex = None
        try:
            c = getcontext()
            saveprec = c.prec

            DefaultContext.prec = 961
            c = getcontext()
            self.assertEqual(c.prec, saveprec)  # current context unaffected

            setcontext(DefaultContext)
            c = getcontext()
            self.assertIsNot(c, DefaultContext)  # a copy is installed
            self.assertEqual(c.prec, 961)
        except Exception as e:
            ex = e.__class__
        finally:
            # Restore global state, then re-raise any recorded failure.
            DefaultContext.prec = default_context_prec
            setcontext(savecontext)
            if ex:
                raise ex
class CSpecialContexts(SpecialContexts):
    # Run the SpecialContexts tests against the C implementation (_decimal).
    decimal = C
class PySpecialContexts(SpecialContexts):
    # Run the SpecialContexts tests against the Python implementation.
    decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
    # Run the input-validation tests against the C implementation (_decimal).
    decimal = C
class PyContextInputValidation(ContextInputValidation):
    # Run the input-validation tests against the Python implementation.
    decimal = P
class ContextSubclassing(unittest.TestCase):

    def test_context_subclassing(self):
        # A Context subclass that overrides __init__ must be able to set
        # every attribute itself, and the resulting contexts must behave
        # exactly like directly configured Context instances.
        decimal = self.decimal
        Decimal = decimal.Decimal
        Context = decimal.Context
        Clamped = decimal.Clamped
        DivisionByZero = decimal.DivisionByZero
        Inexact = decimal.Inexact
        Overflow = decimal.Overflow
        Rounded = decimal.Rounded
        Subnormal = decimal.Subnormal
        Underflow = decimal.Underflow
        InvalidOperation = decimal.InvalidOperation

        class MyContext(Context):
            # Only explicitly passed attributes are set; everything else
            # keeps the defaults established by Context.__init__().
            def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                               capitals=None, clamp=None, flags=None,
                               traps=None):
                Context.__init__(self)
                if prec is not None:
                    self.prec = prec
                if rounding is not None:
                    self.rounding = rounding
                if Emin is not None:
                    self.Emin = Emin
                if Emax is not None:
                    self.Emax = Emax
                if capitals is not None:
                    self.capitals = capitals
                if clamp is not None:
                    self.clamp = clamp
                if flags is not None:
                    # A list of signals is expanded to a complete signal dict.
                    if isinstance(flags, list):
                        flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
                    self.flags = flags
                if traps is not None:
                    if isinstance(traps, list):
                        traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
                    self.traps = traps

        # With no arguments, the subclass must match a default Context.
        c = Context()
        d = MyContext()
        for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
                     'flags', 'traps'):
            self.assertEqual(getattr(c, attr), getattr(d, attr))

        # prec
        self.assertRaises(ValueError, MyContext, **{'prec':-1})
        c = MyContext(prec=1)
        self.assertEqual(c.prec, 1)
        self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)

        # rounding
        self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
        c = MyContext(rounding=ROUND_DOWN, prec=1)
        self.assertEqual(c.rounding, ROUND_DOWN)
        self.assertEqual(c.plus(Decimal('9.9')), 9)

        # Emin
        self.assertRaises(ValueError, MyContext, **{'Emin':5})
        c = MyContext(Emin=-1, prec=1)
        self.assertEqual(c.Emin, -1)
        x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
        self.assertEqual(x, Decimal('0.0'))
        for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
            self.assertTrue(c.flags[signal])

        # Emax
        self.assertRaises(ValueError, MyContext, **{'Emax':-1})
        c = MyContext(Emax=1, prec=1)
        self.assertEqual(c.Emax, 1)
        self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
        if self.decimal == C:
            # The flag set after a trapped Overflow differs between the
            # implementations; only checked for _decimal here.
            for signal in (Inexact, Overflow, Rounded):
                self.assertTrue(c.flags[signal])

        # capitals
        self.assertRaises(ValueError, MyContext, **{'capitals':-1})
        c = MyContext(capitals=0)
        self.assertEqual(c.capitals, 0)
        x = c.create_decimal('1E222')
        self.assertEqual(c.to_sci_string(x), '1e+222')

        # clamp
        self.assertRaises(ValueError, MyContext, **{'clamp':2})
        c = MyContext(clamp=1, Emax=99)
        self.assertEqual(c.clamp, 1)
        x = c.plus(Decimal('1e99'))
        self.assertEqual(str(x), '1.000000000000000000000000000E+99')

        # flags
        self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
        c = MyContext(flags=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.flags[signal])
        c.clear_flags()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.flags[signal])

        # traps
        self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
        c = MyContext(traps=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.traps[signal])
        c.clear_traps()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
    # Run the subclassing tests against the C implementation (_decimal).
    decimal = C
class PyContextSubclassing(ContextSubclassing):
    # Run the subclassing tests against the Python implementation.
    decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
    """Verify that the C (_decimal) and Python (_pydecimal) implementations
    expose equivalent module, Context and Decimal attributes."""

    def test_module_attributes(self):
        # Architecture dependent context limits must agree between C and P.
        self.assertEqual(C.MAX_PREC, P.MAX_PREC)
        self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
        self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
        self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)

        self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
        self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)

        self.assertEqual(C.__version__, P.__version__)

        self.assertEqual(dir(C), dir(P))

    def test_context_attributes(self):
        x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())

    def test_decimal_attributes(self):
        # Bug fix: the second listing previously inspected C.Decimal again,
        # which made the C-vs-P comparison vacuous; it must use P.Decimal.
        x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
    """Miscellaneous tests that exist mainly for code coverage; run against
    both implementations (self.decimal is set to C or P by the subclasses).
    """

    def test_adjusted(self):
        Decimal = self.decimal.Decimal

        self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
        # XXX raise?
        self.assertEqual(Decimal('nan').adjusted(), 0)
        self.assertEqual(Decimal('inf').adjusted(), 0)

    def test_canonical(self):
        # canonical() is the identity for already-canonical operands.
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext

        x = Decimal(9).canonical()
        self.assertEqual(x, 9)

        c = getcontext()
        x = c.canonical(Decimal(9))
        self.assertEqual(x, 9)

    def test_context_repr(self):
        # With all flags/traps cleared, repr() must print empty lists.
        c = self.decimal.DefaultContext.copy()

        c.prec = 425000000
        c.Emax = 425000000
        c.Emin = -425000000
        c.rounding = ROUND_HALF_DOWN
        c.capitals = 0
        c.clamp = 1
        for sig in OrderedSignals[self.decimal]:
            c.flags[sig] = False
            c.traps[sig] = False

        s = c.__repr__()
        t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
            "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
            "flags=[], traps=[])"
        self.assertEqual(s, t)

    def test_implicit_context(self):
        # Exercise operators and methods that implicitly use the current
        # thread-local context (prec=1, Emax=1, Emin=-1 here).
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1

            # abs
            self.assertEqual(abs(Decimal("-10")), 10)
            # add
            self.assertEqual(Decimal("7") + 1, 8)
            # divide
            self.assertEqual(Decimal("10") / 5, 2)
            # divide_int
            self.assertEqual(Decimal("10") // 7, 1)
            # fma
            self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
            self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
            # three arg power
            self.assertEqual(pow(Decimal(10), 2, 7), 2)
            # exp
            self.assertEqual(Decimal("1.01").exp(), 3)
            # is_normal
            self.assertIs(Decimal("0.01").is_normal(), False)
            # is_subnormal
            self.assertIs(Decimal("0.01").is_subnormal(), True)
            # ln
            self.assertEqual(Decimal("20").ln(), 3)
            # log10
            self.assertEqual(Decimal("20").log10(), 1)
            # logb
            self.assertEqual(Decimal("580").logb(), 2)
            # logical_invert
            self.assertEqual(Decimal("10").logical_invert(), 1)
            # minus
            self.assertEqual(-Decimal("-10"), 10)
            # multiply
            self.assertEqual(Decimal("2") * 4, 8)
            # next_minus
            self.assertEqual(Decimal("10").next_minus(), 9)
            # next_plus
            self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
            # normalize
            self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
            # number_class
            self.assertEqual(Decimal("10").number_class(), '+Normal')
            # plus
            self.assertEqual(+Decimal("-1"), -1)
            # remainder
            self.assertEqual(Decimal("10") % 7, 3)
            # subtract
            self.assertEqual(Decimal("10") - 7, 3)
            # to_integral_exact
            self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)

            # Boolean functions
            self.assertTrue(Decimal("1").is_canonical())
            self.assertTrue(Decimal("1").is_finite())
            self.assertTrue(Decimal("1").is_finite())
            self.assertTrue(Decimal("snan").is_snan())
            self.assertTrue(Decimal("-1").is_signed())
            self.assertTrue(Decimal("0").is_zero())
            self.assertTrue(Decimal("0").is_zero())

        # Copy
        with localcontext() as c:
            c.prec = 10000
            x = 1228 ** 1523
            y = -Decimal(x)

            z = y.copy_abs()
            self.assertEqual(z, x)

            z = y.copy_negate()
            self.assertEqual(z, x)

            z = y.copy_sign(Decimal(1))
            self.assertEqual(z, x)

    def test_divmod(self):
        # divmod() with NaN/infinity/zero operands, with and without the
        # InvalidOperation and DivisionByZero traps.
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero

        with localcontext() as c:
            q, r = divmod(Decimal("10912837129"), 1001)
            self.assertEqual(q, Decimal('10901935'))
            self.assertEqual(r, Decimal('194'))

            q, r = divmod(Decimal("NaN"), 7)
            self.assertTrue(q.is_nan() and r.is_nan())

            c.traps[InvalidOperation] = False
            q, r = divmod(Decimal("NaN"), 7)
            self.assertTrue(q.is_nan() and r.is_nan())

            c.traps[InvalidOperation] = False
            c.clear_flags()
            q, r = divmod(Decimal("inf"), Decimal("inf"))
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            q, r = divmod(Decimal("inf"), 101)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            q, r = divmod(Decimal(0), 0)
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.traps[DivisionByZero] = False
            c.clear_flags()
            q, r = divmod(Decimal(11), 0)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation] and
                            c.flags[DivisionByZero])

    def test_power(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        Overflow = self.decimal.Overflow
        Rounded = self.decimal.Rounded

        with localcontext() as c:
            c.prec = 3
            c.clear_flags()
            self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
            self.assertTrue(c.flags[Rounded])

            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.clear_flags()
            c.traps[Overflow] = False
            self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
            self.assertTrue(c.flags[Overflow])

    def test_quantize(self):
        # With InvalidOperation untrapped, an impossible quantize yields NaN.
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation

        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.traps[InvalidOperation] = False
            x = Decimal(99).quantize(Decimal("1e1"))
            self.assertTrue(x.is_nan())

    def test_radix(self):
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext

        c = getcontext()
        self.assertEqual(Decimal("1").radix(), 10)
        self.assertEqual(c.radix(), 10)

    def test_rop(self):
        # Reflected operators must return NotImplemented for foreign types.
        Decimal = self.decimal.Decimal

        for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
                     '__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
            self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)

    def test_round(self):
        # Python3 behavior: round() returns Decimal
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext

        c = getcontext()
        c.prec = 28

        self.assertEqual(str(Decimal("9.99").__round__()), "10")
        self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
        self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
        self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
        self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")

        self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
        self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)

    def test_create_decimal(self):
        # The argument must be convertible to a decimal string, not a list.
        c = self.decimal.Context()
        self.assertRaises(ValueError, c.create_decimal, ["%"])

    def test_int(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            c.prec = 9999
            # int() truncates, to_integral() rounds with the context mode.
            x = Decimal(1221**1271) / 10**3923
            self.assertEqual(int(x), 1)
            self.assertEqual(x.to_integral(), 2)

    def test_copy(self):
        Context = self.decimal.Context

        c = Context()
        c.prec = 10000
        x = -(1172 ** 1712)

        y = c.copy_abs(x)
        self.assertEqual(y, -x)

        y = c.copy_negate(x)
        self.assertEqual(y, -x)

        y = c.copy_sign(x, 1)
        self.assertEqual(y, -x)
class CCoverage(Coverage):
    # Run the Coverage tests against the C implementation (_decimal).
    decimal = C
class PyCoverage(Coverage):
    # Run the Coverage tests against the Python implementation.
    decimal = P
class PyFunctionality(unittest.TestCase):
    """Extra functionality in decimal.py"""

    def test_py_alternate_formatting(self):
        """Alternate ('#') form formatting, specific to the Python version.

        Fix: removed the unused local alias ``localcontext``.
        """
        # triples giving a format, a Decimal, and the expected result
        Decimal = P.Decimal

        test_values = [
            # Issue 7094: Alternate formatting (specified by #)
            ('.0e', '1.0', '1e+0'),
            ('#.0e', '1.0', '1.e+0'),
            ('.0f', '1.0', '1'),
            ('#.0f', '1.0', '1.'),
            ('g', '1.1', '1.1'),
            ('#g', '1.1', '1.1'),
            ('.0g', '1', '1'),
            ('#.0g', '1', '1.'),
            ('.0%', '1.0', '100%'),
            ('#.0%', '1.0', '100.%'),
        ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
    """White box testing for decimal.py"""

    def test_py_exact_power(self):
        # Rarely exercised lines in _power_exact.
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            c.prec = 8
            x = Decimal(2**16) ** Decimal("-0.5")
            self.assertEqual(x, Decimal('0.00390625'))

            x = Decimal(2**16) ** Decimal("-0.6")
            self.assertEqual(x, Decimal('0.0012885819'))

            # The remaining calls only exercise code paths; the results
            # are not asserted.
            x = Decimal("256e7") ** Decimal("-0.5")

            x = Decimal(152587890625) ** Decimal('-0.0625')
            self.assertEqual(x, Decimal("0.2"))

            x = Decimal("152587890625e7") ** Decimal('-0.0625')

            x = Decimal(5**2659) ** Decimal('-0.0625')

            c.prec = 1
            x = Decimal("152587890625") ** Decimal('-0.5')
            c.prec = 201
            x = Decimal(2**578) ** Decimal("-0.5")

    def test_py_immutability_operations(self):
        # Do operations and check that it didn't change internal objects.
        Decimal = P.Decimal
        DefaultContext = P.DefaultContext
        setcontext = P.setcontext

        c = DefaultContext.copy()
        c.traps = dict((s, 0) for s in OrderedSignals[P])
        setcontext(c)

        d1 = Decimal('-25e55')
        b1 = Decimal('-25e55')
        d2 = Decimal('33e+33')
        b2 = Decimal('33e+33')

        def checkSameDec(operation, useOther=False):
            # Run the named operation via eval and verify that neither
            # operand's internal state (_sign/_int/_exp) was mutated.
            if useOther:
                eval("d1." + operation + "(d2)")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)
                self.assertEqual(d2._sign, b2._sign)
                self.assertEqual(d2._int, b2._int)
                self.assertEqual(d2._exp, b2._exp)
            else:
                eval("d1." + operation + "()")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)

        Decimal(d1)
        self.assertEqual(d1._sign, b1._sign)
        self.assertEqual(d1._int, b1._int)
        self.assertEqual(d1._exp, b1._exp)

        checkSameDec("__abs__")
        checkSameDec("__add__", True)
        checkSameDec("__divmod__", True)
        checkSameDec("__eq__", True)
        checkSameDec("__ne__", True)
        checkSameDec("__le__", True)
        checkSameDec("__lt__", True)
        checkSameDec("__ge__", True)
        checkSameDec("__gt__", True)
        checkSameDec("__float__")
        checkSameDec("__floordiv__", True)
        checkSameDec("__hash__")
        checkSameDec("__int__")
        checkSameDec("__trunc__")
        checkSameDec("__mod__", True)
        checkSameDec("__mul__", True)
        checkSameDec("__neg__")
        checkSameDec("__bool__")
        checkSameDec("__pos__")
        checkSameDec("__pow__", True)
        checkSameDec("__radd__", True)
        checkSameDec("__rdivmod__", True)
        checkSameDec("__repr__")
        checkSameDec("__rfloordiv__", True)
        checkSameDec("__rmod__", True)
        checkSameDec("__rmul__", True)
        checkSameDec("__rpow__", True)
        checkSameDec("__rsub__", True)
        checkSameDec("__str__")
        checkSameDec("__sub__", True)
        checkSameDec("__truediv__", True)
        checkSameDec("adjusted")
        checkSameDec("as_tuple")
        checkSameDec("compare", True)
        checkSameDec("max", True)
        checkSameDec("min", True)
        checkSameDec("normalize")
        checkSameDec("quantize", True)
        checkSameDec("remainder_near", True)
        checkSameDec("same_quantum", True)
        checkSameDec("sqrt")
        checkSameDec("to_eng_string")
        checkSameDec("to_integral")

    def test_py_decimal_id(self):
        # Construction from another Decimal copies the value but must
        # produce a distinct object.
        Decimal = P.Decimal

        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')
        self.assertNotEqual(id(d), id(e))

    def test_py_rescale(self):
        # Coverage
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            x = Decimal("NaN")._rescale(3, ROUND_UP)
            self.assertTrue(x.is_nan())

    def test_py__round(self):
        # Coverage
        Decimal = P.Decimal

        self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
    """Extra functionality in _decimal"""

    @requires_extra_functionality
    def test_c_ieee_context(self):
        # issue 8786: Add support for IEEE 754 contexts to decimal module.
        IEEEContext = C.IEEEContext
        DECIMAL32 = C.DECIMAL32
        DECIMAL64 = C.DECIMAL64
        DECIMAL128 = C.DECIMAL128

        def assert_rest(self, context):
            # Common properties of all IEEE interchange contexts.
            self.assertEqual(context.clamp, 1)
            assert_signals(self, context, 'traps', [])
            assert_signals(self, context, 'flags', [])

        c = IEEEContext(DECIMAL32)
        self.assertEqual(c.prec, 7)
        self.assertEqual(c.Emax, 96)
        self.assertEqual(c.Emin, -95)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL64)
        self.assertEqual(c.prec, 16)
        self.assertEqual(c.Emax, 384)
        self.assertEqual(c.Emin, -383)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL128)
        self.assertEqual(c.prec, 34)
        self.assertEqual(c.Emax, 6144)
        self.assertEqual(c.Emin, -6143)
        assert_rest(self, c)

        # Invalid values
        self.assertRaises(OverflowError, IEEEContext, 2**63)
        self.assertRaises(ValueError, IEEEContext, -1)
        self.assertRaises(ValueError, IEEEContext, 1024)

    @requires_extra_functionality
    def test_c_context(self):
        # The private _flags/_traps bitmask attributes reflect the values
        # passed to the constructor.
        Context = C.Context

        c = Context(flags=C.DecClamped, traps=C.DecRounded)
        self.assertEqual(c._flags, C.DecClamped)
        self.assertEqual(c._traps, C.DecRounded)

    @requires_extra_functionality
    def test_constants(self):
        # Condition flags
        cond = (
            C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
            C.DecDivisionImpossible, C.DecDivisionUndefined,
            C.DecFpuError, C.DecInexact, C.DecInvalidContext,
            C.DecInvalidOperation, C.DecMallocError,
            C.DecFloatOperation, C.DecOverflow, C.DecRounded,
            C.DecSubnormal, C.DecUnderflow
        )

        # IEEEContext
        self.assertEqual(C.DECIMAL32, 32)
        self.assertEqual(C.DECIMAL64, 64)
        self.assertEqual(C.DECIMAL128, 128)
        self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)

        # Conditions: each constant is a distinct bit.
        for i, v in enumerate(cond):
            self.assertEqual(v, 1<<i)

        self.assertEqual(C.DecIEEEInvalidOperation,
                         C.DecConversionSyntax|
                         C.DecDivisionImpossible|
                         C.DecDivisionUndefined|
                         C.DecFpuError|
                         C.DecInvalidContext|
                         C.DecInvalidOperation|
                         C.DecMallocError)

        self.assertEqual(C.DecErrors,
                         C.DecIEEEInvalidOperation|
                         C.DecDivisionByZero)

        self.assertEqual(C.DecTraps,
                         C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
    # Not exactly whitebox, but too slow with pydecimal.
    Decimal = C.Decimal
    localcontext = C.localcontext

    b1 = 10**35
    b2 = 10**36
    with localcontext() as c:
        c.prec = 1000000
        # Compare large exact integer powers against int arithmetic.
        for i in range(5):
            a = random.randrange(b1, b2)
            b = random.randrange(1000, 1200)
            x = a ** b
            y = Decimal(a) ** Decimal(b)
            self.assertEqual(x, y)
def test_invalid_construction(self):
    # The second Decimal() argument must be a context, not a string.
    self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
    # Too large for _decimal to be converted exactly
    Decimal = C.Decimal
    InvalidOperation = C.InvalidOperation
    Context = C.Context
    localcontext = C.localcontext

    with localcontext(Context()):
        self.assertRaises(InvalidOperation, Decimal,
                          "1e9999999999999999999")
def test_c_context_repr(self):
    # This test is _decimal-only because flags are not printed
    # in the same order.
    DefaultContext = C.DefaultContext
    FloatOperation = C.FloatOperation

    c = DefaultContext.copy()

    c.prec = 425000000
    c.Emax = 425000000
    c.Emin = -425000000
    c.rounding = ROUND_HALF_DOWN
    c.capitals = 0
    c.clamp = 1
    # Set every signal, including FloatOperation (not in OrderedSignals).
    for sig in OrderedSignals[C]:
        c.flags[sig] = True
        c.traps[sig] = True
    c.flags[FloatOperation] = True
    c.traps[FloatOperation] = True

    s = c.__repr__()
    t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
        "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
        "flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
        "FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
        "traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
        "FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
    self.assertEqual(s, t)
def test_c_context_errors(self):
    # Input validation and overflow behavior of Context attributes in
    # _decimal; some error types are platform/word-size dependent.
    Context = C.Context
    InvalidOperation = C.InvalidOperation
    Overflow = C.Overflow
    FloatOperation = C.FloatOperation
    localcontext = C.localcontext
    getcontext = C.getcontext
    setcontext = C.setcontext
    HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

    c = Context()

    # SignalDict: input validation
    self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
    self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
    self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
    self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
    self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
    self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
    self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
    self.assertRaises(KeyError, setattr, c, 'traps', {0:1})

    # Test assignment from a signal dict with the correct length but
    # one invalid key.
    d = c.flags.copy()
    del d[FloatOperation]
    d["XYZ"] = 91283719
    self.assertRaises(KeyError, setattr, c, 'flags', d)
    self.assertRaises(KeyError, setattr, c, 'traps', d)

    # Input corner cases
    int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
    gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9

    # prec, Emax, Emin
    for attr in ['prec', 'Emax']:
        self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
    self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)

    # prec, Emax, Emin in context constructor
    self.assertRaises(ValueError, Context, prec=gt_max_emax)
    self.assertRaises(ValueError, Context, Emax=gt_max_emax)
    self.assertRaises(ValueError, Context, Emin=-gt_max_emax)

    # Overflow in conversion
    self.assertRaises(OverflowError, Context, prec=int_max+1)
    self.assertRaises(OverflowError, Context, Emax=int_max+1)
    self.assertRaises(OverflowError, Context, Emin=-int_max-2)
    self.assertRaises(OverflowError, Context, clamp=int_max+1)
    self.assertRaises(OverflowError, Context, capitals=int_max+1)

    # OverflowError, general ValueError
    for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
        self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
        self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
        if sys.platform != 'win32':
            # On win32 the C long is 32 bits, so int_max itself already
            # overflows the conversion; skip the in-range ValueError checks.
            self.assertRaises(ValueError, setattr, c, attr, int_max)
            self.assertRaises(ValueError, setattr, c, attr, -int_max-1)

    # OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
    if C.MAX_PREC == 425000000:
        self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
                          int_max+1)
        self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
                          int_max+1)
        self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
                          -int_max-2)

    # ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
    if C.MAX_PREC == 425000000:
        self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
        self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
                          1070000001)
        self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
        self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
                          1070000001)
        self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
                          -1070000001)
        self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)

    # capitals, clamp
    for attr in ['capitals', 'clamp']:
        self.assertRaises(ValueError, setattr, c, attr, -1)
        self.assertRaises(ValueError, setattr, c, attr, 2)
        self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
        if HAVE_CONFIG_64:
            self.assertRaises(ValueError, setattr, c, attr, 2**32)
            self.assertRaises(ValueError, setattr, c, attr, 2**32+1)

    # Invalid local context
    self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
                      locals())
    self.assertRaises(TypeError, exec,
                      'with localcontext(context=getcontext()): pass',
                      locals())

    # setcontext
    saved_context = getcontext()
    self.assertRaises(TypeError, setcontext, "xyz")
    setcontext(saved_context)
def test_rounding_strings_interned(self):
    # C and Python modules must share the identical rounding-mode string
    # objects, so identity comparisons work across implementations.
    self.assertIs(C.ROUND_UP, P.ROUND_UP)
    self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
    self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
    self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
    self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
    self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
    self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
    self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
    # Validation of the extra-functionality attributes (_allcr, _flags,
    # _traps) in _decimal.
    Context = C.Context
    InvalidOperation = C.InvalidOperation
    Overflow = C.Overflow
    localcontext = C.localcontext
    getcontext = C.getcontext
    setcontext = C.setcontext
    HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

    c = Context()

    # Input corner cases
    int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1

    # OverflowError, general ValueError
    self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
    self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
    if sys.platform != 'win32':
        self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
        self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)

    # OverflowError, general TypeError
    for attr in ('_flags', '_traps'):
        self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
        self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
        if sys.platform != 'win32':
            self.assertRaises(TypeError, setattr, c, attr, int_max)
            self.assertRaises(TypeError, setattr, c, attr, -int_max-1)

    # _allcr
    self.assertRaises(ValueError, setattr, c, '_allcr', -1)
    self.assertRaises(ValueError, setattr, c, '_allcr', 2)
    self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
    if HAVE_CONFIG_64:
        self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
        self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)

    # _flags, _traps
    for attr in ['_flags', '_traps']:
        self.assertRaises(TypeError, setattr, c, attr, 999999)
        self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
    # These tests are for code coverage in _decimal.
    DefaultContext = C.DefaultContext

    Clamped = C.Clamped
    Underflow = C.Underflow
    Inexact = C.Inexact
    Rounded = C.Rounded
    Subnormal = C.Subnormal

    c = DefaultContext.copy()

    # Exercise all getters and setters
    c.prec = 34
    c.rounding = ROUND_HALF_UP
    c.Emax = 3000
    c.Emin = -3000
    c.capitals = 1
    c.clamp = 0

    self.assertEqual(c.prec, 34)
    self.assertEqual(c.rounding, ROUND_HALF_UP)
    self.assertEqual(c.Emin, -3000)
    self.assertEqual(c.Emax, 3000)
    self.assertEqual(c.capitals, 1)
    self.assertEqual(c.clamp, 0)

    self.assertEqual(c.Etiny(), -3033)
    self.assertEqual(c.Etop(), 2967)

    # Exercise all unsafe setters
    # (only on builds without 64-bit config, i.e. MAX_PREC == 425000000).
    if C.MAX_PREC == 425000000:
        c._unsafe_setprec(999999999)
        c._unsafe_setemax(999999999)
        c._unsafe_setemin(-999999999)
        self.assertEqual(c.prec, 999999999)
        self.assertEqual(c.Emax, 999999999)
        self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
    DefaultContext = C.DefaultContext

    c = DefaultContext.copy()
    # _allcr defaults to 1 and can be toggled to 0.
    self.assertEqual(c._allcr, 1)
    c._allcr = 0
    self.assertEqual(c._allcr, 0)
    def test_c_round(self):
        """__round__ rejects out-of-range ndigits arguments.

        Restricted input.
        """
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        localcontext = C.localcontext
        MAX_EMAX = C.MAX_EMAX
        MIN_ETINY = C.MIN_ETINY
        # Largest value representable for this build's configuration.
        int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
        with localcontext() as c:
            c.traps[InvalidOperation] = True
            # Representable but out-of-bounds exponents signal InvalidOperation.
            self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
                              -int_max-1)
            self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
                              int_max)
            self.assertRaises(InvalidOperation, Decimal("1").__round__,
                              int(MAX_EMAX+1))
            self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
                              -int(MIN_ETINY-1))
            # Values that cannot be converted at all raise OverflowError.
            self.assertRaises(OverflowError, Decimal("1.23").__round__,
                              -int_max-2)
            self.assertRaises(OverflowError, Decimal("1.23").__round__,
                              int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
    def test_c_integral(self):
        """to_integral{,_value,_exact}: argument checks and explicit rounding."""
        Decimal = C.Decimal
        Inexact = C.Inexact
        localcontext = C.localcontext
        x = Decimal(10)
        self.assertEqual(x.to_integral(), 10)
        # Invalid rounding/context arguments raise TypeError.
        self.assertRaises(TypeError, x.to_integral, '10')
        self.assertRaises(TypeError, x.to_integral, 10, 'x')
        self.assertRaises(TypeError, x.to_integral, 10)
        self.assertEqual(x.to_integral_value(), 10)
        self.assertRaises(TypeError, x.to_integral_value, '10')
        self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_value, 10)
        self.assertEqual(x.to_integral_exact(), 10)
        self.assertRaises(TypeError, x.to_integral_exact, '10')
        self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_exact, 10)
        with localcontext() as c:
            # An explicit rounding mode overrides the context's.
            x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))
            x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))
            # Only to_integral_exact signals Inexact.
            c.traps[Inexact] = True
            self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
    def test_c_funcs(self):
        """Invalid arguments to assorted Decimal/Context methods."""
        # Invalid arguments
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        DivisionByZero = C.DivisionByZero
        getcontext = C.getcontext
        localcontext = C.localcontext
        self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
        self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
        self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
        self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
        # quantize(): second positional must be a rounding mode, third a
        # context; anything else raises TypeError.
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), []
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
        )
        with localcontext() as c:
            c.clear_traps()
            # Invalid arguments
            self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
            self.assertRaises(TypeError, c.canonical, 200)
            self.assertRaises(TypeError, c.is_canonical, 200)
            self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
            self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
            self.assertEqual(str(c.canonical(Decimal(200))), '200')
            self.assertEqual(c.radix(), 10)
            # divmod(x, 0) signals *both* InvalidOperation and DivisionByZero:
            # the trapped one raises, the other is left behind in the flags.
            c.traps[DivisionByZero] = True
            self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
            self.assertRaises(DivisionByZero, c.divmod, 9, 0)
            self.assertTrue(c.flags[InvalidOperation])
            c.clear_flags()
            c.traps[InvalidOperation] = True
            self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
            self.assertRaises(InvalidOperation, c.divmod, 9, 0)
            self.assertTrue(c.flags[DivisionByZero])
            # Three-argument pow() with prec=2 cannot represent the result
            # exactly and signals InvalidOperation.
            c.traps[InvalidOperation] = True
            c.prec = 2
            self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
    @requires_extra_functionality
    def test_c_context_templates(self):
        """The shipped context templates carry the expected trap masks."""
        self.assertEqual(
            C.BasicContext._traps,
            C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
            C.DecUnderflow|C.DecClamped
        )
        self.assertEqual(
            C.DefaultContext._traps,
            C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
        )
    @requires_extra_functionality
    def test_c_signal_dict(self):
        """SignalDict coverage: mapping protocol plus the raw _flags/_traps ints."""
        # SignalDict coverage
        Context = C.Context
        DefaultContext = C.DefaultContext
        InvalidOperation = C.InvalidOperation
        DivisionByZero = C.DivisionByZero
        Overflow = C.Overflow
        Subnormal = C.Subnormal
        Underflow = C.Underflow
        Rounded = C.Rounded
        Inexact = C.Inexact
        Clamped = C.Clamped
        DecClamped = C.DecClamped
        DecInvalidOperation = C.DecInvalidOperation
        DecIEEEInvalidOperation = C.DecIEEEInvalidOperation

        def assertIsExclusivelySet(signal, signal_dict):
            # Exactly `signal` is set; every other entry is clear.
            for sig in signal_dict:
                if sig == signal:
                    self.assertTrue(signal_dict[sig])
                else:
                    self.assertFalse(signal_dict[sig])

        c = DefaultContext.copy()

        # Signal dict methods
        self.assertTrue(Overflow in c.traps)
        c.clear_traps()
        for k in c.traps.keys():
            c.traps[k] = True
        for v in c.traps.values():
            self.assertTrue(v)
        c.clear_traps()
        for k, v in c.traps.items():
            self.assertFalse(v)
        self.assertFalse(c.flags.get(Overflow))
        self.assertIs(c.flags.get("x"), None)
        self.assertEqual(c.flags.get("x", "y"), "y")
        self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
        self.assertEqual(len(c.flags), len(c.traps))
        # __sizeof__ and __repr__ are exercised purely for coverage.
        s = sys.getsizeof(c.flags)
        s = sys.getsizeof(c.traps)
        s = c.flags.__repr__()

        # Set flags/traps.
        c.clear_flags()
        c._flags = DecClamped
        self.assertTrue(c.flags[Clamped])
        c.clear_traps()
        c._traps = DecInvalidOperation
        self.assertTrue(c.traps[InvalidOperation])

        # Set flags/traps from dictionary.
        c.clear_flags()
        d = c.flags.copy()
        d[DivisionByZero] = True
        c.flags = d
        assertIsExclusivelySet(DivisionByZero, c.flags)
        c.clear_traps()
        d = c.traps.copy()
        d[Underflow] = True
        c.traps = d
        assertIsExclusivelySet(Underflow, c.traps)

        # Random constructors
        IntSignals = {
            Clamped: C.DecClamped,
            Rounded: C.DecRounded,
            Inexact: C.DecInexact,
            Subnormal: C.DecSubnormal,
            Underflow: C.DecUnderflow,
            Overflow: C.DecOverflow,
            DivisionByZero: C.DecDivisionByZero,
            InvalidOperation: C.DecIEEEInvalidOperation
        }
        IntCond = [
            C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
            C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
            C.DecConversionSyntax,
        ]
        lim = len(OrderedSignals[C])
        for r in range(lim):
            for t in range(lim):
                for round in RoundingModes:
                    flags = random.sample(OrderedSignals[C], r)
                    traps = random.sample(OrderedSignals[C], t)
                    prec = random.randrange(1, 10000)
                    emin = random.randrange(-10000, 0)
                    emax = random.randrange(0, 10000)
                    clamp = random.randrange(0, 2)
                    caps = random.randrange(0, 2)
                    cr = random.randrange(0, 2)
                    c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
                                capitals=caps, clamp=clamp, flags=list(flags),
                                traps=list(traps))
                    self.assertEqual(c.prec, prec)
                    self.assertEqual(c.rounding, round)
                    self.assertEqual(c.Emin, emin)
                    self.assertEqual(c.Emax, emax)
                    self.assertEqual(c.capitals, caps)
                    self.assertEqual(c.clamp, clamp)
                    # The constructor must fold the flag/trap lists into the
                    # equivalent integer bitmasks.
                    f = 0
                    for x in flags:
                        f |= IntSignals[x]
                    self.assertEqual(c._flags, f)
                    f = 0
                    for x in traps:
                        f |= IntSignals[x]
                    self.assertEqual(c._traps, f)

        # Internal condition codes all map onto IEEE invalid-operation.
        for cond in IntCond:
            c._flags = cond
            self.assertTrue(c._flags&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.flags)
        for cond in IntCond:
            c._traps = cond
            self.assertTrue(c._traps&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.traps)
    def test_invalid_override(self):
        """Formatting with an invalid locale-override dict raises ValueError."""
        Decimal = C.Decimal
        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            # Encode a grouping list as localeconv() does: one char per group.
            return ''.join([chr(x) for x in lst])

        def get_fmt(x, override=None, fmt='n'):
            return Decimal(x).__format__(fmt, override)

        invalid_grouping = {
            'decimal_point' : ',',
            'grouping' : make_grouping([255, 255, 0]),
            'thousands_sep' : ','
        }
        invalid_dot = {
            'decimal_point' : 'xxxxx',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
        }
        invalid_sep = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : 'yyyyy'
        }
        # chr(255) is only a negative group size where char is signed.
        if CHAR_MAX == 127: # negative grouping in override
            self.assertRaises(ValueError, get_fmt, 12345,
                              invalid_grouping, 'g')
        self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
        self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
    def test_from_tuple(self):
        """Construction from (sign, digits, exponent) tuples: exponent limits."""
        Decimal = C.Decimal
        localcontext = C.localcontext
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        Underflow = C.Underflow
        with localcontext() as c:
            c.traps[InvalidOperation] = True
            c.traps[Overflow] = True
            c.traps[Underflow] = True
            # SSIZE_MAX
            x = (1, (), sys.maxsize)
            # create_decimal() clamps the out-of-range exponent (result shown
            # below), while exact construction via Decimal() traps.
            self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
            self.assertRaises(InvalidOperation, Decimal, x)
            x = (1, (0, 1, 2), sys.maxsize)
            self.assertRaises(Overflow, c.create_decimal, x)
            self.assertRaises(InvalidOperation, Decimal, x)
            # SSIZE_MIN
            x = (1, (), -sys.maxsize-1)
            self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
            self.assertRaises(InvalidOperation, Decimal, x)
            x = (1, (0, 1, 2), -sys.maxsize-1)
            self.assertRaises(Underflow, c.create_decimal, x)
            self.assertRaises(InvalidOperation, Decimal, x)
            # OverflowError: exponent does not even fit in a Py_ssize_t.
            x = (1, (), sys.maxsize+1)
            self.assertRaises(OverflowError, c.create_decimal, x)
            self.assertRaises(OverflowError, Decimal, x)
            x = (1, (), -sys.maxsize-2)
            self.assertRaises(OverflowError, c.create_decimal, x)
            self.assertRaises(OverflowError, Decimal, x)
            # Specials: the exponent slot may hold "N" to build an sNaN.
            x = (1, (), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN')
            x = (1, (0,), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN')
            x = (1, (0, 1), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
    def test_internal_use_of_overridden_methods(self):
        """from_float() must not trust methods overridden by odd subclasses."""
        Decimal = C.Decimal
        # Unsound subtyping: these classes return nonsense from the special
        # methods that from_float() might be tempted to call.
        class X(float):
            def as_integer_ratio(self):
                return 1
            def __abs__(self):
                return self
        class Y(float):
            def __abs__(self):
                return [1]*200
        class I(int):
            def bit_length(self):
                return [1]*200
        class Z(float):
            def as_integer_ratio(self):
                return (I(1), I(1))
            def __abs__(self):
                return self
        # Regardless of the overrides, conversion must match the plain float.
        for cls in X, Y, Z:
            self.assertEqual(Decimal.from_float(cls(101.1)),
                             Decimal.from_float(101.1))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
    """Function signatures

    Check that the C implementation exposes the same introspectable
    signatures (names and parameter kinds) as the pure-Python version.
    """
    def test_inspect_module(self):
        # Every public module-level callable must agree between C and P.
        for attr in dir(P):
            if attr.startswith('_'):
                continue
            p_func = getattr(P, attr)
            c_func = getattr(C, attr)
            if (attr == 'Decimal' or attr == 'Context' or
                inspect.isfunction(p_func)):
                p_sig = inspect.signature(p_func)
                c_sig = inspect.signature(c_func)
                # parameter names (private-underscore params on the Python
                # side are implementation details and excluded):
                c_names = list(c_sig.parameters.keys())
                p_names = [x for x in p_sig.parameters.keys() if not
                           x.startswith('_')]
                self.assertEqual(c_names, p_names,
                                 msg="parameter name mismatch in %s" % p_func)
                c_kind = [x.kind for x in c_sig.parameters.values()]
                p_kind = [x[1].kind for x in p_sig.parameters.items() if not
                          x[0].startswith('_')]
                # parameters:
                if attr != 'setcontext':
                    self.assertEqual(c_kind, p_kind,
                                     msg="parameter kind mismatch in %s" % p_func)

    def test_inspect_types(self):
        POS = inspect._ParameterKind.POSITIONAL_ONLY
        POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
        # Type heuristic (type annotations would help!): map well-known
        # parameter names to plausible argument values per implementation.
        pdict = {C: {'other': C.Decimal(1),
                     'third': C.Decimal(1),
                     'x': C.Decimal(1),
                     'y': C.Decimal(1),
                     'z': C.Decimal(1),
                     'a': C.Decimal(1),
                     'b': C.Decimal(1),
                     'c': C.Decimal(1),
                     'exp': C.Decimal(1),
                     'modulo': C.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': C.ROUND_HALF_UP,
                     'context': C.getcontext()},
                 P: {'other': P.Decimal(1),
                     'third': P.Decimal(1),
                     'a': P.Decimal(1),
                     'b': P.Decimal(1),
                     'c': P.Decimal(1),
                     'exp': P.Decimal(1),
                     'modulo': P.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': P.ROUND_HALF_UP,
                     'context': P.getcontext()}}

        def mkargs(module, sig):
            # Build (args, kwargs) for a call based on the signature's
            # parameter names and kinds.
            args = []
            kwargs = {}
            for name, param in sig.parameters.items():
                if name == 'self': continue
                if param.kind == POS:
                    args.append(pdict[module][name])
                elif param.kind == POS_KWD:
                    kwargs[name] = pdict[module][name]
                else:
                    raise TestFailed("unexpected parameter kind")
            return args, kwargs

        def tr(s):
            """The C Context docstrings use 'x' in order to prevent confusion
            with the article 'a' in the descriptions."""
            if s == 'x': return 'a'
            if s == 'y': return 'b'
            if s == 'z': return 'c'
            return s

        def doit(ty):
            # Compare every public method of the given type between C and P,
            # then actually call both with heuristically-built arguments.
            p_type = getattr(P, ty)
            c_type = getattr(C, ty)
            for attr in dir(p_type):
                if attr.startswith('_'):
                    continue
                p_func = getattr(p_type, attr)
                c_func = getattr(c_type, attr)
                if inspect.isfunction(p_func):
                    p_sig = inspect.signature(p_func)
                    c_sig = inspect.signature(c_func)
                    # parameter names:
                    p_names = list(p_sig.parameters.keys())
                    c_names = [tr(x) for x in c_sig.parameters.keys()]
                    self.assertEqual(c_names, p_names,
                                     msg="parameter name mismatch in %s" % p_func)
                    p_kind = [x.kind for x in p_sig.parameters.values()]
                    c_kind = [x.kind for x in c_sig.parameters.values()]
                    # 'self' parameter:
                    self.assertIs(p_kind[0], POS_KWD)
                    self.assertIs(c_kind[0], POS)
                    # remaining parameters:
                    if ty == 'Decimal':
                        self.assertEqual(c_kind[1:], p_kind[1:],
                                         msg="parameter kind mismatch in %s" % p_func)
                    else: # Context methods are positional only in the C version.
                        self.assertEqual(len(c_kind), len(p_kind),
                                         msg="parameter kind mismatch in %s" % p_func)
                    # Run the function:
                    args, kwds = mkargs(C, c_sig)
                    try:
                        getattr(c_type(9), attr)(*args, **kwds)
                    except Exception as err:
                        raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
                    args, kwds = mkargs(P, p_sig)
                    try:
                        getattr(p_type(9), attr)(*args, **kwds)
                    except Exception as err:
                        raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))

        doit('Decimal')
        doit('Context')
# Paired (C implementation, pure-Python implementation) test classes.
all_tests = [
    CExplicitConstructionTest, PyExplicitConstructionTest,
    CImplicitConstructionTest, PyImplicitConstructionTest,
    CFormatTest, PyFormatTest,
    CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
    CThreadingTest, PyThreadingTest,
    CUsabilityTest, PyUsabilityTest,
    CPythonAPItests, PyPythonAPItests,
    CContextAPItests, PyContextAPItests,
    CContextWithStatement, PyContextWithStatement,
    CContextFlags, PyContextFlags,
    CSpecialContexts, PySpecialContexts,
    CContextInputValidation, PyContextInputValidation,
    CContextSubclassing, PyContextSubclassing,
    CCoverage, PyCoverage,
    CFunctionality, PyFunctionality,
    CWhitebox, PyWhitebox,
    CIBMTestCases, PyIBMTestCases,
]

# Delete C tests if _decimal.so is not present.
if not C:
    # The list interleaves (C, Py) pairs, so the odd slots hold the
    # pure-Python variants.
    all_tests = all_tests[1::2]
else:
    all_tests.insert(0, CheckAttributes)
    all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
    """ Execute the tests.

    Runs all arithmetic tests if arith is True or if the "decimal" resource
    is enabled in regrtest.py

    Args:
        arith: run the full arithmetic suite (defaults to the regrtest
            "decimal" resource setting when None).
        verbose: forwarded to run_doctest().
        todo_tests: optional list of .decTest basenames; restricts the run
            to the IBM test-case classes for just those files.
        debug: stored in the module-global DEBUG flag.
    """
    init(C)
    init(P)
    global TEST_ALL, DEBUG
    TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
    DEBUG = debug

    if todo_tests is None:
        test_classes = all_tests
    else:
        test_classes = [CIBMTestCases, PyIBMTestCases]

    # Dynamically build custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class. This
    # procedure insures that new files do not get skipped.
    for filename in os.listdir(directory):
        if '.decTest' not in filename or filename.startswith("."):
            continue
        head, tail = filename.split('.')
        if todo_tests is not None and head not in todo_tests:
            continue
        # Bind the current filename as a default argument so each generated
        # test evaluates its own file (avoids the late-binding-closure trap).
        tester = lambda self, f=filename: self.eval_file(directory + f)
        setattr(CIBMTestCases, 'test_' + head, tester)
        setattr(PyIBMTestCases, 'test_' + head, tester)
        del filename, head, tail, tester

    try:
        run_unittest(*test_classes)
        if todo_tests is None:
            from doctest import IGNORE_EXCEPTION_DETAIL
            # Run doctests against both implementations, temporarily
            # installing each one as the public 'decimal' module.
            savedecimal = sys.modules['decimal']
            if C:
                sys.modules['decimal'] = C
                run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
            sys.modules['decimal'] = P
            run_doctest(P, verbose)
            sys.modules['decimal'] = savedecimal
    finally:
        # Always restore the contexts saved by init().
        if C: C.setcontext(ORIGINAL_CONTEXT[C])
        P.setcontext(ORIGINAL_CONTEXT[P])
        if not C:
            warnings.warn('C tests skipped: no module named _decimal.',
                          UserWarning)
        if not orig_sys_decimal is sys.modules['decimal']:
            raise TestFailed("Internal error: unbalanced number of changes to "
                             "sys.modules['decimal'].")
if __name__ == '__main__':
    import optparse
    p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
    p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
    p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
    (opt, args) = p.parse_args()

    # --skip runs with the reduced arithmetic suite; explicit positional
    # names restrict the run to the matching .decTest files.
    if opt.skip:
        test_main(arith=False, verbose=True)
    elif args:
        test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
    else:
        test_main(arith=True, verbose=True)
|
session.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Manage sessions to the GraphScope coordinator.
"""
import atexit
import base64
import contextlib
import copy
import json
import logging
import os
import random
import signal
import socket
import subprocess
import sys
import threading
import time
from queue import Empty as EmptyQueue
try:
from kubernetes import config as kube_config
except ImportError:
kube_config = None
import graphscope
from graphscope.client.rpc import GRPCClient
from graphscope.config import GSConfig as gs_config
from graphscope.deploy.kubernetes.cluster import KubernetesCluster
from graphscope.framework.errors import ConnectionError
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import InteractiveEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import K8sError
from graphscope.framework.errors import LearningEngineInternalError
from graphscope.framework.errors import check_argument
from graphscope.framework.operation import Operation
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
try:
import gscoordinator
COORDINATOR_HOME = os.path.abspath(os.path.join(gscoordinator.__file__, "..", ".."))
except ModuleNotFoundError:
# If gscoordinator is not installed, try to locate it by relative path,
# which is strong related with the directory structure of GraphScope
COORDINATOR_HOME = os.path.abspath(
os.path.join(__file__, "..", "..", "..", "..", "coordinator")
)
DEFAULT_CONFIG_FILE = os.environ.get("GS_CONFIG_PATH", "")
_session_dict = {}
logger = logging.getLogger("graphscope")
class Session(object):
"""A class for interacting with GraphScope graph computation service cluster.
A :class:`Session` object encapsulates the environment in which :class:`Operation`
objects are executed/evaluated.
A session may own resources. It is important to release these resources when
they are no longer required. To do this, invoke the :meth:`close` method
on the session.
A Session can register itself as default session with :meth:`as_default`, and all operations
after that will use the default session. Session deregister itself as a default session
when closed.
The following example demonstrates its usage:
.. code:: python
>>> import graphscope as gs
>>> # use session object explicitly
>>> s = gs.session()
>>> g = s.load_from('xxx')
>>> r = s.sssp(g, 4)
>>> s.close()
>>> # or use a session as default
>>> s = gs.session().as_default()
>>> g = gs.load_from('xxx')
>>> r = gs.sssp(g, 4)
>>> s.close()
We support setup a service cluster and create a RPC session in following ways:
- GraphScope graph computation service run in cluster managed by kubernetes.
>>> s = graphscope.session()
Also, :class:`Session` provides several keyword params for users to define the cluster.
You may use the param :code:`k8s_gs_image` to specify the image for all engine pod, and
param :code:`k8s_engine_cpu` or :code:`k8s_engine_mem` to specify the resources. More,
you can find all params detail in :meth:`__init__` method.
>>> s = graphscope.session(
... k8s_gs_image="registry.cn-hongkong.aliyuncs.com/graphscope/graphscope:latest",
... k8s_vineyard_cpu=0.1,
... k8s_vineyard_mem="256Mi",
... k8s_vineyard_shared_mem="4Gi",
... k8s_engine_cpu=0.1,
... k8s_engine_mem="256Mi")
- or all params can be provided by a json configuration file or configuration dict.
>>> s = graphscope.session(config='/tmp/config.json')
>>> # Or
>>> s = graphscope.session(config={'k8s_engine_cpu': 5, 'k8s_engine_mem': '5Gi'})
"""
    def __init__(
        self,
        config=None,
        num_workers=gs_config.NUM_WORKERS,
        log_level=gs_config.LOG_LEVEL,
        show_log=gs_config.SHOW_LOG,
        k8s_namespace=gs_config.NAMESPACE,
        k8s_service_type=gs_config.SERVICE_TYPE,
        k8s_gs_image=gs_config.GS_IMAGE,
        k8s_etcd_image=gs_config.ETCD_IMAGE,
        k8s_gie_graph_manager_image=gs_config.GIE_GRAPH_MANAGER_IMAGE,
        k8s_zookeeper_image=gs_config.ZOOKEEPER_IMAGE,
        k8s_image_pull_policy=gs_config.IMAGE_PULL_POLICY,
        k8s_image_pull_secrets=gs_config.IMAGE_PULL_SECRETS,
        k8s_coordinator_cpu=gs_config.COORDINATOR_CPU,
        k8s_coordinator_mem=gs_config.COORDINATOR_MEM,
        k8s_vineyard_cpu=gs_config.VINEYARD_CPU,
        k8s_vineyard_mem=gs_config.VINEYARD_MEM,
        k8s_vineyard_shared_mem=gs_config.VINEYARD_SHARED_MEM,
        k8s_engine_cpu=gs_config.ENGINE_CPU,
        k8s_engine_mem=gs_config.ENGINE_MEM,
        k8s_waiting_for_delete=gs_config.WAITING_FOR_DELETE,
        timeout_seconds=gs_config.TIMEOUT_SECONDS,
        **kw
    ):
        """Construct a new GraphScope session.

        Args:
            config (dict or str, optional): The configuration dict or file about how to launch the GraphScope instance.
                For str, it will identify it as a path and read the configuration file to build a
                session if file exist. If not specified, the global default configuration
                :code:`DEFAULT_CONFIG_FILE` will be used, which get value of GS_CONFIG_PATH
                in environment. Note that it will overwrite explicit parameters. Defaults to None.
            num_workers (int, optional): The number of workers to launch GraphScope engine. Defaults to 2.
            log_level (str, optional): One of in ['info', 'debug'], Defaults to 'info'.
            show_log (bool, optional): If true, it will fetch and print engines's log from python client.
                Default to false.
            k8s_namespace (str, optional): Contains the namespace to create all resource inside.
                If param missing or the namespace not exist, a random namespace will be created and deleted
                when service stopping. Defaults to None.
            k8s_service_type (str, optional): Type determines how the GraphScope service is exposed.
                Valid options are NodePort, and LoadBalancer. Defaults to NodePort.
            k8s_gs_image (str, optional): The GraphScope engine's image.
            k8s_etcd_image (str, optional): The image of etcd, which used by vineyard.
            k8s_image_pull_policy (str, optional): Kubernetes image pull policy. Defaults to "IfNotPresent".
            k8s_image_pull_secrets (list[str], optional): A list of secret name used to authorize pull image.
            k8s_gie_graph_manager_image (str, optional): The GraphScope interactive engine's graph manager image.
            k8s_zookeeper_image (str, optional): The image of zookeeper, which used by GIE graph manager.
            k8s_vineyard_cpu (float, optional): Minimum number of CPU cores request for vineyard container. Defaults to 0.5.
            k8s_vineyard_mem (str, optional): Minimum number of memory request for vineyard container. Defaults to '512Mi'.
            k8s_vineyard_shared_mem (str, optional): Init size of vineyard shared memory. Defaults to '4Gi'.
            k8s_engine_cpu (float, optional): Minimum number of CPU cores request for engine container. Defaults to 0.5.
            k8s_engine_mem (str, optional): Minimum number of memory request for engine container. Defaults to '4Gi'.
            k8s_coordinator_cpu (float, optional): Minimum number of CPU cores request for coordinator pod. Defaults to 1.0.
            k8s_coordinator_mem (str, optional): Minimum number of memory request for coordinator pod. Defaults to '4Gi'.
            timeout_seconds (int, optional): For waiting service ready (or waiting for delete if
                k8s_waiting_for_delete is True).
                Also, after seconds of client disconnect, coordinator will clean up this graphscope instance.
                Defaults to 600.
            k8s_waiting_for_delete (bool, optional): Waiting for service delete or not. Defaults to False.
            **kw (dict, optional): Other optional parameters will be put to :code:`**kw`.
                If your kubernetes cluster deployed on inner virtual machine
                (such as minikube with param --vm-driver is not None), you can specify
                :code:`k8s_minikube_vm_driver` is :code:`True`.

                k8s_client_config (dict, optional): Provide configurable parameters for connecting to remote k8s,
                    which strongly relies on the `kube_config.new_client_from_config` function.
                    eg: {"config_file": "~/.kube/config", "context": None, "persist_config": True}
                    config_file: Name of the kube-config file.
                    context: set the active context. If is set to None, current_context from config file will be used.
                    persist_config: If True, config file will be updated when changed(e.g GCP token refresh).

        Raises:
            TypeError: If the given argument combination is invalid and cannot be used to create
                a GraphScope session.
        """
        num_workers = int(num_workers)
        self._config_params = {}
        # Snapshot of the user-visible constructor parameters; pulled out of
        # locals() by name below.
        self._accessable_params = (
            "num_workers",
            "log_level",
            "show_log",
            "k8s_namespace",
            "k8s_service_type",
            "k8s_gs_image",
            "k8s_etcd_image",
            "k8s_image_pull_policy",
            "k8s_image_pull_secrets",
            "k8s_gie_graph_manager_image",
            "k8s_zookeeper_image",
            "k8s_coordinator_cpu",
            "k8s_coordinator_mem",
            "k8s_vineyard_cpu",
            "k8s_vineyard_mem",
            "k8s_vineyard_shared_mem",
            "k8s_engine_cpu",
            "k8s_engine_mem",
            "k8s_waiting_for_delete",
            "timeout_seconds",
        )
        saved_locals = locals()
        for param in self._accessable_params:
            self._config_params[param] = saved_locals[param]

        # parse config, which should be a path to config file, or dict
        # config has highest priority
        if isinstance(config, dict):
            self._config_params.update(config)
        elif isinstance(config, str):
            self._load_config(config)
        elif DEFAULT_CONFIG_FILE:
            self._load_config(DEFAULT_CONFIG_FILE)

        # update other optional params
        self._config_params.update(kw)

        self._config_logging(
            self._config_params["log_level"], self._config_params["show_log"]
        )

        self._config_params["addr"] = None

        # Reserved keyword for local testing.
        run_on_local = kw.pop("run_on_local", False)
        if not run_on_local:
            self._config_params["enable_k8s"] = True
        else:
            # NOTE(review): _run_on_local() is defined outside this view;
            # presumably it sets "enable_k8s" (close() reads it) — verify.
            self._run_on_local()

        # deploy minikube on virtual machine
        self._config_params["k8s_minikube_vm_driver"] = kw.pop(
            "k8s_minikube_vm_driver", False
        )

        # update k8s_client_config params
        self._config_params["k8s_client_config"] = kw.pop("k8s_client_config", {})

        # There should be no more custom keyword arguments.
        if kw:
            raise ValueError("Not recognized value: ", list(kw.keys()))

        logger.info(
            "Initializing graphscope session with parameters: %s", self._config_params
        )

        self._closed = False

        # set _session_type in self._connect()
        self._session_type = types_pb2.INVALID_SESSION

        # need clean up when session exit
        self._proc = None
        self._endpoint = None
        self._k8s_cluster = None
        self._heartbeat_sending_thread = None
        self._grpc_client = None
        self._session_id = None  # unique identifier across sessions

        # engine config:
        #
        #   {
        #       "experiment": "ON/OFF",
        #       "vineyard_socket": "...",
        #       "vineyard_rpc_endpoint": "..."
        #   }
        self._engine_config = None

        # interactive instance related graph map
        self._interactive_instance_dict = {}
        # learning engine related graph map
        self._learning_instance_dict = {}

        self._default_session = None

        # Ensure resources are released even if the user forgets close().
        atexit.register(self.close)

        # create and connect session
        self._proc, self._endpoint = self._connect()

        # heartbeat: a daemon thread pings the coordinator until close().
        self._heartbeat_interval_seconds = 5
        self._heartbeat_sending_thread = threading.Thread(
            target=self._send_heartbeat, args=()
        )
        self._heartbeat_sending_thread.daemon = True
        self._heartbeat_sending_thread.start()
def __repr__(self):
return str(self.info)
def __str__(self):
return repr(self)
    def _config_logging(self, log_level, show_log):
        """Configure the 'graphscope' logger.

        Args:
            log_level (str): Level name such as 'info' or 'debug'
                (case-insensitive); unknown names fall back to INFO.
            show_log (bool): If false, effectively silence the logger by
                raising its level to CRITICAL.
        """
        if show_log:
            if log_level:
                log_level = getattr(logging, log_level.upper(), logging.INFO)
            else:
                log_level = logging.INFO
            logging.getLogger("graphscope").setLevel(log_level)
        else:
            logging.getLogger("graphscope").setLevel(logging.CRITICAL)
        # Root-logger setup so the messages have a handler writing to stdout.
        logging.basicConfig(
            format="%(asctime)s [%(levelname)s][%(module)s:%(lineno)d]: %(message)s",
            stream=sys.stdout,
        )
    @property
    def session_id(self):
        # Unique identifier across sessions; None until the session is
        # connected (set outside __init__ — presumably in _connect()).
        return self._session_id
def _load_config(self, path):
config_path = os.path.expandvars(os.path.expanduser(path))
with open(config_path, "r") as f:
data = json.load(f)
self._config_params.update(data)
@property
def info(self):
"""Show all resources info associated with session in json format."""
info = {}
if self._closed:
info["status"] = "closed"
elif self._grpc_client is None:
info["status"] = "disconnected"
else:
info["status"] = "active"
if self._session_type == types_pb2.K8S:
info["type"] = "k8s"
info["engine_hosts"] = ",".join(self._pod_name_list)
info["namespace"] = self._config_params["k8s_namespace"]
else:
info["types"] = "invalid"
info["num_workers"] = self._config_params["num_workers"]
info["coordinator_endpoint"] = self._endpoint
info["engine_config"] = self._engine_config
return info
    def _send_heartbeat(self):
        """Heartbeat loop run on a daemon thread: ping the coordinator every
        ``self._heartbeat_interval_seconds`` until the session is closed.
        """
        while not self._closed:
            # Skip (but still sleep) while no gRPC client exists yet.
            if self._grpc_client:
                self._grpc_client.send_heartbeat()
            time.sleep(self._heartbeat_interval_seconds)
    def close(self):
        """Closes this session.
        This method frees all resources associated with the session.
        """
        # Idempotent: repeated calls are no-ops.
        if self._closed:
            return
        # Setting _closed first makes the heartbeat loop exit on its own.
        self._closed = True
        self._endpoint = None
        # Remove self from the default-session stack before tearing down.
        self._deregister_default()
        # Wait at most one heartbeat interval for the daemon thread to notice.
        if self._heartbeat_sending_thread:
            self._heartbeat_sending_thread.join(
                timeout=self._heartbeat_interval_seconds
            )
            self._heartbeat_sending_thread = None
        # close all interactive instances (best effort: engine-internal
        # failures are deliberately swallowed so teardown continues)
        for instance in self._interactive_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except InteractiveEngineInternalError:
                pass
        self._interactive_instance_dict.clear()
        # close all learning instances
        for instance in self._learning_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except LearningEngineInternalError:
                pass
        self._learning_instance_dict.clear()
        if self._grpc_client:
            self._grpc_client.close()
            self._grpc_client = None
        # Drop the module-level registration (get_session_by_id).
        _session_dict.pop(self._session_id, None)
        # clean up
        if self._proc is not None:
            # coordinator's GRPCServer.wait_for_termination works for SIGINT (Ctrl-C)
            self._proc.send_signal(signal.SIGINT)
            self._proc.wait()
            self._proc = None
        if self._config_params["enable_k8s"]:
            if self._k8s_cluster:
                self._k8s_cluster.stop()
            self._pod_name_list = []
def _close_interactive_instance(self, instance):
"""Close a interactive instance."""
if self._grpc_client:
self._grpc_client.close_interactive_engine(instance.object_id)
self._interactive_instance_dict[instance.object_id] = None
def _close_learning_instance(self, instance):
"""Close a learning instance."""
if self._grpc_client:
self._grpc_client.close_learning_engine(instance.object_id)
self._learning_instance_dict[instance.object_id] = None
    def __del__(self):
        """Finalizer: best-effort close; must never raise during interpreter
        teardown (module globals may already be gone)."""
        # cleanly ignore all exceptions
        try:
            self.close()
        except Exception:  # pylint: disable=broad-except
            pass
    def as_default(self):
        """Obtain a context manager that make this object as default session.
        This method is used when a Session is constructed, which will immediately
        install self as a default session.
        Raises:
            ValueError: If default session exist in current context.
        Returns:
            A context manager using this session as the default session.
        """
        if not _default_session_stack.is_cleared():
            raise ValueError(
                "A default session is already active. You must explicitly call Session.close()."
            )
        # session context manager
        # __enter__ is called here but __exit__ only in _deregister_default(),
        # so the session stays default until close().
        self._default_session = default_session(self)
        self._default_session.__enter__()
def _deregister_default(self):
"""Remove self from the default session stack."""
if self._default_session:
self._default_session.__exit__(None, None, None)
self._default_session = None
    def run(self, fetch):
        """Run operations of `fetch`.
        Args:
            fetch: :class:`Operation`
        Raises:
            RuntimeError:
                Client disconnect to the service. Or run on a closed session.
            ValueError:
                If fetch is not a instance of :class:`Operation`. Or
                the fetch has been evaluated.
            InvalidArgumentError:
                Not recognized on output type.
        Returns:
            Different values for different output types of :class:`Operation`
        """
        # prepare names to run and fetch
        # Accept objects that wrap an Operation in an ``op`` attribute.
        if hasattr(fetch, "op"):
            fetch = fetch.op
        if not isinstance(fetch, Operation):
            raise ValueError("Expect a `Operation`")
        # A non-None output means the op was already evaluated once.
        if fetch.output is not None:
            raise ValueError("The op <%s> are evaluated duplicated." % fetch.key)
        # convert to list to be compatible with rpc client method signature
        fetch_ops = [fetch]
        dag = op_def_pb2.DagDef()
        for op in fetch_ops:
            # deepcopy so later mutation of the op doesn't affect the request
            dag.op.extend([copy.deepcopy(op.as_op_def())])
        if self._closed:
            raise RuntimeError("Attempted to use a closed Session.")
        if not self._grpc_client:
            raise RuntimeError("Session disconnected.")
        # execute the query
        try:
            response = self._grpc_client.run(dag)
        except FatalError:
            # Unrecoverable transport failure: tear the session down.
            self.close()
            raise
        check_argument(
            len(fetch_ops) == 1, "Cannot execute multiple ops at the same time"
        )
        return self._parse_value(fetch_ops[0], response)
    def _parse_value(self, op, response: message_pb2.RunStepResponse):
        """Convert a RunStepResponse into the value appropriate for the op's
        declared output type.

        Raises:
            InvalidArgumentError: If ``op.output_types`` is not recognized.
        """
        # attach an output to op, indicating the op is already run.
        op.set_output(response.metrics)
        # if loads a arrow property graph, will return {'object_id': xxxx}
        if op.output_types == types_pb2.GRAPH:
            return response.graph_def
        if op.output_types == types_pb2.APP:
            return response.result.decode("utf-8")
        if op.output_types in (
            types_pb2.RESULTS,
            types_pb2.VINEYARD_TENSOR,
            types_pb2.VINEYARD_DATAFRAME,
        ):
            return response.result.decode("utf-8")
        # TENSOR/DATAFRAME results stay as raw bytes for the caller to decode.
        if op.output_types in (types_pb2.TENSOR, types_pb2.DATAFRAME):
            return response.result
        else:
            raise InvalidArgumentError(
                "Not recognized output type: %s", op.output_types
            )
    def _connect(self):
        """Launch or attach to a coordinator and open the gRPC session.

        Three launch modes, chosen from ``self._config_params``:
        ``addr`` set → attach to an existing coordinator; ``enable_k8s`` →
        start a Kubernetes cluster; otherwise launch a local coordinator on
        the configured hosts.

        Returns:
            tuple: (proc, endpoint) — the local coordinator Popen object (or
            None when attached/k8s) and the coordinator endpoint string.
        """
        if self._config_params["addr"] is not None:
            # try connect to exist coordinator
            self._session_type = types_pb2.HOSTS
            proc, endpoint = None, self._config_params["addr"]
        elif self._config_params["enable_k8s"]:
            if (
                self._config_params["k8s_etcd_image"] is None
                or self._config_params["k8s_gs_image"] is None
            ):
                raise K8sError("None image found.")
            api_client = kube_config.new_client_from_config(
                **self._config_params["k8s_client_config"]
            )
            proc = None
            self._session_type = types_pb2.K8S
            self._k8s_cluster = KubernetesCluster(
                api_client=api_client,
                namespace=self._config_params["k8s_namespace"],
                service_type=self._config_params["k8s_service_type"],
                minikube_vm_driver=self._config_params["k8s_minikube_vm_driver"],
                num_workers=self._config_params["num_workers"],
                log_level=self._config_params["log_level"],
                gs_image=self._config_params["k8s_gs_image"],
                etcd_image=self._config_params["k8s_etcd_image"],
                gie_graph_manager_image=self._config_params[
                    "k8s_gie_graph_manager_image"
                ],
                zookeeper_image=self._config_params["k8s_zookeeper_image"],
                image_pull_policy=self._config_params["k8s_image_pull_policy"],
                image_pull_secrets=self._config_params["k8s_image_pull_secrets"],
                vineyard_cpu=self._config_params["k8s_vineyard_cpu"],
                vineyard_mem=self._config_params["k8s_vineyard_mem"],
                vineyard_shared_mem=self._config_params["k8s_vineyard_shared_mem"],
                engine_cpu=self._config_params["k8s_engine_cpu"],
                engine_mem=self._config_params["k8s_engine_mem"],
                coordinator_cpu=float(self._config_params["k8s_coordinator_cpu"]),
                coordinator_mem=self._config_params["k8s_coordinator_mem"],
                waiting_for_delete=self._config_params["k8s_waiting_for_delete"],
                timeout_seconds=self._config_params["timeout_seconds"],
            )
            endpoint = self._k8s_cluster.start()
            # Record the (possibly auto-generated) namespace for info/teardown.
            if self._config_params["k8s_namespace"] is None:
                self._config_params["k8s_namespace"] = self._k8s_cluster.get_namespace()
        elif (
            isinstance(self._config_params["hosts"], list)
            and len(self._config_params["hosts"]) != 0
            and self._config_params["num_workers"] > 0
        ):
            # launch coordinator with hosts
            proc, endpoint = _launch_coordinator_on_local(self._config_params)
            self._session_type = types_pb2.HOSTS
        else:
            raise RuntimeError("Session initialize failed.")
        # waiting service ready
        self._grpc_client = GRPCClient(endpoint)
        self._grpc_client.waiting_service_ready(
            show_log=self._config_params["show_log"],
            timeout_seconds=self._config_params["timeout_seconds"],
            enable_k8s=self._config_params["enable_k8s"],
        )
        # connect to rpc server
        try:
            (
                self._session_id,
                self._engine_config,
                self._pod_name_list,
            ) = self._grpc_client.connect()
            _session_dict[self._session_id] = self
        except Exception:
            # Connect failed: kill the locally-launched coordinator (if any)
            # before re-raising; termination errors are best-effort ignored.
            if proc is not None and proc.poll() is None:
                try:
                    proc.terminate()
                except: # noqa: E722
                    pass
            raise
        if self._config_params["enable_k8s"]:
            # minikube service
            self._k8s_cluster.check_and_set_vineyard_rpc_endpoint(self._engine_config)
        return proc, endpoint
    def get_config(self):
        """Get configuration of the session.

        Returns:
            dict: The live ``_config_params`` mapping (not a copy), so
            mutations by the caller affect this session.
        """
        return self._config_params
    def load_from(self, *args, **kwargs):
        """Load a graph within the session.
        See more information in :meth:`graphscope.load_from`.
        """
        # Temporarily install self as the default session so the module-level
        # load_from binds the new graph to this session.
        with default_session(self):
            return graphscope.load_from(*args, **kwargs)
def _run_on_local(self):
self._config_params["hosts"] = ["localhost"]
self._config_params["port"] = None
self._config_params["vineyard_socket"] = ""
self._config_params["enable_k8s"] = False
    def _get_gl_handle(self, graph):
        """Dump a handler for GraphLearn for interaction.
        Fields in :code:`schema` are:
        + the name of node type or edge type
        + whether the graph is weighted graph
        + whether the graph is labeled graph
        + the number of int attributes
        + the number of float attributes
        + the number of string attributes
        An example of the graph handle:
        .. code:: python
            {
                "server": "127.0.0.1:8888,127.0.0.1:8889",
                "client_count": 1,
                "vineyard_socket": "/var/run/vineyard.sock",
                "vineyard_id": 13278328736,
                "node_schema": [
                    "user:false:false:10:0:0",
                    "item:true:false:0:0:5"
                ],
                "edge_schema": [
                    "user:click:item:true:false:0:0:0",
                    "user:buy:item:true:true:0:0:0",
                    "item:similar:item:false:false:10:0:0"
                ],
                "node_attribute_types": {
                    "person": {
                        "age": "i",
                        "name": "s",
                    },
                },
                "edge_attribute_types": {
                    "knows": {
                        "weight": "f",
                    },
                },
            }
        The handle can be decoded using:
        .. code:: python
            base64.b64decode(handle.encode('ascii')).decode('ascii')
        Note that the ports are selected from a range :code:`(8000, 9000)`.
        Args:
            graph (:class:`Graph`): A Property Graph.
            client_number (int): Number of client.
        Returns:
            str: Base64 encoded handle
        Raises:
            InvalidArgumentError: If the graph is not loaded, or graph_type isn't
                `ARROW_PROPERTY`.
        """
        if not graph.loaded():
            raise InvalidArgumentError("The graph has already been unloaded")
        if not graph.graph_type == types_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")

        def group_property_types(props):
            # Summarize one label's property map: counts per kind plus a
            # per-field kind code ("s"/"f"/"i"); the presence of fields named
            # "weight"/"label" flags the schema as weighted/labeled.
            weighted, labeled, i, f, s, attr_types = "false", "false", 0, 0, 0, {}
            for field_name, field_type in props.items():
                if field_type in [types_pb2.STRING]:
                    s += 1
                    attr_types[field_name] = "s"
                elif field_type in (types_pb2.FLOAT, types_pb2.DOUBLE):
                    f += 1
                    attr_types[field_name] = "f"
                else:
                    # everything that is not string/float is counted as int
                    i += 1
                    attr_types[field_name] = "i"
                if field_name == "weight":
                    weighted = "true"
                elif field_name == "label":
                    labeled = "true"
            return weighted, labeled, i, f, s, attr_types

        # Per-vertex-label schema strings: "label:weighted:labeled:i:f:s".
        node_schema, node_attribute_types = [], dict()
        for index, label in enumerate(graph.schema.vertex_labels):
            weighted, labeled, i, f, s, attr_types = group_property_types(
                graph.schema.vertex_properties[index]
            )
            node_schema.append(
                "{}:{}:{}:{}:{}:{}".format(label, weighted, labeled, i, f, s)
            )
            node_attribute_types[label] = attr_types
        # Per-edge-label schema strings, one per (src, dst) relationship:
        # "src:label:dst:weighted:labeled:i:f:s".
        edge_schema, edge_attribute_types = [], dict()
        for index, label in enumerate(graph.schema.edge_labels):
            weighted, labeled, i, f, s, attr_types = group_property_types(
                graph.schema.edge_properties[index]
            )
            for rel in graph.schema.edge_relationships[index]:
                edge_schema.append(
                    "{}:{}:{}:{}:{}:{}:{}:{}".format(
                        rel[0], label, rel[1], weighted, labeled, i, f, s
                    )
                )
            edge_attribute_types[label] = attr_types
        # NOTE(review): self.info["engine_hosts"] is only set for k8s
        # sessions (see the info property) — confirm non-k8s callers.
        handle = {
            "hosts": self.info["engine_hosts"],
            "client_count": 1,
            "vineyard_id": graph.vineyard_id,
            "vineyard_socket": self._engine_config["vineyard_socket"],
            "node_schema": node_schema,
            "edge_schema": edge_schema,
            "node_attribute_types": node_attribute_types,
            "edge_attribute_types": edge_attribute_types,
        }
        handle_json_string = json.dumps(handle)
        return base64.b64encode(handle_json_string.encode("utf-8")).decode("utf-8")
    def gremlin(self, graph):
        """Get a interactive engine handler to execute gremlin queries.
        Args:
            graph: :class:`Graph`
        Raises:
            InvalidArgumentError: :code:`graph` is not a property graph or unloaded.
        Returns:
            :class:`InteractiveQuery`
        """
        # Reuse a live instance cached for this graph, if any.
        if (
            graph.vineyard_id in self._interactive_instance_dict
            and self._interactive_instance_dict[graph.vineyard_id] is not None
        ):
            return self._interactive_instance_dict[graph.vineyard_id]
        if not graph.loaded():
            raise InvalidArgumentError("The graph has already been unloaded")
        if not graph.graph_type == types_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        # Imported lazily to avoid a hard dependency at module import time.
        from graphscope.interactive.query import InteractiveQuery
        response = self._grpc_client.create_interactive_engine(
            graph.vineyard_id, graph.schema_path
        )
        interactive_query = InteractiveQuery(
            graphscope_session=self,
            object_id=graph.vineyard_id,
            front_ip=response.frontend_host,
            front_port=response.frontend_port,
        )
        # Cache for reuse and let the graph track its engine instance.
        self._interactive_instance_dict[graph.vineyard_id] = interactive_query
        graph.attach_interactive_instance(interactive_query)
        return interactive_query
    def learning(self, graph, nodes=None, edges=None, gen_labels=None):
        """Start a graph learning engine.
        Args:
            graph (:class:`Graph`): A loaded property graph.
            nodes (list): The node types that will be used for gnn training.
            edges (list): The edge types that will be used for gnn training.
            gen_labels (list): Extra node and edge labels on original graph for gnn training.
        Raises:
            RuntimeError: On a non-Linux platform.
            InvalidArgumentError: If the graph is unloaded or not a property graph.
        Returns:
            `graphscope.learning.Graph`: An instance of `graphscope.learning.Graph`
            that could be feed to the learning engine.
        """
        # Reuse a live learning instance cached for this graph, if any.
        if (
            graph.vineyard_id in self._learning_instance_dict
            and self._learning_instance_dict[graph.vineyard_id] is not None
        ):
            return self._learning_instance_dict[graph.vineyard_id]
        if sys.platform != "linux" and sys.platform != "linux2":
            raise RuntimeError(
                "The learning engine currently supports Linux only, doesn't support %s"
                % sys.platform
            )
        if not graph.loaded():
            raise InvalidArgumentError("The graph has already been unloaded")
        if not graph.graph_type == types_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        # Imported lazily to avoid a hard dependency at module import time.
        from graphscope.learning.graph import Graph as LearningGraph
        handle = self._get_gl_handle(graph)
        config = LearningGraph.preprocess_args(handle, nodes, edges, gen_labels)
        config = base64.b64encode(json.dumps(config).encode("utf-8")).decode("utf-8")
        endpoints = self._grpc_client.create_learning_engine(
            graph.vineyard_id, handle, config
        )
        # Decode the handle to inject the actual server endpoints returned by
        # the coordinator, then hand the dict form to the learning graph.
        handle = json.loads(base64.b64decode(handle.encode("utf-8")).decode("utf-8"))
        handle["server"] = endpoints
        handle["client_count"] = 1
        learning_graph = LearningGraph(handle, config, graph.vineyard_id, self)
        self._learning_instance_dict[graph.vineyard_id] = learning_graph
        graph.attach_learning_instance(learning_graph)
        return learning_graph
# Lowercase alias of :class:`Session` for the module-level public API.
session = Session
def _is_port_in_use(port):
    """Return True when something already listens on localhost:*port*.

    Probes by attempting a TCP connect; ``connect_ex`` returning 0 (no
    error) means the port is taken.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return probe.connect_ex(("localhost", port)) == 0
    finally:
        probe.close()
def _launch_coordinator_on_local(config_params):
    """Launch coordinator locally using specific configuration.
    Args:
        config_params (dict): Specific configurations,
            in which we will look at several specific keys:
            port: Port used to launch coordinator, use random port if None.
            num_workers: Workers number.
            hosts: Hosts name of workers.
            log_level: Log level.
            timeout_seconds: Wait until reached timeout.
            vineyard_socket: Vineyard socket path. Use default path if None.
            show_log: Whether direct logs to stdout and stderr.
    Returns:
        process: instance of Popen object.
        endpoint (str): The endpoint to connect to coordinator.
    Raises:
        ConnectionError: If an explicitly requested port is already in use.
    """
    port = config_params["port"]
    if port is None:
        # use random port; retry until a free one is found
        port = random.randint(60801, 63801)
        while _is_port_in_use(port):
            port = random.randint(60801, 63801)
    else:
        # check port conflict
        if _is_port_in_use(port):
            raise ConnectionError("Port already used.")
    # Run the coordinator as ``python -m gscoordinator`` with all settings
    # passed as CLI flags.
    cmd = [
        sys.executable,
        "-m",
        "gscoordinator",
        "--num_workers",
        "{}".format(str(config_params["num_workers"])),
        "--hosts",
        "{}".format(",".join(config_params["hosts"])),
        "--log_level",
        "{}".format(config_params["log_level"]),
        "--timeout_seconds",
        "{}".format(config_params["timeout_seconds"]),
        "--port",
        "{}".format(str(port)),
    ]
    if config_params["vineyard_socket"]:
        cmd.extend(["--vineyard_socket", "{}".format(config_params["vineyard_socket"])])
    logger.info("Client is initializing coordinator.")
    env = os.environ.copy()
    # Unbuffered output so coordinator logs appear promptly when shown.
    env["PYTHONUNBUFFERED"] = "TRUE"
    process = subprocess.Popen(
        cmd,
        cwd=COORDINATOR_HOME,
        universal_newlines=True,
        encoding="utf-8",
        stdin=subprocess.DEVNULL,
        stdout=sys.stdout if config_params["show_log"] else subprocess.DEVNULL,
        stderr=sys.stderr if config_params["show_log"] else subprocess.DEVNULL,
        bufsize=1,
        env=env,
    )
    return process, "localhost:%s" % port
def default_session(session):
    """Python's :code:`with` handler for defining a default session.

    Registers *session* as the default for code that relies on an implicit
    session.  Use with the :code:`with` keyword so that invocations within
    the block are executed by that particular session.

    Args:
        session: :class:`Session`
            The session to be installed as the default session.
    Returns:
        A context manager for the default session.
    """
    return _default_session_stack.get_controller(session)
def get_default_session():
    """Returns the default session for the current context.
    Raises:
        RuntimeError: If no default session exists.
    Returns:
        The default :class:`Session`.
    """
    return _default_session_stack.get_default()
def get_session_by_id(handle):
    """Return the session by handle.

    Args:
        handle (str): A session id registered at connect time.
    Raises:
        ValueError: If no session with that id exists.
    """
    # EAFP: single dict lookup instead of a membership test followed by get().
    try:
        return _session_dict[handle]
    except KeyError:
        raise ValueError("Session not exists.")
class _DefaultSessionStack(object):
    """A stack of objects for providing implicit defaults."""

    def __init__(self):
        super().__init__()
        self.stack = []

    def get_default(self):
        """Return the innermost default session; raise when none installed."""
        try:
            return self.stack[-1]
        except IndexError:
            raise RuntimeError("No default session found.")

    def reset(self):
        """Drop every registered default."""
        self.stack = []

    def is_cleared(self):
        """True when no default session is installed."""
        return len(self.stack) == 0

    @contextlib.contextmanager
    def get_controller(self, default):
        """A context manager for manipulating a default stack."""
        self.stack.append(default)
        try:
            yield default
        finally:
            # stack may be empty if reset() was called
            if self.stack:
                self.stack.remove(default)


_default_session_stack = _DefaultSessionStack()  # pylint: disable=protected-access
|
vis.py | # Copyright (C) 2018 Innoviz Technologies
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD 3-Clause license. See the LICENSE file for details.
import numpy as np
import textwrap
import matplotlib.cm as cm
from multiprocessing import Process, Queue
from queue import Empty
from panda3d.core import ModifierButtons, TextNode
from direct.showbase.ShowBase import ShowBase
from direct.gui.DirectGui import OnscreenText
from visualizations.vis_utils import Navigator3D, PointCloudVertexBuffer, Cuboid
import matplotlib.colors as mcolors
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
import collections
# Reflectivity ramp (low→high): red → yellow → green.
pc_cmap = LinearSegmentedColormap.from_list('mycmap', ['red', 'yellow', 'limegreen'])
# Label ramp: white → magenta → blue.
label_cmap = LinearSegmentedColormap.from_list('mycmap', ['white', 'magenta', 'blue'])
# NOTE(review): placed after the imports, this string is not picked up as
# the module docstring; it is an inert expression.
"""
This module allows visualization of point clouds. To use, simply use the pcshow() function.
"""
def pcshow(point_cloud=None, boxes=None, point_cloud_coloring='reflectivity_and_label', max_points=1000000, on_screen_text=None):
    """
    Convenience entry point: show *point_cloud* in a singleton
    PointCloudFrameViewer. A new viewer process is spawned when none is
    alive; otherwise the existing one is updated (overridden).
    """
    viewer = PointCloudFrameViewer.main_display
    if viewer is None or not viewer.is_running:
        viewer = PointCloudFrameViewer(point_cloud, boxes, point_cloud_coloring, max_points, on_screen_text)
        PointCloudFrameViewer.main_display = viewer
    else:
        viewer.display(point_cloud, boxes, point_cloud_coloring, on_screen_text)
    return PointCloudFrameViewer.main_display
class PointCloudFrameViewer(object):
    """
    This class is used to display a single point cloud frame that can be manipulated (e.g. navigated in 3d).
    In order to display stuff on it, use the display function, or use the paramters of the __init__ function.

    The rendering runs in a separate process; frames are handed over through
    a size-1 multiprocessing queue.
    """
    main_display = None

    def __init__(self, point_cloud=None, boxes=None, point_cloud_coloring='reflectivity_and_label', max_points=17776, on_screen_text=None):
        self._queue = Queue(1)
        # BUG FIX: len(None) raised TypeError when constructed without a
        # point cloud even though point_cloud defaults to None.
        num_points = 0 if point_cloud is None else len(point_cloud)
        self._max_points = max(max_points, num_points)
        # BUG FIX: previously the raw max_points argument was passed to the
        # viewer process, so the adjusted _max_points (grown to fit the first
        # frame) was never used; pass the computed capacity instead.
        self._process = Process(target=self._gen_viewer, args=(self._queue, self._max_points))
        self._process.start()
        self.display(point_cloud, boxes, point_cloud_coloring, on_screen_text)
        self._cuboids = []

    def display(self, point_cloud, boxes, point_cloud_coloring='reflectivity_and_label', on_screen_text=None):
        """Queue a frame (any subset of point cloud / boxes / text) for the
        viewer process; raises when the viewer process has exited."""
        if self._process.is_alive():
            d = {}
            if point_cloud is not None:
                d['point_cloud'] = point_cloud
                d['point_cloud_coloring'] = point_cloud_coloring
            if on_screen_text is not None:
                d['on_screen_text'] = on_screen_text
            if boxes is not None:
                d['boxes'] = boxes
            self._queue.put(d)
        else:
            raise Exception("point cloud display was closed")

    @staticmethod
    def _gen_viewer(queue, max_points=17776, massage_que=None):
        # Runs in the child process: set up the panda3d app and block in its
        # main loop, polling `queue` from a task (see _PointCloudFrameViewer).
        base = ShowBase()
        base.setBackgroundColor(0, 0, 0)
        base.disableMouse()
        base.camera.setPos(0, -50, 20)
        base.camera.setHpr(0, -22, 0)
        base.mouseWatcherNode.set_modifier_buttons(ModifierButtons())
        base.buttonThrowers[0].node().set_modifier_buttons(ModifierButtons())
        base.setFrameRateMeter(True)
        pc_viewer = _PointCloudFrameViewer(point_cloud_size=max_points, queue=queue)
        base.run()

    @property
    def is_running(self):
        """True while the viewer process is alive."""
        return self._process.is_alive()
class PointCloudViewerApp(Navigator3D):
    """Panda3D app that renders a point cloud with optional labeled 3D boxes
    and an on-screen text overlay.  ``draw`` updates the whole scene from one
    frame's data."""

    def __init__(self, point_cloud_size=17776):
        Navigator3D.__init__(self)
        self._point_cloud = None
        self._point_cloud_coloring = None
        # BUG FIX: _boxes was only assigned in draw(); redraw_boxes() before
        # the first draw() raised AttributeError.
        self._boxes = None
        self._cuboids = []
        self.points_vb = PointCloudVertexBuffer(point_cloud_size)
        self._user_text = OnscreenText('', style=1, fg=(1, 1, 1, 1), scale=.04)
        self._user_text.setPos(-0.9, 0.9)

    def redraw_boxes(self):
        """Re-show the cuboids of the boxes drawn last frame."""
        if self._boxes is not None:
            for c_index, box in enumerate(self._boxes):
                self._cuboids[c_index].show()

    def draw(self, point_cloud=None, point_cloud_coloring=None, on_screen_text=None, boxes=None):
        """Update points, overlay text and boxes; each argument is optional."""
        self._boxes = boxes
        if point_cloud is not None:
            pc_color = self.color_pc(point_cloud, point_cloud_coloring)
            self.draw_pc(point_cloud, pc_color)
        if on_screen_text is not None:
            self._user_text.setText(textwrap.fill(on_screen_text, 90))
        else:
            self._user_text.setText('')
        if boxes is not None:
            self.draw_cuboids(boxes)
        # self.redraw_boxes()

    def color_pc(self, pc, coloring='reflectivity_and_label', colormap='pc_cmap'):
        """
        Generate coloring for point cloud based on multiple options
        :param pc: point cloud (N x 4+ array; column 3 is reflectivity,
            optional column 4 is a label)
        :param coloring: Coloring option. Supported: 'reflectivity',
            'reflectivity_and_label', an integer label array of length N,
            an N x 4 RGBA array, an N-length scalar array, or a callable
            mapping points to scalars
        :return: N x 4 RGBA array
        """
        # BUG FIX: `colormap is 'pc_cmap'` compared string identity, which is
        # implementation-dependent; use equality.
        if colormap == 'pc_cmap':
            colormap = pc_cmap
        points = pc[:, :3]
        color = np.zeros((len(pc), 4))
        color[:, -1] = 1.
        # BUG FIX: np.int was removed in NumPy >= 1.24; accept any integer
        # dtype for label coloring (slightly broader than the old check,
        # which only matched the platform default int).
        if isinstance(coloring, np.ndarray) and np.issubdtype(coloring.dtype, np.integer) and coloring.shape == (points.shape[0],):
            cmap = ListedColormap(
                ['w', 'magenta', 'orange', 'mediumspringgreen', 'deepskyblue', 'pink', 'y', 'g', 'r', 'purple', ])
            coloring = np.mod(coloring, len(cmap.colors))
            c = cm.ScalarMappable(cmap=cmap, norm=mcolors.Normalize(vmin=0, vmax=len(cmap.colors)-1))
            color = c.to_rgba(coloring)
        elif isinstance(coloring, np.ndarray):
            if coloring.shape == (points.shape[0], 4):
                color = coloring
            if coloring.shape == (points.shape[0], ):
                c = cm.ScalarMappable(cmap=colormap)
                color = c.to_rgba(coloring, norm=False)
        # BUG FIX: collections.Callable was removed in Python 3.10 (moved to
        # collections.abc long before); the builtin callable() suffices.
        elif callable(coloring):
            colors = coloring(points)
            c = cm.ScalarMappable(cmap=colormap)
            color = c.to_rgba(colors)
        elif coloring == 'reflectivity':
            reflectivity = pc[:, 3]
            reflectivity[reflectivity > 1] = 1
            c = cm.ScalarMappable(cmap=colormap)
            color = c.to_rgba(reflectivity, norm=False)
            # negative reflectivity renders white
            color[reflectivity < 0] = np.array([1.0, 1.0, 1.0, 1.0])
        elif coloring == 'reflectivity_and_label':
            # pc_colors
            reflectivity = pc[:, 3]
            reflectivity[reflectivity > 1] = 1
            c = cm.ScalarMappable(cmap=colormap)
            color = c.to_rgba(reflectivity, norm=False)
            # labeled points (5th column > 0) are recolored via label_cmap
            if pc.shape[-1] == 5:
                labels = pc[:, 4]
                labels_valid = labels[labels > 0]
                c = cm.ScalarMappable(cmap=label_cmap)
                color_labels = c.to_rgba(labels_valid, norm=True)
                color[labels > 0] = color_labels
        else:
            color = np.ones((points.shape[0], 4))
            color[:, -1] = 1.
        return color

    def draw_pc(self, pc, color):
        """Push points to the vertex buffer (x/y swapped for panda3d axes)."""
        points = pc[:, np.array([1, 0, 2])]
        self.points_vb.assign_points(points, color)

    def clear_point_cloud(self):
        self.points_vb.clear_pc()

    def draw_cuboids(self, boxes):
        """Create/update one Cuboid per box dict; hide surplus cuboids."""
        for box_idx, box in enumerate(boxes):
            # BUG FIX: boxes are dicts, so hasattr(box, 'color') was always
            # False and per-box colors were silently ignored; use key lookup.
            color = box['color'] if 'color' in box else np.ones(4)
            size = box['size']
            translation = box['translation']
            rotation = box['rotation'] / np.pi * 180.
            # FIX: bare except around box['text'] replaced with .get().
            text = box.get('text', '')
            if box_idx < len(self._cuboids):
                self._cuboids[box_idx].show()
                self._cuboids[box_idx].update_values(size, translation, rotation, color, text)
            else:
                self._cuboids.append(Cuboid(size, translation, rotation, color, text))
        # hide leftover cuboids from a previous, larger frame
        for c in self._cuboids[len(boxes):]:
            c.hide()
class _PointCloudFrameViewer(PointCloudViewerApp):
    """
    Adds a queue on top of PointCloudViewerApp to get data for display asynchronously.
    """
    def __init__(self, point_cloud_size=17776, queue=None):
        PointCloudViewerApp.__init__(self, point_cloud_size)
        self._queue = queue
        # taskMgr is a global injected by panda3d's ShowBase; the task polls
        # the queue once per frame.
        taskMgr.add(self.dequeue, 'read_queue_task')
    def dequeue(self, task):
        """Panda3d task: drain one frame dict from the queue and draw it.

        Returns task.done (stop polling) when no queue was supplied,
        otherwise task.cont to keep being scheduled.
        """
        if self._queue is None:
            return task.done
        try:
            # NOTE(review): with block=False the timeout argument is ignored
            # by Queue.get; this is a pure non-blocking poll.
            disp_dict = self._queue.get(block=False, timeout=0.1)
        except Empty:
            return task.cont
        if disp_dict is not None:
            # Missing keys mean "leave that part of the scene unchanged".
            boxes = disp_dict['boxes'] if 'boxes' in disp_dict else None
            pc = disp_dict['point_cloud'] if 'point_cloud' in disp_dict else None
            on_screen_text = disp_dict['on_screen_text'] if 'on_screen_text' in disp_dict else None
            pc_coloring = 'reflectivity' if 'point_cloud_coloring' not in disp_dict else disp_dict['point_cloud_coloring']
            self.draw(pc, pc_coloring, on_screen_text, boxes)
        return task.cont
if __name__ == '__main__':
    # Demo: load a space-separated point cloud file (columns presumably
    # x y z reflectivity label — TODO confirm) and display the xyzr part.
    file = './example.csv'
    labeled_pc = np.genfromtxt(file, delimiter=' ')
    # Force reflectivity negative for label==1 points; negative reflectivity
    # is drawn white by color_pc's 'reflectivity' branch.
    labeled_pc[:, 3][labeled_pc[:, 4] == 1] = -1
    pcshow(labeled_pc[:, :4])
|
worker.py | from flask import Flask, request
from django.apps import apps
from django.conf import settings
from threading import Thread, Event
import os
from time import sleep
import logging
from datetime import datetime, timedelta
import pytz
# Bootstrap Django's ORM so data.models/data.utils import cleanly outside of
# a Django process.
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
    os.environ['DJANGO_SETTINGS_MODULE'] = "bcfdata.settings"
apps.populate(settings.INSTALLED_APPS)
from data.models import Sailing
from data.utils import (get_actual_departures, get_current_conditions,
                        get_ferry_locations, get_sailing_detail)
app = Flask(__name__)
# Epoch start so the first loop iteration triggers an immediate refresh.
last_run = datetime.utcfromtimestamp(0)
last_detail_run = datetime.utcfromtimestamp(0)
tz = pytz.timezone(settings.DISPLAY_TIME_ZONE)
# Seconds between periodic refreshes.
interval = 600
def shutdown_flask():
    """Stop the embedded werkzeug dev server.

    Must be called from inside a request context.

    Raises:
        RuntimeError: When not running under the werkzeug dev server (the
            shutdown hook is absent).  Previously this surfaced as an opaque
            ``TypeError: 'NoneType' object is not callable``.
    """
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
def periodic_update():
    """Worker loop: refresh departures, conditions and ferry locations every
    ``interval`` seconds, polling the shutdown event ``e`` every 10s."""
    global last_run
    while not e.is_set():
        # BUG FIX: timedelta.seconds is only the seconds *component*
        # (0..86399) — with the epoch-initialized last_run the first refresh
        # could be wrongly delayed, and gaps over a day were mis-measured.
        # Also honor the module-level `interval` instead of a hard-coded 600.
        elapsed = (datetime.now() - last_run).total_seconds()
        if elapsed > interval:
            print("Querying departures...")
            get_actual_departures()
            print("Querying conditions...")
            get_current_conditions()
            print("Querying ferry locations...")
            get_ferry_locations()
            last_run = datetime.now()
        else:
            print("Sleeping ({} seconds to go)".format(int(interval - elapsed)))
        sleep(10)
    print("Shutdown worker")
def periodic_detail_update():
    """Worker loop: refresh sailing details hourly, polling the shutdown
    event ``e`` every 10s."""
    global last_detail_run
    detail_interval = 3600
    while not e.is_set():
        # BUG FIX: use total_seconds(), not the .seconds component, which
        # wraps daily and mis-handled the epoch-initialized first run.
        elapsed = (datetime.now() - last_detail_run).total_seconds()
        if elapsed > detail_interval:
            get_sailing_detail()
            last_detail_run = datetime.now()
        else:
            print("Sleeping ({} seconds to go)".format(int(detail_interval - elapsed)))
        sleep(10)
    print("Shutdown worker")
@app.route("/status")
def status():
    """Report the timestamp of the last periodic refresh."""
    return str(last_run)
@app.route("/departures")
def departures():
    """Trigger an on-demand departures refresh."""
    get_actual_departures()
    return "OK"
@app.route("/conditions")
def conditions():
    """Trigger an on-demand conditions refresh."""
    get_current_conditions()
    return "OK"
@app.route("/locations")
def locations():
    """Trigger an on-demand ferry-locations refresh."""
    get_ferry_locations()
    return "OK"
@app.route("/update")
def update():
    """Trigger all three refreshes at once (does not touch last_run)."""
    get_actual_departures()
    get_current_conditions()
    get_ferry_locations()
    return "OK"
@app.route("/shutdown")
def shutdown():
    """Signal the worker loops to stop and shut down the dev server."""
    e.set()
    shutdown_flask()
    return "OK"
@app.route("/")
def index():
    """Health check: count of Sailing rows in the database."""
    return str(Sailing.objects.count())
# Shared shutdown flag polled by both worker loops (set via /shutdown).
e = Event()
t = Thread(target=periodic_update)
d = Thread(target=periodic_detail_update)
# Flask itself runs on a thread so the workers and server coexist.
a = Thread(target=app.run, kwargs={
    "host": "0.0.0.0", "port": 6124
})
if __name__=='__main__':
    t.start()
    # d.start()  (detail updates currently disabled)
    a.start()
|
server.py | #!/usr/bin/env python
"""Machine Learning Server."""
# System
import threading
import sys
import os
from datetime import datetime
import logging
# Third Party
import time
from pqueue import Queue
import numpy as np
import schedule
from bottle import run, request, route
from file_manager import FileManager
# First Party
from database_manager import DatabaseManager
import concordance
import common
# HTTP port for the bottle server; overridable via the environment.
PORT = os.environ.get("PORT", "5151")
# API key resolution order: environment, secret store, hard-coded fallback.
API_KEY = os.environ.get("API_KEY")
if not API_KEY:
    API_KEY = common.get_secret("API_KEY")
if not API_KEY:
    # SECURITY: hard-coded fallback credential checked into source — any
    # deployment without API_KEY configured accepts this publicly known key.
    API_KEY = "h/52y/E7cm8Ih4F3cVdlBM4ZQxER+Apk6P0L7yR0lFU="
# On-disk queue directories (pqueue persists entries across restarts).
TEMP_DIR, OQ_DIR, CQ_DIR = "queue_temp", "oqueue", "cqueue"
LB_TEMP_DIR, LBQ_DIR = "lb_temp", "lbqueue"
for d in [TEMP_DIR, OQ_DIR, CQ_DIR, LB_TEMP_DIR, LBQ_DIR]:
    if not os.path.exists(d):
        os.makedirs(d)
concordance_queue = Queue(CQ_DIR, tempdir=TEMP_DIR)
leaderboard_queue = Queue(LBQ_DIR, tempdir=LB_TEMP_DIR)
@route('/', method='POST')
def queue_for_scoring():
    """ Receives a submission and authenticates that the request has a valid API key.
    Once authenticated the submission request is then queued to the leaderboard_queue and later checked for concordance.

    Expects a JSON body with "submission_id" and "api_key"; a missing field
    raises KeyError (500 response).  Auth failures return an empty body.
    """
    json = request.json
    submission_id = json["submission_id"]
    api_key = json["api_key"]
    if API_KEY is None:
        logging.getLogger().critical("NO API KEY EXITING")
        return
    if api_key != API_KEY:
        logging.getLogger().info(
            "Received invalid post request with incorrect api_key {} and submission_id {}"
            .format(api_key, submission_id))
        return
    logging.getLogger().info(
        "Received request to score {}".format(submission_id))
    # enqueue_time lets the concordance worker report end-to-end latency
    data = {
        "submission_id": submission_id,
        "enqueue_time": datetime.now(),
    }
    leaderboard_queue.put(data)
    common.update_metrics(submission_id)
def put_submission_on_lb(db_manager, filemanager):
    """Pulls submissions from leaderboard_queue and pushes submissions to concordance queue for scoring"""
    while True:
        submission = leaderboard_queue.get()
        try:
            db_manager.update_leaderboard(submission["submission_id"],
                                          filemanager)
            for queue in [concordance_queue]:
                queue.put(submission)
            # NOTE(review): task_done() is only reached on success, so a
            # failed item is never acknowledged — presumably so the
            # persistent queue redelivers it after a restart; confirm.
            leaderboard_queue.task_done()
        except Exception:
            # Broad catch keeps the worker thread alive across bad items.
            logging.exception("Exception updating submission.")
def score_concordance(db_manager, filemanager):
    """Pulls submission from concordance_queue for concordance check"""
    while True:
        submission = concordance_queue.get()
        try:
            concordance.submission_concordance(submission, db_manager,
                                               filemanager)
            # Log end-to-end latency when the producer stamped the item.
            if 'enqueue_time' in submission:
                time_taken = datetime.now() - submission['enqueue_time']
                logging.getLogger().info(
                    "Submission {} took {} to complete concordance".format(
                        submission['submission_id'], time_taken))
            # Only acknowledged on success (see put_submission_on_lb note).
            concordance_queue.task_done()
        except Exception:
            # Broad catch keeps the worker thread alive across bad items.
            logging.exception("Exception scoring concordance.")
def create_logger():
    """Configure the root logger: DEBUG level overall, with an INFO console
    handler on stdout whose format includes the process id."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    fmt = '%(asctime)s - %(process)d - {} - %(message)s'.format(
        "Machine learning Server")
    handler.setFormatter(logging.Formatter(fmt))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(handler)
def schedule_cleanup(filemanager):
    """
    Tell the filemanager to clean up every day

    Blocks forever; intended to run on its own thread (see main()).
    """
    # schedule a daily cleanup
    schedule.every(1).days.do(filemanager.clean_up)
    # run pending jobs every hour
    while 1:
        schedule.run_pending()
        time.sleep(3600)
def main():
    """
    The threading in this file works like this
    We have a bottle server listening for submissions. When it gets a submission
    it gives it to the put_submission_on_lb. This makes sure that the user is on the
    leaderboard/ the leaderboard reflects their most up to date submission.
    That method then enqueues the submission for concordance check.
    """
    # Fixed seed for reproducibility of any stochastic scoring steps.
    np.random.seed(1337)
    create_logger()
    db_manager = DatabaseManager()
    fm = FileManager('/tmp/', logging)
    logging.getLogger().info("Creating servers")
    # Bottle HTTP server (routes registered at module level).
    threading.Thread(
        target=run, kwargs=dict(host='0.0.0.0', port=int(PORT))).start()
    logging.getLogger().info("Spawning new threads to score concordance")
    # Queue consumers: leaderboard updater feeds the concordance scorer.
    threading.Thread(
        target=put_submission_on_lb,
        kwargs=dict(db_manager=db_manager, filemanager=fm)).start()
    threading.Thread(
        target=score_concordance,
        kwargs=dict(db_manager=db_manager, filemanager=fm)).start()
    # clean up the /tmp folder so we don't run out of disk space
    threading.Thread(
        target=schedule_cleanup, kwargs=dict(filemanager=fm)).start()
if __name__ == '__main__':
    main()
|
test_flush.py | import time
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from utils import *
# Shared fixtures/constants for the flush test suite.
dim = 128                      # vector dimensionality used by all tests
segment_row_count = 5000
index_file_size = 10
collection_id = "test_flush"
DELETE_TIMEOUT = 60
nprobe = 1
top_k = 1
nb = 6000                      # default number of entities per insert
# NOTE: a dead assignment (tag = "1970-01-01") that was immediately
# shadowed by the value below has been removed.
tag = "partition_tag"
field_name = "float_vector"
entity = gen_entities(1)
entities = gen_entities(nb)
raw_vector, binary_entity = gen_binary_entities(1)
raw_vectors, binary_entities = gen_binary_entities(nb)
default_fields = gen_default_fields()
# Canonical single-vector search request reused by the query tests.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, dim), "metric_type": "L2", "params": {"nprobe": 10}}}}
        ]
    }
}
class TestFlushBase:
    """
    ******************************************************************
    The following cases are used to test `flush` function
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # GPU mode only supports the ivf index family; skip anything else.
        if str(connect._cmd("mode")[1]) == "GPU":
            if request.param["index_type"] not in ivf():
                pytest.skip("Only support index_type: idmap/flat")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_single_filter_fields()
    )
    def get_filter_field(self, request):
        # Parametrized over every single scalar-field schema.
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_single_vector_fields()
    )
    def get_vector_field(self, request):
        # Parametrized over every single vector-field schema.
        yield request.param

    def test_flush_collection_not_existed(self, connect, collection):
        '''
        target: test flush, params collection_name not existed
        method: flush, with collection not existed
        expected: error raised
        '''
        collection_new = gen_unique_str("test_flush_1")
        with pytest.raises(Exception) as e:
            connect.flush([collection_new])

    def test_flush_empty_collection(self, connect, collection):
        '''
        method: insert entities, then delete them all and count
        expected: count is 0, no error raised
        '''
        ids = connect.insert(collection, entities)
        assert len(ids) == nb
        status = connect.delete_entity_by_id(collection, ids)
        assert status.OK()
        res = connect.count_entities(collection)
        assert 0 == res
        # with pytest.raises(Exception) as e:
        #     connect.flush([collection])

    def test_add_partition_flush(self, connect, id_collection):
        '''
        method: add entities into partition in collection, flush several times
        expected: the length of ids and the collection row count
        '''
        # vector = gen_vector(nb, dim)
        connect.create_partition(id_collection, tag)
        # vectors = gen_vectors(nb, dim)
        ids = [i for i in range(nb)]
        ids = connect.insert(id_collection, entities, ids)
        connect.flush([id_collection])
        res_count = connect.count_entities(id_collection)
        assert res_count == nb
        # Second insert goes into the named partition; total doubles.
        ids = connect.insert(id_collection, entities, ids, partition_tag=tag)
        assert len(ids) == nb
        connect.flush([id_collection])
        res_count = connect.count_entities(id_collection)
        assert res_count == nb * 2

    def test_add_partitions_flush(self, connect, id_collection):
        '''
        method: add entities into partitions in collection, flush one
        expected: the length of ids and the collection row count
        '''
        # vectors = gen_vectors(nb, dim)
        tag_new = gen_unique_str()
        connect.create_partition(id_collection, tag)
        connect.create_partition(id_collection, tag_new)
        ids = [i for i in range(nb)]
        ids = connect.insert(id_collection, entities, ids, partition_tag=tag)
        connect.flush([id_collection])
        ids = connect.insert(id_collection, entities, ids, partition_tag=tag_new)
        connect.flush([id_collection])
        res = connect.count_entities(id_collection)
        assert res == 2 * nb

    def test_add_collections_flush(self, connect, id_collection):
        '''
        method: add entities into collections, flush one
        expected: the length of ids and the collection row count
        '''
        collection_new = gen_unique_str()
        default_fields = gen_default_fields(False)
        connect.create_collection(collection_new, default_fields)
        connect.create_partition(id_collection, tag)
        connect.create_partition(collection_new, tag)
        # vectors = gen_vectors(nb, dim)
        ids = [i for i in range(nb)]
        ids = connect.insert(id_collection, entities, ids, partition_tag=tag)
        ids = connect.insert(collection_new, entities, ids, partition_tag=tag)
        connect.flush([id_collection])
        connect.flush([collection_new])
        res = connect.count_entities(id_collection)
        assert res == nb
        res = connect.count_entities(collection_new)
        assert res == nb

    def test_add_collections_fields_flush(self, connect, id_collection, get_filter_field, get_vector_field):
        '''
        method: create collection with different fields, and add entities into collections, flush one
        expected: the length of ids and the collection row count
        '''
        nb_new = 5
        filter_field = get_filter_field
        vector_field = get_vector_field
        collection_new = gen_unique_str("test_flush")
        fields = {
            "fields": [filter_field, vector_field],
            "segment_row_count": segment_row_count,
            "auto_id": False
        }
        connect.create_collection(collection_new, fields)
        connect.create_partition(id_collection, tag)
        connect.create_partition(collection_new, tag)
        # vectors = gen_vectors(nb, dim)
        entities_new = gen_entities_by_fields(fields["fields"], nb_new, dim)
        ids = [i for i in range(nb)]
        ids_new = [i for i in range(nb_new)]
        ids = connect.insert(id_collection, entities, ids, partition_tag=tag)
        ids = connect.insert(collection_new, entities_new, ids_new, partition_tag=tag)
        connect.flush([id_collection])
        connect.flush([collection_new])
        res = connect.count_entities(id_collection)
        assert res == nb
        res = connect.count_entities(collection_new)
        assert res == nb_new

    def test_add_flush_multiable_times(self, connect, collection):
        '''
        method: add entities, flush several times
        expected: no error raised and the data stays searchable
        '''
        # vectors = gen_vectors(nb, dim)
        ids = connect.insert(collection, entities)
        for i in range(10):
            connect.flush([collection])
        res = connect.count_entities(collection)
        assert res == len(ids)
        # query_vecs = [vectors[0], vectors[1], vectors[-1]]
        res = connect.search(collection, default_single_query)
        logging.getLogger().debug(res)
        assert res

    def test_add_flush_auto(self, connect, id_collection):
        '''
        method: add entities without an explicit flush
        expected: auto-flush makes all entities countable within 20s
        '''
        # vectors = gen_vectors(nb, dim)
        ids = [i for i in range(nb)]
        ids = connect.insert(id_collection, entities, ids)
        timeout = 20
        start_time = time.time()
        # Poll until auto-flush has made every inserted entity visible.
        while (time.time() - start_time < timeout):
            time.sleep(1)
            res = connect.count_entities(id_collection)
            if res == nb:
                break
        if time.time() - start_time > timeout:
            assert False

    @pytest.fixture(
        scope="function",
        params=[
            1,
            100
        ],
    )
    def same_ids(self, request):
        # How many of the leading ids get collapsed onto id 0.
        yield request.param

    def test_add_flush_same_ids(self, connect, id_collection, same_ids):
        '''
        method: add entities, with same ids, count(same ids) < 15, > 15
        expected: the length of ids and the collection row count
        '''
        # vectors = gen_vectors(nb, dim)
        ids = [i for i in range(nb)]
        for i, item in enumerate(ids):
            if item <= same_ids:
                ids[i] = 0
        ids = connect.insert(id_collection, entities, ids)
        connect.flush([id_collection])
        res = connect.count_entities(id_collection)
        assert res == nb

    def test_delete_flush_multiable_times(self, connect, collection):
        '''
        method: delete entities, flush several times
        expected: no error raised and the data stays searchable
        '''
        # vectors = gen_vectors(nb, dim)
        ids = connect.insert(collection, entities)
        status = connect.delete_entity_by_id(collection, [ids[-1]])
        assert status.OK()
        for i in range(10):
            connect.flush([collection])
        # query_vecs = [vectors[0], vectors[1], vectors[-1]]
        res = connect.search(collection, default_single_query)
        logging.getLogger().debug(res)
        assert res

    # TODO: CI fail, LOCAL pass
    @pytest.mark.level(2)
    def test_collection_count_during_flush(self, connect, collection, args):
        '''
        method: flush collection at background, call `count_entities`
        expected: no timeout
        '''
        ids = []
        for i in range(5):
            tmp_ids = connect.insert(collection, entities)
            connect.flush([collection])
            ids.extend(tmp_ids)
        disable_flush(connect)
        status = connect.delete_entity_by_id(collection, ids)

        def flush():
            # Flush from a second connection while the main one counts.
            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
            logging.error("start flush")
            milvus.flush([collection])
            logging.error("end flush")

        p = threading.Thread(target=flush, args=())
        p.start()
        time.sleep(0.2)
        logging.error("start count")
        res = connect.count_entities(collection, timeout = 10)
        p.join()
        res = connect.count_entities(collection)
        assert res == 0
class TestFlushAsync:
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # The async API is not exposed over the HTTP handler.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    """
    ******************************************************************
    The following cases are used to test `flush` function
    ******************************************************************
    """

    def check_status(self):
        # Callback handed to the async flush; just logs that it fired.
        logging.getLogger().info("In callback check status")

    def test_flush_empty_collection(self, connect, collection):
        '''
        method: flush collection with no vectors
        expected: status ok
        '''
        future = connect.flush([collection], _async=True)
        status = future.result()

    def test_flush_async_long(self, connect, collection):
        # vectors = gen_vectors(nb, dim)
        ids = connect.insert(collection, entities)
        future = connect.flush([collection], _async=True)
        status = future.result()

    def test_flush_async(self, connect, collection):
        # NOTE(review): `nb` shadows the module-level constant and
        # `vectors` is never used — the insert below actually uses the
        # module-level `entities`, not 100000 fresh vectors. Confirm
        # intent before cleaning up.
        nb = 100000
        vectors = gen_vectors(nb, dim)
        connect.insert(collection, entities)
        logging.getLogger().info("before")
        future = connect.flush([collection], _async=True, _callback=self.check_status)
        logging.getLogger().info("after")
        future.done()
        status = future.result()
class TestCollectionNameInvalid(object):
    """
    Test adding vectors with invalid collection names
    """

    @pytest.fixture(
        scope="function",
        # params=gen_invalid_collection_names()
        params=gen_invalid_strs()
    )
    def get_invalid_collection_name(self, request):
        # One malformed collection-name candidate per parametrization.
        yield request.param

    @pytest.mark.level(2)
    def test_flush_with_invalid_collection_name(self, connect, get_invalid_collection_name):
        # Flushing with a malformed collection name must raise.
        collection_name = get_invalid_collection_name
        if collection_name is None or not collection_name:
            pytest.skip("while collection_name is None, then flush all collections")
        with pytest.raises(Exception) as e:
            connect.flush(collection_name)
|
test_dota_base_q.py | # -*- coding:utf-8 -*-
# Author: Xue Yang <yangxue-2019-sjtu@sjtu.edu.cn>
#
# License: Apache-2.0 license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
from utils import tools
from libs.label_name_dict.label_dict import LabelMap
from libs.utils.draw_box_in_img import DrawBox
from libs.utils.coordinate_convert import forward_convert, backward_convert
from libs.utils import nms_rotate
from libs.utils.rotate_polygon_nms import rotate_gpu_nms
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
def parse_args():
    """Parse the command-line options for DOTA testing.

    Returns:
        argparse.Namespace carrying the test-image directory, GPU list,
        sliding-window geometry (h/w length and overlap) and the
        test-time augmentation flags.
    """
    parser = argparse.ArgumentParser('Start testing.')
    parser.add_argument('--test_dir', dest='test_dir', type=str,
                        default='/data/dataset/DOTA/test/images/',
                        help='evaluate imgs dir ')
    parser.add_argument('--gpus', dest='gpus', type=str,
                        default='0,1,2,3,4,5,6,7', help='gpu id')
    # Boolean flags (all off by default).
    parser.add_argument('--show_box', '-s', default=False,
                        action='store_true')
    parser.add_argument('--multi_scale', '-ms', default=False,
                        action='store_true')
    parser.add_argument('--flip_img', '-f', default=False,
                        action='store_true')
    parser.add_argument('--cpu_nms', '-cn', default=False,
                        action='store_true')
    # np.inf default means "test every image found".
    parser.add_argument('--num_imgs', dest='num_imgs', type=int,
                        default=np.inf, help='test image number')
    parser.add_argument('--h_len', dest='h_len', type=int,
                        default=600, help='image height')
    parser.add_argument('--w_len', dest='w_len', type=int,
                        default=600, help='image width')
    parser.add_argument('--h_overlap', dest='h_overlap', type=int,
                        default=150, help='height overlap')
    parser.add_argument('--w_overlap', dest='w_overlap', type=int,
                        default=150, help='width overlap')
    return parser.parse_args()
class TestDOTA(object):
    """Multi-GPU sliding-window test driver for DOTA.

    Splits the image list across GPUs, crops each large image into
    overlapping windows, runs (optionally multi-scale / flipped)
    detection per window, merges results with per-class rotated NMS and
    either draws the boxes or writes DOTA Task1 submission files.
    """

    def __init__(self, cfgs):
        self.cfgs = cfgs
        self.args = parse_args()
        label_map = LabelMap(cfgs)
        # name -> label id and label id -> name lookup tables.
        self.name_label_map, self.label_name_map = label_map.name2label(), label_map.label2name()

    def worker(self, gpu_id, images, det_net, result_queue):
        """Detect on `images` using one GPU; push a result dict per image
        onto `result_queue`. Runs in its own process."""
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not BGR
        img_batch = tf.cast(img_plac, tf.float32)
        # Normalize according to the pretrained backbone's convention.
        pretrain_zoo = PretrainModelZoo()
        if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
            img_batch = (img_batch / 255 - tf.constant(self.cfgs.PIXEL_MEAN_)) / tf.constant(self.cfgs.PIXEL_STD)
        else:
            img_batch = img_batch - tf.constant(self.cfgs.PIXEL_MEAN)
        img_batch = tf.expand_dims(img_batch, axis=0)
        detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
            input_img_batch=img_batch)
        init_op = tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        )
        restorer, restore_ckpt = det_net.get_restorer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if not restorer is None:
                restorer.restore(sess, restore_ckpt)
                print('restore model %d ...' % gpu_id)
            for img_path in images:
                # if 'P0016' not in img_path:
                #     continue
                img = cv2.imread(img_path)
                box_res_rotate = []
                label_res_rotate = []
                score_res_rotate = []
                imgH = img.shape[0]
                imgW = img.shape[1]
                img_short_side_len_list = self.cfgs.IMG_SHORT_SIDE_LEN if isinstance(self.cfgs.IMG_SHORT_SIDE_LEN, list) else [
                    self.cfgs.IMG_SHORT_SIDE_LEN]
                # Single scale unless --multi_scale was given.
                img_short_side_len_list = [img_short_side_len_list[0]] if not self.args.multi_scale else img_short_side_len_list
                # Zero-pad images smaller than one window up to window size.
                if imgH < self.args.h_len:
                    temp = np.zeros([self.args.h_len, imgW, 3], np.float32)
                    temp[0:imgH, :, :] = img
                    img = temp
                    imgH = self.args.h_len
                if imgW < self.args.w_len:
                    temp = np.zeros([imgH, self.args.w_len, 3], np.float32)
                    temp[:, 0:imgW, :] = img
                    img = temp
                    imgW = self.args.w_len
                # Slide an h_len x w_len window with the configured overlap;
                # the last window in each axis is clamped to the image edge.
                for hh in range(0, imgH, self.args.h_len - self.args.h_overlap):
                    if imgH - hh - 1 < self.args.h_len:
                        hh_ = imgH - self.args.h_len
                    else:
                        hh_ = hh
                    for ww in range(0, imgW, self.args.w_len - self.args.w_overlap):
                        if imgW - ww - 1 < self.args.w_len:
                            ww_ = imgW - self.args.w_len
                        else:
                            ww_ = ww
                        src_img = img[hh_:(hh_ + self.args.h_len), ww_:(ww_ + self.args.w_len), :]
                        for short_size in img_short_side_len_list:
                            max_len = self.cfgs.IMG_MAX_LENGTH
                            if self.args.h_len < self.args.w_len:
                                new_h, new_w = short_size, min(int(short_size * float(self.args.w_len) / self.args.h_len), max_len)
                            else:
                                new_h, new_w = min(int(short_size * float(self.args.h_len) / self.args.w_len), max_len), short_size
                            img_resize = cv2.resize(src_img, (new_w, new_h))
                            resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
                                sess.run(
                                    [img_batch, detection_boxes, detection_scores, detection_category],
                                    feed_dict={img_plac: img_resize[:, :, ::-1]}
                                )
                            resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
                            src_h, src_w = src_img.shape[0], src_img.shape[1]
                            if len(det_boxes_r_) > 0:
                                # det_boxes_r_ = forward_convert(det_boxes_r_, False)
                                # Rescale boxes back to window coords, then
                                # shift them into full-image coords.
                                det_boxes_r_[:, 0::2] *= (src_w / resized_w)
                                det_boxes_r_[:, 1::2] *= (src_h / resized_h)
                                for ii in range(len(det_boxes_r_)):
                                    box_rotate = det_boxes_r_[ii]
                                    box_rotate[0::2] = box_rotate[0::2] + ww_
                                    box_rotate[1::2] = box_rotate[1::2] + hh_
                                    box_res_rotate.append(box_rotate)
                                    label_res_rotate.append(det_category_r_[ii])
                                    score_res_rotate.append(det_scores_r_[ii])
                            if self.args.flip_img:
                                # Test-time augmentation: horizontal flip,
                                # with x-coordinates mirrored back.
                                det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
                                    sess.run(
                                        [detection_boxes, detection_scores, detection_category],
                                        feed_dict={img_plac: cv2.flip(img_resize, flipCode=1)[:, :, ::-1]}
                                    )
                                if len(det_boxes_r_flip) > 0:
                                    # det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
                                    det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
                                    det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
                                    for ii in range(len(det_boxes_r_flip)):
                                        box_rotate = det_boxes_r_flip[ii]
                                        box_rotate[0::2] = (src_w - box_rotate[0::2]) + ww_
                                        box_rotate[1::2] = box_rotate[1::2] + hh_
                                        box_res_rotate.append(box_rotate)
                                        label_res_rotate.append(det_category_r_flip[ii])
                                        score_res_rotate.append(det_scores_r_flip[ii])
                                # Test-time augmentation: vertical flip,
                                # with y-coordinates mirrored back.
                                det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
                                    sess.run(
                                        [detection_boxes, detection_scores, detection_category],
                                        feed_dict={img_plac: cv2.flip(img_resize, flipCode=0)[:, :, ::-1]}
                                    )
                                if len(det_boxes_r_flip) > 0:
                                    # det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
                                    det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
                                    det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
                                    for ii in range(len(det_boxes_r_flip)):
                                        box_rotate = det_boxes_r_flip[ii]
                                        box_rotate[0::2] = box_rotate[0::2] + ww_
                                        box_rotate[1::2] = (src_h - box_rotate[1::2]) + hh_
                                        box_res_rotate.append(box_rotate)
                                        label_res_rotate.append(det_category_r_flip[ii])
                                        score_res_rotate.append(det_scores_r_flip[ii])
                box_res_rotate = np.array(box_res_rotate)
                label_res_rotate = np.array(label_res_rotate)
                score_res_rotate = np.array(score_res_rotate)
                box_res_rotate_ = []
                label_res_rotate_ = []
                score_res_rotate_ = []
                # Per-class rotated-NMS IoU thresholds.
                threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
                             'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.2, 'plane': 0.3,
                             'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
                             'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3,
                             'container-crane': 0.05, 'airport': 0.1, 'helipad': 0.1}
                for sub_class in range(1, self.cfgs.CLASS_NUM + 1):
                    index = np.where(label_res_rotate == sub_class)[0]
                    if len(index) == 0:
                        continue
                    tmp_boxes_r = box_res_rotate[index]
                    tmp_label_r = label_res_rotate[index]
                    tmp_score_r = score_res_rotate[index]
                    tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
                    # cpu nms better than gpu nms (default)
                    if self.args.cpu_nms:
                        try:
                            inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
                                                            scores=np.array(tmp_score_r),
                                                            iou_threshold=threshold[self.label_name_map[sub_class]],
                                                            max_output_size=5000)
                        except:
                            # Fall back to GPU NMS when the CPU path fails.
                            tmp_boxes_r_ = np.array(tmp_boxes_r_)
                            tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                            tmp[:, 0:-1] = tmp_boxes_r_
                            tmp[:, -1] = np.array(tmp_score_r)
                            # Note: the IoU of two same rectangles is 0
                            jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                            jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
                            inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
                                                 float(threshold[self.label_name_map[sub_class]]), 0)
                    else:
                        tmp_boxes_r_ = np.array(tmp_boxes_r_)
                        tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        tmp[:, 0:-1] = tmp_boxes_r_
                        tmp[:, -1] = np.array(tmp_score_r)
                        # Note: the IoU of two same rectangles is 0
                        jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
                        inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
                                             float(threshold[self.label_name_map[sub_class]]), 0)
                    box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                    score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                    label_res_rotate_.extend(np.array(tmp_label_r)[inx])
                result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
                               'labels': np.array(label_res_rotate_), 'image_id': img_path}
                result_queue.put_nowait(result_dict)

    def test_dota(self, det_net, real_test_img_list, txt_name):
        """Fan the image list out to one worker process per GPU and drain
        their results, either drawing boxes or writing submission files."""
        save_path = os.path.join('./test_dota', self.cfgs.VERSION)
        nr_records = len(real_test_img_list)
        pbar = tqdm(total=nr_records)
        gpu_num = len(self.args.gpus.strip().split(','))
        nr_image = math.ceil(nr_records / gpu_num)
        result_queue = Queue(500)
        procs = []
        for i, gpu_id in enumerate(self.args.gpus.strip().split(',')):
            start = i * nr_image
            end = min(start + nr_image, nr_records)
            split_records = real_test_img_list[start:end]
            proc = Process(target=self.worker, args=(int(gpu_id), split_records, det_net, result_queue))
            print('process:%d, start:%d, end:%d' % (i, start, end))
            proc.start()
            procs.append(proc)
        for i in range(nr_records):
            res = result_queue.get()
            if self.args.show_box:
                # Visualization mode: draw kept boxes onto the image.
                nake_name = res['image_id'].split('/')[-1]
                tools.makedirs(os.path.join(save_path, 'dota_img_vis'))
                draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
                draw_img = np.array(cv2.imread(res['image_id']), np.float32)
                # detected_boxes = backward_convert(res['boxes'], with_label=False)
                detected_indices = res['scores'] >= self.cfgs.VIS_SCORE
                detected_scores = res['scores'][detected_indices]
                detected_boxes = res['boxes'][detected_indices]
                detected_categories = res['labels'][detected_indices]
                drawer = DrawBox(self.cfgs)
                final_detections = drawer.draw_boxes_with_label_and_scores(draw_img,
                                                                           boxes=detected_boxes,
                                                                           labels=detected_categories,
                                                                           scores=detected_scores,
                                                                           method=2,
                                                                           is_csl=False,
                                                                           in_graph=False)
                cv2.imwrite(draw_path, final_detections)
            else:
                # Submission mode: append one line per box to the
                # per-class DOTA Task1 result files.
                CLASS_DOTA = self.name_label_map.keys()
                write_handle = {}
                tools.makedirs(os.path.join(save_path, 'dota_res'))
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')
                for i, rbox in enumerate(res['boxes']):
                    command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
                                                                                     res['scores'][i],
                                                                                     rbox[0], rbox[1], rbox[2], rbox[3],
                                                                                     rbox[4], rbox[5], rbox[6], rbox[7],)
                    write_handle[self.label_name_map[res['labels'][i]]].write(command)
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle[sub_class].close()
                # Record this image as done so later runs can skip it.
                fw = open(txt_name, 'a+')
                fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
                fw.close()
            pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
            pbar.update(1)
        for p in procs:
            p.join()

    def get_test_image(self):
        """Return the list of image paths still to be tested, honouring
        the <VERSION>.txt progress file and the --num_imgs limit."""
        txt_name = '{}.txt'.format(self.cfgs.VERSION)
        if not self.args.show_box:
            if not os.path.exists(txt_name):
                fw = open(txt_name, 'w')
                fw.close()
            fr = open(txt_name, 'r')
            img_filter = fr.readlines()
            print('****************************' * 3)
            print('Already tested imgs:', img_filter)
            print('****************************' * 3)
            fr.close()
            test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
                                 if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
                                 (img_name + '\n' not in img_filter)]
        else:
            test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
                                 if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
        assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                            ' Note that, we only support img format of (.jpg, .png, and .tiff) '
        if self.args.num_imgs == np.inf:
            real_test_img_list = test_imgname_list
        else:
            real_test_img_list = test_imgname_list[: self.args.num_imgs]
        return real_test_img_list
|
server.py | import socket
import struct
import threading
MCAST_IP = 'localhost'
MCAST_PORT = 5555
RETR_IP = 'localhost'
RETR_PORT = 7777
def create_message_block(msg):
    """Frame *msg* as a MoldUDP64-style message block: a 2-byte
    big-endian length prefix followed by the payload bytes."""
    length = len(msg)
    return struct.pack('!H%ds' % length, length, msg)
def pack_messages(session_id, start_seq_num, messages):
    """Build a MoldUDP64-style downstream packet.

    Layout: 10-byte session id, 8-byte big-endian sequence number of the
    first message, 2-byte message count, then one length-prefixed block
    per message.
    """
    blocks = b''.join(
        struct.pack('!H{}s'.format(len(m)), len(m), m) for m in messages)
    return struct.pack('!10sQH{}s'.format(len(blocks)),
                       session_id,
                       start_seq_num,
                       len(messages),
                       blocks)
class SimpleMoldUDPSrever:
    """Toy MoldUDP64-style server: broadcasts length-prefixed message
    packets to registered clients and answers retransmission requests.

    NOTE(review): the class-name typo ("Srever") is kept because the
    script below instantiates it by this name.
    """

    def __init__(self) -> None:
        # (not) multicast socket for broadcasting
        self.mcast = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.mcast.bind((MCAST_IP,MCAST_PORT))
        # unicast socket for retransmission
        self.retr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.retr.bind((RETR_IP,RETR_PORT))
        # set internal state
        self.session_id=b'testdata '
        self.next_id = 1
        # seq id -> raw message bytes, kept for retransmission requests
        # (attribute-name typo "messge" kept as-is; used consistently below).
        self.messge_buffer = dict()
        # other technical variables
        self.clients = []
        self.halt = False

    def send(self,messages,drop=False):
        # Pack `messages` starting at the current sequence number and
        # broadcast to every registered client. `drop=True` still buffers
        # and advances the sequence but deliberately skips sending, so a
        # client can exercise the retransmission path.
        # NOTE(review): the session id is hard-coded here instead of using
        # self.session_id — confirm they stay in sync before changing either.
        data = pack_messages(b'testdata ',self.next_id,messages)
        for msg in messages:
            print('#',self.next_id,msg)
            self.messge_buffer[self.next_id]=msg
            self.next_id+=1
        if not drop:
            for addr in self.clients:
                self.mcast.sendto(data,addr)

    def start_retransmitter(self):
        # Serve retransmission requests on a background thread.
        t = threading.Thread(target=self.run_retransmitter)
        t.start()

    def run_retransmitter(self):
        # Request loop: the first datagram from a new address registers it
        # as a client; subsequent requests of the form
        # (session, first_id, count) get that range re-sent from the buffer.
        self.retr.settimeout(1)
        while not self.halt:
            try:
                data, addr = self.retr.recvfrom(256)
            except socket.timeout:
                # Timeout just lets us re-check the halt flag.
                continue
            if addr not in self.clients:
                self.clients.append(addr)
                continue
            _, first_id, count = struct.unpack('!10sQH',data)
            messages = []
            for seq_id in range(first_id,first_id+count):
                messages.append(self.messge_buffer[seq_id])
            self.retr.sendto(pack_messages(self.session_id,first_id,messages),addr)
        self.retr.close()

    def stop(self):
        # Signal the retransmitter loop to exit and close the broadcast socket.
        self.halt=True
        self.mcast.close()
# Interactive driver: each input line is broadcast as a batch of
# space-separated messages. Typing "DROP" makes the NEXT batch be
# buffered but not sent (to exercise retransmission); "exit" quits.
server = SimpleMoldUDPSrever()
server.start_retransmitter()
drop = False
while True:
    messages = input('> ')
    if messages=='exit':
        break
    if messages=='DROP':
        drop = True
        continue
    messages = list(map(lambda m: m.encode('utf-8'), messages.split()))
    server.send(messages,drop)
    drop=False
server.stop() |
monitors_website.py | #!/usr/bin/env python
import asyncio
import json
import threading
import sys
from lib.monitor import Monitor
from lib.stats_writer import StatsWriter
# Load the shared configuration once at import time. Use a context
# manager so the file handle is closed instead of being leaked.
with open('config.json') as _config_file:
    config = json.load(_config_file)
def run_monitor():
    """Create the website Monitor and run its async produce loop.

    On Windows with Python 3.8+, the default Proactor event loop breaks
    some async client libraries, so switch to the selector policy first.
    (asyncio on Windows solution learnt from here:
    https://github.com/encode/httpx/issues/914#issuecomment-622586610)
    """
    monitor = Monitor(config)
    # Tuple comparison is equivalent to the old
    # `major == 3 and minor >= 8` check today, and stays correct for any
    # future major version.
    if sys.version_info >= (3, 8) and sys.platform.startswith('win'):
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    asyncio.run(monitor.monitor_and_produce(config['websites']))
def run_consumer():
    """Consume monitored website stats and insert them into storage."""
    stats_writer = StatsWriter(config)
    stats_writer.consume_and_insert()
if __name__ == '__main__':
    try:
        # Daemon threads so a hard exit doesn't hang on them.
        monitor_thread = threading.Thread(target=run_monitor, daemon=True)
        consumer_thread = threading.Thread(target=run_consumer, daemon=True)
        monitor_thread.start()
        consumer_thread.start()
        # Join temporarily in loop to allow exiting via Ctrl+C from command line
        while monitor_thread.is_alive():
            monitor_thread.join(1)
        while consumer_thread.is_alive():
            consumer_thread.join(1)
    except KeyboardInterrupt:
        print('Ctrl+C pressed. Exiting...')
        # TODO: close connections to Kafka and PostgreSQL
        sys.exit()
|
test_verified_get_set.py | # Copyright 2021 CodeNotary, Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from immudb.client import ImmudbClient
from random import randint
import pytest
import grpc._channel
import threading
import time
from tests.immuTestClient import ImmuTestClient
def setAfter(client, toWait, key, value):
    """Sleep `toWait` seconds, then store `value` under `key` on `client`.

    Helper for the blocking verifiedGetSince test below: run on a side
    thread so the main thread's blocked read can eventually complete.
    """
    time.sleep(toWait)
    client.set(key, value)
class TestVerifiedGetSet:
    """Exercise verifiedSet/verifiedGet plus the At/Since variants."""

    def test_verified(self, wrappedClient: ImmuTestClient):
        # Write two random key/value pairs and read each back verified.
        key1 = "verified_key_{:04d}".format(randint(0, 10000))
        value1 = "verified_value_{:04d}".format(randint(0, 10000))
        resp = wrappedClient.client.verifiedSet(
            key1.encode('utf8'), value1.encode('utf8'))
        readback1 = wrappedClient.client.verifiedGet(key1.encode('utf8'))
        assert value1 == readback1.value.decode('utf8')
        tx1id = readback1.id
        key2 = "verified_key_{:04d}".format(randint(0, 10000))
        value2 = "verified_value_{:04d}".format(randint(0, 10000))
        resp = wrappedClient.client.verifiedSet(
            key2.encode('utf8'), value2.encode('utf8'))
        readback2 = wrappedClient.client.verifiedGet(key2.encode('utf8'))
        assert value2 == readback2.value.decode('utf8')
        tx2id = readback2.id
        # verifiedGetAt pins the read to an exact transaction id...
        readback3 = wrappedClient.client.verifiedGetAt(key1.encode('utf8'), atTx=tx1id)
        assert value1 == readback3.value.decode('utf8')
        # ...and fails for a key not written in that transaction.
        with pytest.raises(grpc._channel._InactiveRpcError):
            readback4 = wrappedClient.client.verifiedGetAt(key2.encode('utf8'), atTx=tx1id)
        readback5 = wrappedClient.client.verifiedGetSince(key1.encode('utf8'), sinceTx=tx1id)
        assert value1 == readback5.value.decode('utf8')
        # Fresh (bytes) key/value used by the delayed-write thread below.
        key2 = "verified_key_{:04d}".format(randint(0, 10000)).encode("utf-8")
        value2 = "verified_value_{:04d}".format(randint(0, 10000)).encode("utf-8")
        if(wrappedClient.serverHigherOrEqualsToVersion("1.2.0")):
            # Starting from 1.2.0 the server returns an error if the txId does not exist
            with pytest.raises(grpc._channel._InactiveRpcError):
                readback6 = wrappedClient.client.verifiedGetSince(key1.encode('utf8'), sinceTx = tx2id + 1)
            readback6 = wrappedClient.client.verifiedGetSince(key1.encode('utf8'), sinceTx = tx2id)
            assert readback6.value.decode("utf-8") == value1
        else:
            readback6 = wrappedClient.client.verifiedGetSince(key1.encode('utf8'), sinceTx = tx2id)
            assert readback6.value.decode("utf-8") == value1
        # Get a non existing transaction: verifiedGetSince will block until the
        # other thread fills the data and makes the transaction exist.
        threading.Thread(target=setAfter, args=(wrappedClient.client, 1.5, key2, value2)).start()
        readback6 = wrappedClient.client.verifiedGetSince(key1.encode('utf8'), sinceTx = tx2id + 1)
        assert readback6.value.decode("utf-8") == value1
|
test_c10d_nccl.py | import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
# Bail out early when torch.distributed was built without c10d support.
if not c10d.is_available():
    print("c10d not available, skipping tests", file=sys.stderr)
    sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
IS_WINDOWS,
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_ASAN,
TEST_WITH_TSAN,
sandcastle_skip,
sandcastle_skip_if,
)
from torch.utils.checkpoint import checkpoint
if not IS_WINDOWS:
from torch.distributed.optim.functional_sgd import _FunctionalSGD
# These sanitizer builds are incompatible with this file's use of
# fork/spawn-based multiprocessing, so exit cleanly instead of failing.
if TEST_WITH_TSAN:
    print(
        "Skip as TSAN is not fork-safe since we're forking in a multi-threaded environment",
        file=sys.stderr,
    )
    sys.exit(0)
if TEST_WITH_ASAN:
    print(
        "Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
    )
    sys.exit(0)
class RendezvousEnvTest(TestCase):
    """Tests for env-var-driven rendezvous (``env://``) with the NCCL backend.

    Runs in a single process (world_size 1). os.environ is swapped wholesale
    via ``mock.patch.dict(..., clear=True)`` to exercise both the error paths
    (missing MASTER_ADDR / MASTER_PORT / RANK / WORLD_SIZE) and the
    URL-query-parameter override paths of ``c10d.rendezvous``.
    """

    @retry_on_connect_failures
    @requires_nccl()
    @sandcastle_skip_if(
        torch.cuda.device_count() == 0, "No GPUs available, skipping test"
    )
    def test_common_errors(self):
        # Renamed from ``vars`` to avoid shadowing the ``vars`` builtin.
        env_vars = {
            "WORLD_SIZE": "1",
            "RANK": "0",
            "MASTER_ADDR": "127.0.0.1",
            "MASTER_PORT": str(common.find_free_port()),
        }

        class Env(object):
            """Context manager replacing the entire environment with ``env``."""

            def __init__(self, env):
                self.env_patcher = mock.patch.dict(os.environ, env, clear=True)

            def __enter__(self):
                self.env_patcher.start()

            # Params renamed from ``type``/``traceback`` to avoid shadowing
            # the builtin and the stdlib module name.
            def __exit__(self, exc_type, exc_value, exc_tb):
                self.env_patcher.stop()

        def without(d, key):
            # Copy of ``d`` with ``key`` removed.
            d = d.copy()
            d.pop(key)
            return d

        def withouts(d, keys):
            # Copy of ``d`` with every key in ``keys`` removed.
            d = d.copy()
            for key in keys:
                d.pop(key)
            return d

        with Env(without(env_vars, "WORLD_SIZE")):
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
            # Supplying the missing value as a keyword argument must succeed.
            c10d.init_process_group(backend="nccl", world_size=1)
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        with Env(without(env_vars, "RANK")):
            self.assertEqual(None, os.environ.get("RANK"))
            with self.assertRaisesRegex(ValueError, "RANK expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
            c10d.init_process_group(backend="nccl", rank=0)
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        with Env(withouts(env_vars, ["RANK", "WORLD_SIZE"])):
            self.assertEqual(None, os.environ.get("RANK"))
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            c10d.init_process_group(backend="nccl", rank=0, world_size=1)
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        with Env(env_vars):
            # All four variables present: no kwargs needed.
            c10d.init_process_group(backend="nccl")
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        with Env(without(env_vars, "MASTER_ADDR")):
            self.assertEqual(None, os.environ.get("MASTER_ADDR"))
            with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
        with Env(without(env_vars, "MASTER_PORT")):
            self.assertEqual(None, os.environ.get("MASTER_PORT"))
            with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
        # URL query parameters may substitute for the missing env vars.
        with Env(without(env_vars, "WORLD_SIZE")):
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            gen = c10d.rendezvous("env://?world_size={}".format(1))
            _, _, size = next(gen)
            self.assertEqual(size, 1)
        with Env(without(env_vars, "RANK")):
            self.assertEqual(None, os.environ.get("RANK"))
            gen = c10d.rendezvous("env://?rank={}".format(0))
            _, rank, _ = next(gen)
            self.assertEqual(rank, 0)
        with Env(withouts(env_vars, ["RANK", "WORLD_SIZE"])):
            self.assertEqual(None, os.environ.get("RANK"))
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
            _, rank, size = next(gen)
            self.assertEqual(rank, 0)
            self.assertEqual(size, 1)
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
    """Checks that the default store timeout is honored by the NCCL backend."""

    @requires_nccl()
    @retry_on_connect_failures
    @sandcastle_skip_if(
        torch.cuda.device_count() == 0, "No GPUs available, skipping test"
    )
    def test_default_store_timeout_nccl(self):
        # Delegates to the backend-agnostic timeout check in the base class.
        backend = "nccl"
        self._test_default_store_timeout(backend)
class ProcessGroupNCCLNoGPUTest(TestCase):
    """Verifies ProcessGroupNCCL refuses to initialize on a GPU-less machine."""

    MAIN_PROCESS_RANK = 0

    def setUp(self):
        self.rank = self.MAIN_PROCESS_RANK
        self.world_size = 1
        # Rendezvous file used by the (single-process) FileStore.
        self.file = tempfile.NamedTemporaryFile(delete=False)

    def tearDown(self):
        pass

    @requires_nccl()
    @sandcastle_skip_if(
        torch.cuda.device_count() > 0, "GPUs are available, skipping test"
    )
    def test_init_no_gpus(self):
        """Constructing the group with zero visible GPUs must raise."""
        file_store = c10d.FileStore(self.file.name, self.world_size)
        expected_msg = "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
        with self.assertRaisesRegex(RuntimeError, expected_msg):
            c10d.ProcessGroupNCCL(file_store, self.rank, self.world_size)
class ProcessGroupNCCLTest(TestCase):
    """Single-process sanity tests for ProcessGroupNCCL collectives.
    Runs with world_size == 1 and drives each collective across all local
    GPUs directly through the process-group API (no DDP involved).
    """
    MAIN_PROCESS_RANK = 0
    def setUp(self):
        # Create the FileStore rendezvous file and record local GPU count.
        self.rank = self.MAIN_PROCESS_RANK
        self.world_size = 1
        self.file = tempfile.NamedTemporaryFile(delete=False)
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
        # that use NCCL_BLOCKING_WAIT will test it as expected.
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
        self.num_gpus = torch.cuda.device_count()
    def tearDown(self):
        pass
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_empty_tensors(self):
        """Collectives on zero-element tensors must complete and stay empty."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        xs = [torch.cuda.FloatTensor([])]
        pg.broadcast(xs).wait()
        self.assertEqual(0, xs[0].numel())
        pg.allreduce(xs).wait()
        self.assertEqual(0, xs[0].numel())
        pg.reduce(xs).wait()
        self.assertEqual(0, xs[0].numel())
        ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
        pg.allgather(ys, xs).wait()
        for y in ys[0]:
            self.assertEqual(0, y.numel())
        ys = [torch.cuda.FloatTensor([])]
        xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
        pg.reduce_scatter(ys, xs).wait()
        self.assertEqual(0, ys[0].numel())
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_broadcast_ops(self):
        """Broadcast from every root tensor; all replicas must match the root."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def broadcast(xs, rootRank, rootTensor):
            opts = c10d.BroadcastOptions()
            opts.rootRank = rootRank
            opts.rootTensor = rootTensor
            work = pg.broadcast(xs, opts)
            work.wait()
        # for every root tensor
        for rt in range(self.num_gpus):
            tensors = []
            for i in range(self.num_gpus):
                tensors.append(torch.tensor([i]).cuda(i))
            broadcast(tensors, self.rank, rt)
            for i in range(self.num_gpus):
                self.assertEqual(tensors[i], tensors[rt])
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_allreduce_ops(self):
        """Allreduce with SUM/AVG/PRODUCT/MIN/MAX; bitwise ops must be rejected."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def allreduce(tensors, op):
            opts = c10d.AllreduceOptions()
            opts.reduceOp = op
            work = pg.allreduce(tensors, opts)
            work.wait()
        # Sum
        tensors = []
        for i in range(self.num_gpus):
            tensors.append(torch.tensor([i + 1]).cuda(i))
        allreduce(tensors, c10d.ReduceOp.SUM)
        for i in range(self.num_gpus):
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(
                torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
                tensors[i],
            )
        # Avg (only available for NCCL 2.10+)
        if hasattr(c10d.ReduceOp, "AVG"):
            tensors = [torch.tensor([i + 1]).cuda(i) for i in range(self.num_gpus)]
            allreduce(tensors, c10d.ReduceOp.AVG)
            for i in range(self.num_gpus):
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                ndev = float(self.num_gpus)
                self.assertEqualIgnoreType(
                    torch.tensor([ndev * (ndev + 1.) / (2. * ndev)]),
                    tensors[i],
                )
        # Product
        tensors = []
        for i in range(self.num_gpus):
            tensors.append(torch.tensor([i + 1]).cuda(i))
        allreduce(tensors, c10d.ReduceOp.PRODUCT)
        for i in range(self.num_gpus):
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(
                torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
            )
        # Min
        tensors = []
        for i in range(self.num_gpus):
            tensors.append(torch.tensor([i + 1]).cuda(i))
        allreduce(tensors, c10d.ReduceOp.MIN)
        for i in range(self.num_gpus):
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
        # Max
        tensors = []
        for i in range(self.num_gpus):
            tensors.append(torch.tensor([i + 1]).cuda(i))
        allreduce(tensors, c10d.ReduceOp.MAX)
        for i in range(self.num_gpus):
            self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
        # Bitwise reductions are not supported by NCCL and must error out.
        for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
            with self.assertRaisesRegex(
                RuntimeError, "Cannot use " + str(op) + " with NCCL"
            ):
                allreduce(tensors, op)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_ops(self):
        """Reduce (SUM) into every root tensor; bitwise ops must be rejected."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def reduce(xs, rootRank, rootTensor, op=None):
            opts = c10d.ReduceOptions()
            opts.rootRank = rootRank
            opts.rootTensor = rootTensor
            if op:
                opts.reduceOp = op
            work = pg.reduce(xs, opts)
            work.wait()
        # for every root tensor
        for rt in range(self.num_gpus):
            tensors = []
            for i in range(self.num_gpus):
                tensors.append(torch.tensor([i + 1]).cuda(i))
            reduce(tensors, self.rank, rt)
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(
                torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
                tensors[rt],
            )
            for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
                with self.assertRaisesRegex(
                    RuntimeError, "Cannot use " + str(op) + " with NCCL"
                ):
                    reduce(tensors, self.rank, rt, op)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_allgather_ops(self):
        """Allgather one tensor per GPU; each output slot must hold its source."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def allgather(output_ts, input_ts):
            work = pg.allgather(output_ts, input_ts)
            work.wait()
        tensors = []
        output_ts = [[] for _ in range(self.num_gpus)]
        for idx, ls in enumerate(output_ts):
            for _ in range(self.world_size * self.num_gpus):
                ls.append(torch.tensor([0]).cuda(idx))
        for i in range(self.num_gpus):
            tensors.append(torch.tensor([i]).cuda(i))
        allgather(output_ts, tensors)
        # Verification
        for device_ts in output_ts:
            for s_idx, t in enumerate(device_ts):
                self.assertEqual(torch.tensor([s_idx]), t)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_allgather_base_ops(self):
        """_allgather_base into a flat output tensor (one input per rank)."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def allgather_base(output_t, input_t):
            work = pg._allgather_base(output_t, input_t)
            work.wait()
        device_id = self.rank % self.num_gpus
        # allgather_base is GPU number agnostic.
        # Each rank contribute one tensor regardless of GPU counts
        tensor = torch.tensor([self.rank]).cuda(device_id)
        output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id)
        allgather_base(output_t, tensor)
        # Verification
        self.assertEqual(torch.arange(self.world_size), output_t)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_allgather_base_basics(self):
        """_allgather_base must reject mis-sized and mis-typed output tensors."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def allgather_base(output_t, input_t):
            work = pg._allgather_base(output_t, input_t)
            work.wait()
        device_id = self.rank % self.num_gpus
        # anticipate an error
        with self.assertRaisesRegex(
            RuntimeError,
            "output tensor size must be equal to world_size times input tensor size",
        ):
            tensor = torch.tensor([self.rank]).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(
                device_id
            )
            # fails the check because output_t is not correctly sized
            allgather_base(output_t, tensor)
        # anticipate an error
        with self.assertRaisesRegex(
            RuntimeError, "output tensor must have the same type as input tensor"
        ):
            tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
                device_id
            )
            # fails the check because the dtype is different
            allgather_base(output_t, tensor)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_scatter_base_basics(self):
        """_reduce_scatter_base must reject mis-sized and mis-typed inputs."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def reduce_scatter_base(output_t, input_t):
            work = pg._reduce_scatter_base(output_t, input_t)
            work.wait()
        device_id = self.rank % self.num_gpus
        # anticipate an error
        with self.assertRaisesRegex(
            RuntimeError,
            "input tensor must be the same size as output size times world size",
        ):
            input_t = torch.tensor([self.rank]).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
                device_id
            )
            # fails the check because output_t is not correctly sized
            reduce_scatter_base(output_t, input_t)
        # anticipate an error
        # NOTE(review): "outut" below matches the (misspelled) message raised
        # by the C++ implementation — do not "fix" the regex independently.
        with self.assertRaisesRegex(
            RuntimeError, "input tensor must be the same type as the outut tensor."
        ):
            tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
                device_id
            )
            # fails the check because the dtype is different
            reduce_scatter_base(output_t, tensor)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_scatter_ops(self):
        """reduce_scatter with SUM/MIN/MAX/PRODUCT over per-GPU input lists."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def reduce_scatter(outputs, input_lists, op):
            opts = c10d.ReduceScatterOptions()
            opts.reduceOp = op
            work = pg.reduce_scatter(outputs, input_lists, opts)
            work.wait()
        # Each GPU acts as a virtual rank; collectives span num_gpus * world_size.
        virtual_rank = self.rank * self.world_size
        virtual_world_size = self.num_gpus * self.world_size
        output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]
        # 0                   1                   2
        # 0   [0..11]             [1..12]
        # 1   [3..14]
        # 2
        # 3
        # Sum
        tensor_lists = [
            [
                torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
                for j in range(virtual_world_size)
            ]
            for i in range(self.num_gpus)
        ]
        reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
        for i in range(self.num_gpus):
            expected = torch.tensor(
                [
                    float(self.num_gpus * (self.num_gpus - 1) / 2)
                    + (virtual_rank + i) * virtual_world_size
                ]
            )
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(expected, output[i])
        # Min
        reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
        for i in range(self.num_gpus):
            expected = torch.tensor([self.rank * self.world_size + i])
            self.assertEqual(expected, output[i])
        # Max
        reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
        for i in range(self.num_gpus):
            expected = torch.tensor(
                [self.rank * self.world_size + i + virtual_world_size - 1]
            )
            self.assertEqual(expected, output[i])
        # Product
        tensor_lists = [
            [
                torch.tensor(
                    [(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
                ).cuda(i)
                for j in range(virtual_world_size)
            ]
            for i in range(self.num_gpus)
        ]
        reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
        for i in range(self.num_gpus):
            expected = torch.tensor([float(math.factorial(virtual_world_size))])
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(expected, output[i])
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_scatter_base_ops(self):
        """_reduce_scatter_base happy path: flat input, one output per rank."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def reduce_scatter_base(output_t, input_t):
            work = pg._reduce_scatter_base(output_t, input_t)
            work.wait()
        device_id = self.rank % self.num_gpus
        # reduce_scatter_base is GPU number agnostic.
        # Each rank contribute one tensor regardless of GPU counts
        output_t = torch.empty([1]).cuda(device_id)
        tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id)
        reduce_scatter_base(output_t, tensor)
        # Verification
        self.assertEqual(output_t[0], self.rank * self.world_size)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_barrier(self):
        """barrier() must fence completion of previously-enqueued allreduces."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def allreduce(tensors):
            opts = c10d.AllreduceOptions()
            work = pg.allreduce(tensors, opts)
            return work
        # Making the collective to operate on
        # 1, 2, 3, 4, .... self.num_gpus GPUs
        tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
        for i in range(2, self.num_gpus + 1):
            for j in range(i):
                tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))
        works = []
        for tensors in tensors_list:
            work = allreduce(tensors)
            works.append(work)
        # Barrier will ensure that all previous work is completed
        pg.barrier().wait()
        for i in range(2, self.num_gpus + 1):
            for j in range(i):
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(
                    torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
                )
class DistributedDataParallelTest(
test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(4)
    def test_ddp_multi_device_module_config(self):
        """Invalid device_ids/output_device configurations must raise.
        Note: ``model`` is mutated in place between assertions (fc1 moved to
        CPU, then the whole model), so the statement order here matters.
        """
        gpus = gpus_for_rank(self.world_size)[self.rank]
        self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        gpus = gpus[:2]
        model = DoubleGpuNet(gpus)
        # output_device is not allowed for a multi-device module.
        with self.assertRaisesRegex(
            ValueError,
            "DistributedDataParallel device_ids and output_device arguments only work with "
            "single-device/multiple-device GPU modules or CPU modules",
        ):
            ddp_model = DistributedDataParallel(
                model, output_device=gpus[1], process_group=process_group
            )
        # Multiple device_ids entries are never allowed.
        with self.assertRaisesRegex(
            ValueError, "device_ids can only be None or contain a single element."
        ):
            ddp_model = DistributedDataParallel(
                model, device_ids=gpus, process_group=process_group
            )
        # Mixing CPU and GPU submodules is rejected.
        with self.assertRaisesRegex(
            ValueError, "input module must be on the same type of devices"
        ):
            model.fc1 = model.fc1.cpu()
            ddp_model = DistributedDataParallel(model, process_group=process_group)
        # A fully-CPU module still may not take multiple device_ids.
        model = model.cpu()
        with self.assertRaisesRegex(
            ValueError, "device_ids can only be None or contain a single element."
        ):
            ddp_model = DistributedDataParallel(
                model, device_ids=gpus, process_group=process_group
            )
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter, and throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First test that finding unused params under these conditions is to
# trigger an error when `backward` is called (because fc3 is an unused
# parameter and will therefore be marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith(
"Expected to mark a variable ready only once.",
)
)
unused_index = 2
unused_index_str = f"Parameter at index {unused_index}"
model = ddp_model.module
for module_name, module in model.named_modules():
if module == model.fc3:
for parameter_name, _ in module.named_parameters(recurse=False):
unused_fqn = f"{module_name}.{parameter_name}"
# Only one such parameter in model.fc3, since bias=False
break
if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF:
unused_index_str += f" with name {unused_fqn}"
self.assertTrue(unused_index_str in str(ex))
else:
self.fail("Expected exception")
dist.barrier(process_group)
# Then test that the default behavior can be overridden by setting
# `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has their gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have their gradient set.
check_no_grads()
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
    """Check that stepping ddp_model.module directly skips gradient sync,
    while stepping the DDP wrapper reduces gradients as usual.

    Args:
        gradient_as_bucket_view: forwarded to DDP construction via
            _prepare_single_device_module.
    """
    # This is NOT the recommended way to implement accumulating grads, but
    # we would like to make sure DDP does not mess up with the underlying
    # module.
    int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
    devices = [torch.device("cuda:" + str(i)) for i in int_devices]
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    # One sample per rank so each rank can slice out its own shard below.
    global_batch_size = self.world_size

    model, ddp_model, input, target = self._prepare_single_device_module(
        process_group, devices, devices, global_batch_size, gradient_as_bucket_view
    )

    def step_model(model, input, target):
        # One forward/backward pass; gradients accumulate into .grad.
        model.train()
        output = model(input)
        loss = F.mse_loss(output, target.to(output.device))
        loss.backward()

    # ensure accumulate grads works with no_grad
    with torch.no_grad():
        ddp_model.train()
        ddp_model.module(input)

    # Check two model parameters over 4 iterations.
    # Use 4 iterations because we alternate between reducing and
    # not reducing and want to make sure we switch both ways.
    for iteration in range(4):
        # Local model always sees the full global batch.
        step_model(model, input, target)

        if iteration % 2 == 0:
            # Skip gradients sync without calling prepare_for_backward
            step_model(
                ddp_model.module,
                input[self.rank : (self.rank + 1)],
                target[self.rank : (self.rank + 1)],
            )
            # Without reduction, per-rank grads must differ from the
            # full-batch local grads.
            for i, j in zip(model.parameters(), ddp_model.parameters()):
                self.assertNotEqual(i.grad, j.grad)
        else:
            step_model(
                ddp_model,
                input[self.rank : (self.rank + 1)],
                target[self.rank : (self.rank + 1)],
            )
            for i, j in zip(model.parameters(), ddp_model.parameters()):
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)

        # Shuffle the input so that DDP input is different
        torch.manual_seed(1337 + iteration)
        input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
    """Gradient accumulation on the wrapped module with default DDP settings."""
    self._test_accumulate_gradients_module()

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
    """Same check, but with gradients stored as views into DDP's buckets."""
    self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
    """Verify that DDP training can resume after the original FileStore and
    process group are torn down and re-created from a fresh store."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    # need to create a separate file for the recovered FileStore, because
    # the original one will be deleted when destructing the first FileStore.
    recovery_filename = self.file_name + "_recovery"

    if self.rank == 0:
        # the file will be deleted by the recovered FileStore
        open(recovery_filename, "w").close()

    # not necessary to run barrier here, as DDP will synchronize

    class TestModel(nn.Module):
        def __init__(self):
            super(TestModel, self).__init__()
            self.fc1 = nn.Linear(2, 10, bias=False)
            self.fc2 = nn.Linear(10, 4, bias=False)
            self.relu = nn.ReLU()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            return F.softmax(x, dim=1)

    device_id = gpus_for_rank(self.world_size)[self.rank][0]
    model = TestModel().float().to(device_id)
    ddp = DistributedDataParallel(
        model,
        device_ids=[device_id],
        process_group=process_group,
    )

    batch_size = 4
    criterion = nn.CrossEntropyLoss()
    input = torch.rand([batch_size, 2], dtype=torch.float)
    target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
        device_id
    )
    # First training phase with the original store/process group.
    for _ in range(6):
        output = ddp(input)
        loss = criterion(output, target)
        loss.backward()

    # Tear everything down; deleting the store removes self.file_name.
    del ddp
    del process_group
    del store  # this will delete self.file_name

    # Recover: rebuild store/pg/DDP around the *same* local model and train again.
    store = c10d.FileStore(recovery_filename, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    ddp = DistributedDataParallel(
        model,
        device_ids=[device_id],
        process_group=process_group,
    )

    input = torch.rand([batch_size, 2], dtype=torch.float)
    target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
        device_id
    )
    for _ in range(6):
        output = ddp(input)
        loss = criterion(output, target)
        loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
    """Destroying the default process group (passed explicitly) must leave
    the global distributed state uninitialized."""
    dist.init_process_group(
        "nccl",
        init_method=f"file://{self.file_name}",
        world_size=self.world_size,
        rank=self.rank,
    )

    default_pg = c10d.distributed_c10d._get_default_group()
    dist.destroy_process_group(default_pg)
    self.assertFalse(dist.is_initialized())
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
    """Compare gradients of a local ConvNet against its DDP copy across a
    grid of memory formats, dtypes, and bucket sizes, asserting that grads
    keep the expected memory layout and match numerically.

    Args:
        replica_devices: device_ids to pass to DDP (None for multi-device modules).
        layer_devs: a single device or a per-layer list of devices for ConvNet.
        local_batch_size: per-rank batch size.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    global_batch_size = local_batch_size * self.world_size

    # Carry out some trials with small buckets and some with big buckets.
    bucketsizes = (0.000001, 25)
    # Tuples of lists.  Each list describes per-layer characteristics for one trial.
    layer_formats = (
        [torch.contiguous_format] * 4,
        [torch.channels_last] * 2 + [torch.contiguous_format] * 2,
        [torch.channels_last] * 4,
    )
    layer_dtypes = (
        [torch.float] * 4,
        [torch.float] * 2 + [torch.half] * 2,
        [torch.half] * 4,
    )

    input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
    target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
    input = torch.randn(
        (global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
    )
    target = torch.randn(
        (global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
    )
    local_batch_start = self.rank * local_batch_size
    local_batch_end = (self.rank + 1) * local_batch_size

    # Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
    # argument.  The following makes sure the initial bucket also complies.
    @contextmanager
    def first_bucket_size(ddp_bucket_mb):
        old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
        dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
        try:
            yield
        finally:
            dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES

    # Deterministic cudnn so local and DDP grads can be compared exactly
    # (within tolerance).
    with torch.backends.cudnn.flags(
        enabled=True, deterministic=True, benchmark=False
    ):
        for formats, dtypes, bucketsize in product(
            layer_formats, layer_dtypes, bucketsizes
        ):
            with first_bucket_size(bucketsize):
                model_msg = (
                    "rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
                        self.rank, formats, dtypes, bucketsize
                    )
                )
                try:
                    m = ConvNet(layer_devs, formats, dtypes)
                    m_ddp = DistributedDataParallel(
                        copy.deepcopy(m),
                        device_ids=replica_devices,
                        process_group=process_group,
                        bucket_cap_mb=bucketsize,
                    )
                    opt = torch.optim.SGD(m.parameters(), lr=0.1)
                    opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
                    has_half = any(p.dtype is torch.half for p in m.parameters())
                    tol = 1.0e-3 if has_half else 1.0e-5
                except BaseException:
                    # Prints case-specific debugging info to narrow down failing case.
                    print(
                        "Caught exception during model creation for " + model_msg,
                        flush=True,
                    )
                    raise
                # 3 iters: First iter creates grads, second iter retests after rebucketing,
                # third iter tries zeroed grads.
                for it in range(3):
                    iter_msg = "iter = {} ".format(it) + model_msg
                    named_msg = iter_msg
                    try:
                        # Local model trains on the full batch, DDP on this
                        # rank's shard; allreduce should make grads agree.
                        F.mse_loss(m(input).float(), target).backward()
                        F.mse_loss(
                            m_ddp(input[local_batch_start:local_batch_end]).float(),
                            target[local_batch_start:local_batch_end],
                        ).backward()
                        for i, ((layer_name, m_child), m_ddp_child) in enumerate(
                            zip(m.named_children(), m_ddp.module.children())
                        ):
                            named_msg = layer_name + ".weight" + " " + iter_msg
                            # Both copies must preserve the per-layer
                            # memory format in their gradients.
                            self.assertTrue(
                                m_child.weight.grad.is_contiguous(
                                    memory_format=formats[i]
                                ),
                                named_msg,
                            )
                            self.assertTrue(
                                m_ddp_child.weight.grad.is_contiguous(
                                    memory_format=formats[i]
                                ),
                                named_msg,
                            )
                            for j, ((param_name, p), p_ddp) in enumerate(
                                zip(
                                    m_child.named_parameters(),
                                    m_ddp_child.parameters(),
                                )
                            ):
                                named_msg = (
                                    layer_name + "." + param_name + " " + iter_msg
                                )
                                self.assertEqual(
                                    p.grad, p_ddp.grad, rtol=tol, atol=tol
                                )
                        opt.step()
                        opt_ddp.step()
                        # Alternate between set-to-None and zeroing so both
                        # grad-reset paths are exercised.
                        if it == 0:
                            for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
                                p.grad = None
                                p_ddp.grad = None
                        else:
                            m.zero_grad()
                            m_ddp.zero_grad()
                    except BaseException:
                        # Makes sure we still get info if an error occurred somewhere other than the asserts.
                        print(
                            "Caught exception during iterations at " + named_msg,
                            flush=True,
                        )
                        raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
    """Grad layout check for a single-device module (one replica per process)."""
    dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
    # Tells DDP to use just one device.
    replica_devices = [dev0]
    # Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
    layer_devs = dev0
    local_batch_size = 8
    self._test_grad_layout(replica_devices, layer_devs, local_batch_size)

@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
    """Grad layout check for a module split across two devices per process."""
    int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
    dev0 = torch.device("cuda:" + str(int_devices[0]))
    dev1 = torch.device("cuda:" + str(int_devices[1]))
    # DDP's default behavior for a multi-device module is "don't replicate."
    replica_devices = None
    # Tells _test_grad_layout to constructs this process's ConvNet on 2 devices, with 2 layers on each device.
    layer_devs = [dev0] * 2 + [dev1] * 2
    local_batch_size = 8
    self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
    """When ranks construct DDP with different parameter memory layouts,
    the non-rank-0 ranks should raise a layout-mismatch RuntimeError."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
    layer_devs = dev0
    # rank 0 uses contiguous layout, all other ranks channels_last, so the
    # initial parameter broadcast detects the stride mismatch.
    layer_formats = (
        [torch.contiguous_format] * 4
        if self.rank == 0
        else [torch.channels_last] * 4
    )
    layer_dtypes = [torch.float] * 4

    m = ConvNet(layer_devs, layer_formats, layer_dtypes)
    if self.rank == 0:
        m_ddp = DistributedDataParallel(
            m, device_ids=[dev0], process_group=process_group
        )
    else:
        with self.assertRaisesRegex(
            RuntimeError,
            ".* appears not to match strides of the same param in process 0",
        ):
            m_ddp = DistributedDataParallel(
                m, device_ids=[dev0], process_group=process_group
            )
def _gpu_model_with_ddp_comm_hook(
    self,
    process_group,
    hook=None,
    gradient_as_bucket_view=False,
    state=None,
    static_graph=False,
):
    """Build a DDP-wrapped ModuleForDdpCommHook on this rank's first GPU.

    Args:
        process_group: process group handed to DDP.
        hook: optional communication hook; registered with ``state`` when given.
        gradient_as_bucket_view: forwarded to the DDP constructor.
        state: opaque state object passed to ``register_comm_hook``.
        static_graph: when True, mark the DDP graph as static.

    Returns:
        The constructed DistributedDataParallel model.
    """
    target_gpu = gpus_for_rank(self.world_size)[self.rank][0]
    ddp_model = DistributedDataParallel(
        ModuleForDdpCommHook().to(target_gpu),
        device_ids=[target_gpu],
        process_group=process_group,
        gradient_as_bucket_view=gradient_as_bucket_view,
    )

    if static_graph:
        ddp_model._set_static_graph()

    # Register a DDP communication hook if any.
    if hook is not None:
        ddp_model.register_comm_hook(state, hook)

    return ddp_model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
    """
    This unit test verifies whether the Future object is passed properly using nccl backend.
    The hook callback function creates a Future object and sets a value to it.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    # Get GPU model with simple_hook registered.
    gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)

    # check whether the grads are equal to what simple_hook's then callback returns.
    # without the comm_hook, result would be 0.25 * torch.ones(2, 2).
    self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(
    self, gradient_as_bucket_view=False, static_graph=False
):
    """
    This unit test verifies whether a DDP communication hook that just calls
    allreduce gives the same result with the case of no hook registered.
    Without the then callback, the future_value in reducer is no longer
    a PyObject, and this unit test verifies future_value is properly checked.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    def allreduce_hook(
        state: object, bucket: dist.GradBucket
    ) -> torch.futures.Future[torch.Tensor]:
        # Pre-divide by world size so the allreduce produces the average.
        tensors = [bucket.buffer() / self.world_size]
        return (
            process_group.allreduce(tensors)
            .get_future()
            .then(lambda fut: fut.value()[0])
        )

    # Get GPU model with allreduce_hook registered.
    gpu_model = self._gpu_model_with_ddp_comm_hook(
        process_group, allreduce_hook, gradient_as_bucket_view, static_graph
    )

    # check whether the grads are equal to what DDP without hook would return.
    self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
    """
    This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS
    can give the same result with the case of no hook registered.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    # For these default DDP comm hooks, the only state is process group.
    state = process_group
    for hook in [default.allreduce_hook, default.fp16_compress_hook]:
        # Get GPU model with the hook registered.
        # The first arg 'process_group' is used for initializing the test environment,
        # so it cannot be replaced by 'state', although they have the same value.
        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group, hook, gradient_as_bucket_view, state
        )

        # check whether the grads are equal to what DDP without hook would return.
        self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
    """
    This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
    the FP16_WRAPPER can give the same result as when there is no hook registered.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)

    # Each entry pairs a hook with the state object it expects.
    hook_args = [
        (powerSGD.powerSGD_hook, powerSGD_state),
        (default.allreduce_hook, process_group),
    ]

    for hook, state in hook_args:
        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group,
            default.fp16_compress_wrapper(hook),
            gradient_as_bucket_view,
            state,
        )

        # check whether the grads are equal to what DDP without hook would return.
        self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_hook_then_optimizer(self, gradient_as_bucket_view=False):
    """Verify that fusing the optimizer into the comm hook via
    _hook_then_optimizer yields the same parameters as running allreduce
    followed by a separate SGD step."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    hook, hook_state = default.allreduce_hook, process_group
    sgd_lr = 1e-2
    sgd_momentum = 0.9
    sgd_weight_decay = 0.01
    opt_hook_state = default._OptimizerHookState(
        _FunctionalSGD, sgd_lr, momentum=sgd_momentum, weight_decay=sgd_weight_decay
    )
    gpu_model = self._gpu_model_with_ddp_comm_hook(
        process_group,
        default._hook_then_optimizer(hook, opt_hook_state),
        gradient_as_bucket_view,
        hook_state,
    )
    prev_params = copy.deepcopy(list(gpu_model.parameters()))
    # Run model with optimizer as part of hook
    for _ in range(8):
        gpu_model.zero_grad()
        self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
    new_params = list(gpu_model.parameters())

    # Run plain model with allreduce hook and separate optimizer step.
    # Verify gradients are the same.
    gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook(
        process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state
    )
    sgd = torch.optim.SGD(
        gpu_model_allreduce.parameters(),
        sgd_lr,
        momentum=sgd_momentum,
        weight_decay=sgd_weight_decay,
    )
    for _ in range(8):
        gpu_model_allreduce.zero_grad()
        self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2))
        sgd.step()
    post_opt_params = list(gpu_model_allreduce.parameters())
    # Both training schemes must land on identical parameters.
    for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params):
        self.assertEqual(opt_as_hook_param, post_opt_param)
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
    """
    This unit test verifies whether Python DDP communication hook POWER_SGD
    can give the same result with the case of no hook registered.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    # Get GPU model with the hook registered.
    # Test the hook with different algorithmic configs.
    for use_error_feedback, warm_start in product([True, False], [True, False]):
        state = powerSGD.PowerSGDState(
            process_group=process_group,
            matrix_approximation_rank=1,
            use_error_feedback=use_error_feedback,
            warm_start=warm_start,
        )
        # Exercise both the per-tensor and the batched PowerSGD variants.
        for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
            gpu_model = self._gpu_model_with_ddp_comm_hook(
                process_group, hook, gradient_as_bucket_view, state
            )

            # check whether the grads are equal to what DDP without hook would return.
            self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
    """
    This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
    can give the same result with the case of no hook registered.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    for comm_hook_type in [
        dist.BuiltinCommHookType.ALLREDUCE,
        dist.BuiltinCommHookType.FP16_COMPRESS,
    ]:
        # Get GPU model with the built-in communication hook.
        gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
            process_group, comm_hook_type, gradient_as_bucket_view
        )

        # check whether the grads are equal to what DDP without hook would return.
        self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
    """Allreduce comm hook with default DDP settings."""
    self._test_ddp_comm_hook_allreduce_hook_nccl()

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
    """Default ALLREDUCE/FP16_COMPRESS hooks with default DDP settings."""
    self._test_default_ddp_comm_hooks_nccl()

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
    """FP16 compress wrapper around allreduce/PowerSGD hooks."""
    self._test_fp16_compress_wrapper()

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_optimizer_nccl(self):
    """Optimizer fused into the comm hook, default settings."""
    self._test_hook_then_optimizer()

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_optimizer_nccl_grad_as_bucket_view(self):
    """Optimizer fused into the comm hook, with gradient-as-bucket-view."""
    self._test_hook_then_optimizer(gradient_as_bucket_view=True)

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
    """Built-in C++ comm hooks with default DDP settings."""
    self._test_builtin_ddp_comm_hooks_nccl()

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
    """PowerSGD comm hook with default DDP settings."""
    self._test_powerSGD_ddp_comm_hook_nccl()

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
    """Allreduce comm hook with gradient-as-bucket-view enabled."""
    self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
    """Allreduce comm hook with static graph enabled."""
    self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
    """Default hooks with gradient-as-bucket-view enabled."""
    self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
    """FP16 compress wrapper with gradient-as-bucket-view enabled."""
    self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
    """Built-in hooks with gradient-as-bucket-view enabled."""
    self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)

@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
    """PowerSGD comm hook with gradient-as-bucket-view enabled."""
    self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
    """
    This unit test verifies whether a DDP communication hook that calls allreduce and then
    multiplies the result by ten and divides by two gives the expected result.
    """
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    def allreduce_with_then_hook(
        state: object, bucket: dist.GradBucket
    ) -> torch.futures.Future[torch.Tensor]:
        tensors = [bucket.buffer() / self.world_size]
        fut = process_group.allreduce(tensors).get_future()

        def mult(fut):
            # Multiply the result by 10.
            return 10 * fut.value()[0]

        def div(fut):
            # Divide the result by 2.
            return 0.5 * fut.value()

        # Chained callbacks: allreduce -> *10 -> *0.5, i.e. a net *5.
        return fut.then(mult).then(div)

    # Get GPU model with allreduce_with_then_hook registered.
    gpu_model = self._gpu_model_with_ddp_comm_hook(
        process_group, allreduce_with_then_hook
    )

    # check whether the grads are equal to what allreduce returns multiplied by 5.
    # without the comm_hook, result would be still 0.25 * torch.ones(2, 2).
    self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
    """Module built around an externally supplied (possibly shared) parameter.

    ``forward(input)`` returns ``input + p * factor``.
    """

    def __init__(self, p, factor):
        super().__init__()
        # Keep attribute names short; `a` is the shared parameter and `f`
        # the per-instance scaling factor.
        self.a, self.f = p, factor

    def forward(self, input):
        scaled = self.a * self.f
        return input + scaled
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
    """Verify gradients when the same Parameter is shared by two submodules,
    across all combinations of set_to_none and gradient_as_bucket_view."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    size = 2048 * 2048
    dev = self.rank
    world = self.world_size

    # One parameter shared between both AcceptsParam instances.
    p = torch.nn.Parameter(torch.randn(size, requires_grad=True))

    for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
        m = torch.nn.Sequential(
            self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
        ).cuda(dev)

        m = torch.nn.parallel.DistributedDataParallel(
            m,
            bucket_cap_mb=1,
            gradient_as_bucket_view=use_bucket_view,
            device_ids=[dev],
            process_group=process_group,
        )

        for i in range(3):
            m.zero_grad(set_to_none=try_set_to_none)
            m(1).sum().backward()

            # Each param value is multiplied by "rank + 1" twice in forward, so the grad
            # values produced by a particular rank should be 2. * (rank + 1).
            # Summing these over ranks and dividing by world size gives the expected result:
            analytic = torch.full_like(
                p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
            )
            for name, p in m.named_parameters():
                self.assertEqual(
                    p.grad,
                    analytic,
                    "mismatch at "
                    + name
                    + ".grad for "
                    + "set_to_none = {}, use_bucket_view = {}".format(
                        try_set_to_none, use_bucket_view
                    ),
                )
# A list of tests for DDP with activation checkpointing
# when gradient_as_bucket_view=True or False.
# Most of these tests are adapted from
# https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
    """Two linear layers, where only the second one is activation-checkpointed."""

    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(20, 20)
        self.l2 = nn.Linear(20, 20)

    def forward(self, inp):
        hidden = self.l1(inp)
        # Recompute l2's activations during backward instead of storing them.
        return checkpoint(self.l2, hidden)
class CheckpointTwiceModule(CheckpointOnceModule):
    """Variant that runs the same layer (l2) through checkpoint twice.

    Used to verify that DDP (without static_graph) rejects a layer being
    marked ready more than once.
    """

    # NOTE: the original defined an __init__ that only called
    # super().__init__(); it was redundant and has been removed —
    # the inherited constructor is used unchanged.

    def forward(self, inp):
        x = self.l1(inp)
        x = checkpoint(self.l2, x)
        x = checkpoint(self.l2, x)
        return x
def _prepare_dummy_data(self):
    """Create a full CUDA batch plus this rank's shard of it.

    Returns:
        (input, ddp_input, target, ddp_target) where the ddp_* tensors are
        this rank's contiguous slice of the global tensors.
    """
    per_rank_bs = 16
    total_bs = per_rank_bs * self.world_size
    input = torch.rand((total_bs, 20), device="cuda", requires_grad=True)
    target = torch.randn((total_bs, 20), device="cuda")
    start = self.rank * per_rank_bs
    end = start + per_rank_bs
    return input, input[start:end], target, target[start:end]
def _train_model(self, model, input_var, target, loss, run_checkpoint=False):
    """Run one forward/backward step.

    Args:
        model: module to train.
        input_var: input batch.
        target: target batch for the loss.
        loss: loss callable taking (output, target).
        run_checkpoint: when True, checkpoint the whole model's forward.
    """
    model.train()
    if run_checkpoint:
        output = checkpoint(model, input_var)
    else:
        output = model(input_var)
    loss(output, target).backward()
def _test_ddp_checkpointing(
    self,
    input_model,
    process_group,
    use_bucket_view,
    find_unused_parameters=False,
    static_graph=False,
    run_checkpoint=False,
):
    """Train a local copy and a DDP copy of ``input_model`` for five steps
    and assert their gradients match.

    Args:
        input_model: module to duplicate for local and DDP training.
        process_group: NCCL process group for DDP.
        use_bucket_view: forwarded as gradient_as_bucket_view.
        find_unused_parameters / static_graph: forwarded to DDP.
        run_checkpoint: checkpoint the whole forward in _train_model.
    """
    # to reproduce the same training results
    torch.cuda.set_device(self.rank)
    torch.manual_seed(31415)
    model = copy.deepcopy(input_model).cuda()
    ddp_model = copy.deepcopy(input_model).cuda()
    ddp_model = nn.parallel.DistributedDataParallel(
        ddp_model,
        bucket_cap_mb=1,
        gradient_as_bucket_view=use_bucket_view,
        device_ids=[self.rank],
        process_group=process_group,
        find_unused_parameters=find_unused_parameters,
    )
    if static_graph:
        ddp_model._set_static_graph()
    # DDP's logging data should reflect the static-graph setting.
    self.assertEqual(
        ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
    )
    input, ddp_input, target, ddp_target = self._prepare_dummy_data()
    loss = nn.MSELoss()
    for i in range(5):
        model.zero_grad(set_to_none=False)
        ddp_model.zero_grad(set_to_none=False)
        self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)
        self._train_model(
            ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint
        )
        for i, j in zip(model.parameters(), ddp_model.parameters()):
            self.assertTrue(i.grad is not None)
            self.assertTrue(j.grad is not None)
            self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# DDP works as expected when a layer is checkpointed only once
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_once(self):
    """Single-checkpoint model trains correctly under DDP for all
    combinations of bucket-view and static-graph settings."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    for use_bucket_view, static_graph in product((False, True), (False, True)):
        self._test_ddp_checkpointing(
            self.CheckpointOnceModule(),
            process_group=process_group,
            use_bucket_view=use_bucket_view,
            static_graph=static_graph,
        )
# DDP will fail when there are unused_parameters in the model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_unused_params(self):
    """With find_unused_parameters=True, checkpointing raises unless
    static_graph is enabled."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    for use_bucket_view in (True, False):
        with self.assertRaisesRegex(
            RuntimeError,
            "Expected to mark a variable ready only once.",
        ):
            model = self._test_ddp_checkpointing(
                self.CheckpointOnceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                find_unused_parameters=True,
                static_graph=False,
            )
        # test passes when static_graph is true
        model = self._test_ddp_checkpointing(
            self.CheckpointOnceModule(),
            process_group=process_group,
            use_bucket_view=use_bucket_view,
            find_unused_parameters=True,
            static_graph=True,
        )
# DDP will fail when the same layer is checkpointed twice
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice(self):
    """Checkpointing the same layer twice raises unless static_graph is
    enabled."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    for use_bucket_view in (True, False):
        with self.assertRaisesRegex(
            RuntimeError,
            "Expected to mark a variable ready only once.",
        ):
            model = self._test_ddp_checkpointing(
                self.CheckpointTwiceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                static_graph=False,
            )
        model = self._test_ddp_checkpointing(
            self.CheckpointTwiceModule(),
            process_group=process_group,
            use_bucket_view=use_bucket_view,
            static_graph=True,
        )
# DDP works as expected if there is weight sharing among layers
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_weight_sharing(self):
    """Checkpointing with two layers that share one weight trains correctly
    under DDP."""
    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
    torch.cuda.set_device(self.rank)
    for use_bucket_view, static_graph in product((False, True), (False, True)):
        torch.manual_seed(31415)
        l1 = nn.Linear(20, 20)
        l2 = nn.Linear(20, 20)
        # Tie the two layers' weights so a single Parameter is shared.
        l1.weight = l2.weight
        model = nn.Sequential(l1, l2)
        self._test_ddp_checkpointing(
            model,
            process_group=process_group,
            use_bucket_view=use_bucket_view,
            static_graph=static_graph,
            run_checkpoint=True,
        )
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block Python thread as allreduce enqueues
# the cuda operation, and then wait only blocks the current cuda
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait()
# Run some GPU operations to make sure cuda has not gotten stuck.
# It was observed cuda could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (ex: files for FileStore before going down)
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip(
"Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
)
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait()
def _run_invalid_nccl_blocking_wait_env(self, val):
    # Helper: setting NCCL_BLOCKING_WAIT to an invalid value must make
    # ProcessGroupNCCL construction raise a RuntimeError.
    # NOTE(review): the env var is not restored afterwards; verify that
    # later tests in this process are not affected by the leftover value.
    os.environ["NCCL_BLOCKING_WAIT"] = val
    store = c10d.FileStore(self.file_name, self.world_size)
    with self.assertRaises(RuntimeError):
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
    # Non-boolean and out-of-range values must all be rejected.
    self._run_invalid_nccl_blocking_wait_env("abc")
    self._run_invalid_nccl_blocking_wait_env("-1")
    self._run_invalid_nccl_blocking_wait_env("2147483647")
    self._run_invalid_nccl_blocking_wait_env("4294967295")
def _wait_for_comm_abort(self, process_group):
    """
    Waits for the watchdog thread to abort communicators for the process group.

    Polls by issuing a small allreduce once per second; returns as soon as
    the collective fails with the "NCCL communicator was aborted" error.
    Any other failure is propagated to the caller.
    """
    while True:
        try:
            process_group.allreduce(torch.rand(10).cuda(self.rank))
        except Exception as e:
            if "NCCL communicator was aborted" in str(e):
                return
            else:
                # Unexpected error: re-raise with the original traceback
                # intact (bare `raise` instead of `raise e`).
                raise
        time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
    # Rank 0 issues a collective the other ranks deliberately skip, so its
    # wait() must fail with a timeout/abort error under blocking wait.
    store = c10d.FileStore(self.file_name, self.world_size)
    # Initialize process_group.
    timeout = 1
    process_group = c10d.ProcessGroupNCCL(
        store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
    )
    process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
    if self.rank == 0:
        # This should timeout in about 1 second.
        # NOTE(review): `start` is recorded but never used; the elapsed
        # time is not actually asserted.
        start = time.time()
        # Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
        with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
            process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
    else:
        # Sleep to ensure timeout.
        time.sleep(2 * timeout)
    self._wait_for_comm_abort(process_group)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
    """Multi-process NCCL tests for broadcast coalescing, sequence numbers,
    process-group options, and barrier behavior."""

    def setUp(self):
        super(CommTest, self).setUp()
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
        # that use NCCL_BLOCKING_WAIT will test it as expected.
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
        self._spawn_processes()

    def tearDown(self):
        super(CommTest, self).tearDown()
        # The FileStore file may already be gone; that's fine.
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    def _test_broadcast_coalesced(self, process_group, device, root_rank):
        # Builds a mixed-dtype list of tensor chunks and checks that
        # _broadcast_coalesced replicates the root's values everywhere.
        half = torch.float16
        # No support for float16 for CPU tensors
        if device == torch.device("cpu"):
            half = torch.float32
        target = torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
        target += torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
        target += torch.arange(60, dtype=half, device=device).chunk(5)
        target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
        # The tensors to pass to broadcast are identical to the target
        # only on the process that is the root of the broadcast.
        if self.rank == root_rank:
            tensors = list(tensor.clone() for tensor in target)
        else:
            tensors = list(torch.zeros_like(tensor) for tensor in target)
        if self.rank != root_rank:
            self.assertNotEqual(tensors, target)
        c10d._broadcast_coalesced(
            process_group, tensors, buffer_size=256, src=root_rank
        )
        if self.rank != root_rank:
            self.assertEqual(tensors, target)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_broadcast_coalesced_nccl(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        device = torch.device("cuda:%d" % self.rank)
        ranks = [0, 1]
        # Exercise every rank as the broadcast root.
        for root_rank in ranks:
            self._test_broadcast_coalesced(process_group, device, root_rank)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_sequence_num_set_default_pg_nccl(self):
        torch.cuda.set_device(self.rank)
        self._test_sequence_num_set_default_pg(backend="nccl")

    @skip_if_lt_x_gpu(2)
    @requires_nccl()
    def test_sequence_num_incremented_nccl_default(self):
        self._test_sequence_num_incremented_default_group("nccl")

    @skip_if_lt_x_gpu(4)
    @requires_nccl()
    def test_sequence_num_incremented_nccl_subgroup(self):
        if self.world_size < 4:
            return sandcastle_skip("Test requires world_size of at least 4")
        self._test_sequence_num_incremented_subgroup("nccl")

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_sequence_num_set_nccl_new_group(self):
        torch.cuda.set_device(self.rank)
        self._test_sequence_num_set_new_group(backend="nccl")

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_pass_nccl_options_high_priority_stream(self):
        # Verify that ProcessGroupNCCL.Options flow through both
        # init_process_group and new_group.
        pg_opts = c10d.ProcessGroupNCCL.Options()
        pg_opts.is_high_priority_stream = True
        store = c10d.FileStore(self.file_name, self.world_size)
        # Test init_process_group accepts options
        dist.init_process_group(
            "nccl",
            world_size=self.world_size,
            rank=self.rank,
            store=store,
            pg_options=pg_opts,
        )
        # Test with new_group
        pg = c10d.new_group([0, 1], pg_options=pg_opts)
        # test if the process group constructed with high priority stream
        self.assertTrue(pg.options.is_high_priority_stream)
        # test the process group works as expected
        t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
        pg.allreduce(t).wait()
        # 1 + 2 == 3 per element for a two-rank group.
        expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
        self.assertEqual(expected_tensor, t)

    @requires_nccl()
    @skip_if_lt_x_gpu(4)
    def test_nccl_barrier(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        c10d.init_process_group(
            backend="nccl", rank=self.rank, world_size=self.world_size, store=store
        )
        # Each rank uses every other GPU (hence the 4-GPU requirement).
        t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
        c10d.all_reduce(t)
        # 1 + 2 == 3 per element for a two-rank world.
        expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
        self.assertEqual(expected_tensor, t)
        # Test with new_group
        pg = c10d.new_group([0, 1])
        t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
        pg.allreduce(t).wait()
        self.assertEqual(expected_tensor, t)
        # Singleton groups: allreduce within a group of one is an identity.
        pg = c10d.new_group([0])
        if self.rank == 0:
            t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
            expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
            pg.allreduce(t).wait()
            self.assertEqual(expected_tensor, t)
        pg = c10d.new_group([1])
        if self.rank == 1:
            t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
            expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
            pg.allreduce(t).wait()
            self.assertEqual(expected_tensor, t)

    @requires_nccl()
    @skip_if_lt_x_gpu(4)
    def test_nccl_barrier_timeout(self):
        # Only rank 0 joins init, so it must time out waiting for peers.
        store = c10d.FileStore(self.file_name, self.world_size)
        if self.rank == 0:
            with self.assertRaisesRegex(
                RuntimeError, "Timed out initializing process group"
            ):
                c10d.init_process_group(
                    backend="nccl",
                    rank=self.rank,
                    world_size=self.world_size,
                    store=store,
                    timeout=timedelta(seconds=1),
                )

    @requires_nccl()
    @skip_if_lt_x_gpu(4)
    def test_nccl_barrier_timeout_new_group(self):
        # After a successful init, new_group must also time out when only
        # a member rank (0) participates.
        store = c10d.FileStore(self.file_name, self.world_size)
        c10d.init_process_group(
            backend="nccl",
            rank=self.rank,
            world_size=self.world_size,
            store=store,
            timeout=timedelta(seconds=1),
        )
        if self.rank == 0:
            with self.assertRaisesRegex(
                RuntimeError, "Timed out initializing process group"
            ):
                c10d.new_group([0, 1], timeout=timedelta(seconds=1))
            with self.assertRaisesRegex(
                RuntimeError, "Timed out initializing process group"
            ):
                c10d.new_group([0], timeout=timedelta(seconds=1))

    @requires_nccl()
    @skip_if_lt_x_gpu(4)
    def test_nccl_barrier_timeout_new_group_non_member(self):
        # Same as above, but exercised from rank 1 (a non-member for the
        # second group).
        store = c10d.FileStore(self.file_name, self.world_size)
        c10d.init_process_group(
            backend="nccl",
            rank=self.rank,
            world_size=self.world_size,
            store=store,
            timeout=timedelta(seconds=1),
        )
        if self.rank == 1:
            with self.assertRaisesRegex(
                RuntimeError, "Timed out initializing process group"
            ):
                c10d.new_group([0, 1], timeout=timedelta(seconds=1))
            with self.assertRaisesRegex(
                RuntimeError, "Timed out initializing process group"
            ):
                c10d.new_group([0], timeout=timedelta(seconds=1))

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_nccl_barrier_device_ids(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        c10d.init_process_group(
            backend="nccl", rank=self.rank, world_size=self.world_size, store=store
        )
        # device_ids must be a list of ints.
        c10d.barrier(device_ids=[self.rank])

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_nccl_barrier_device_ids_function_argument(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        c10d.init_process_group(
            backend="nccl", rank=self.rank, world_size=self.world_size, store=store
        )
        # Passing a bare int instead of a list must be rejected.
        with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
            c10d.barrier(device_ids=self.rank)
if __name__ == "__main__":
    # CUDA must not be initialized in the parent: a context created before
    # the test processes are spawned/forked cannot be reused in children.
    assert (
        not torch.cuda._initialized
    ), "test_distributed must not have initialized CUDA context on main process"
    run_tests()
|
shutdown_train_2.py | """Simulate the shutdown sequence on the train pi."""
import select
import socket
import threading
import time
# Phase 2
# listen for shutdown message instead of just the connection
# send sound on/off command to master
# - maybe just an input() to toggle it?
# should master pi have the server socket?
def talk_to_master(startShutdown, shutdownComplete, address=('localhost', 31337)):
    """Client-side shutdown handshake with the master pi.

    Connects to the master at *address* (defaults to the historical
    hard-coded endpoint; TODO: replace localhost with the static ip of the
    train pi — the port number has to match the socket.bind() of the other
    program).  Any incoming data, or the peer closing the connection, is
    interpreted as a shutdown request:

    1. set *startShutdown* so the main loop begins winding down,
    2. wait for *shutdownComplete* to be set by the main thread,
    3. tell the master that shutdown has finished.

    Intended to run in a background thread.
    """
    with socket.socket() as sock:
        sock.connect(address)
        print('made connection to master pi')
        while not startShutdown.is_set():
            # 1-second select timeout keeps the thread responsive without
            # busy-waiting.
            readable, _, _ = select.select([sock], [], [], 1)
            for s in readable:
                s.recv(1024)
                # Any message (or EOF, i.e. b'') from the master means:
                # shut down.
                print('master pi has asked us to shutdown')
                startShutdown.set()
        shutdownComplete.wait()
        print('telling master pi we have shutdown')
        sock.sendall(b'shutdown complete')
if __name__ == '__main__':
    # Protocol: a background thread waits for a shutdown request from the
    # master pi.  Once it arrives, the main thread winds the trains down,
    # then the background thread reports completion back to the master.
    shutdown_requested = threading.Event()
    shutdown_finished = threading.Event()
    listener = threading.Thread(
        target=talk_to_master, args=(shutdown_requested, shutdown_finished))
    listener.start()
    # Simulate the main loop of the train program.
    while not shutdown_requested.is_set():
        print('trains doing train stuff')
        time.sleep(2)
    print('trains returning to station')
    # Count down from 20 to 1, one second per step.
    for remaining in reversed(range(1, 21)):
        time.sleep(1)
        print(remaining)
    shutdown_finished.set()
    listener.join()
    print('all done!')
|
compare_WchainCNOT_qng.py | import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.nqubit, qtm.fubini_study, qtm.encoding
import importlib
import multiprocessing
def run_wchain(num_layers, num_qubits):
    """Run QNG-based state tomography with a W-chain CNOT ansatz.

    Optimizes `thetas` for 400 quantum-natural-gradient steps against a
    random normalized target state, then writes the loss curve, parameter
    history, traces, and fidelities to the experiment folders.

    Args:
        num_layers: number of ansatz layers (3 parameters per qubit per layer).
        num_qubits: number of qubits in the circuit.
    """
    thetas = np.ones(num_layers * num_qubits * 3)
    # Random normalized target state |psi>.
    psi = 2 * np.random.rand(2 ** num_qubits) - 1
    psi = psi / np.linalg.norm(psi)
    qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
    qc.initialize(psi, qubits=range(0, num_qubits))
    loss_values = []
    thetass = []
    for i in range(0, 400):
        if i % 20 == 0:
            print('W_chain: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
        # Quantum natural gradient step: G is the Fubini-Study metric.
        G = qtm.fubini_study.qng(qc.copy(), thetas, qtm.nqubit.create_WchainCNOT_layerd_state, num_layers)
        grad_loss = qtm.base.grad_loss(
            qc,
            qtm.nqubit.create_WchainCNOT_layerd_state,
            thetas, num_layers=num_layers)
        thetas = np.real(thetas - qtm.constant.learning_rate * (np.linalg.inv(G) @ grad_loss))
        thetass.append(thetas.copy())
        qc_copy = qtm.nqubit.create_WchainCNOT_layerd_state(qc.copy(), thetas, num_layers)
        loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
        loss_values.append(loss)
    # Re-evaluate trace distance and fidelity for every parameter snapshot.
    traces = []
    fidelities = []
    for thetas in thetass:
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc = qtm.nqubit.create_WchainCNOT_layerd_state(qc, thetas, num_layers=num_layers).inverse()
        psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
        trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
        traces.append(trace)
        fidelities.append(fidelity)
    # Fixed typo in the log message ("Writting" -> "Writing").
    print('Writing ... ' + str(num_layers) + ' layers,' + str(num_qubits) +
          ' qubits')
    # Build the output directory once instead of repeating the string
    # concatenation for every file; paths are identical to the originals.
    out_dir = ("../../experiments/tomographyCNOT/tomography_wchain_" +
               str(num_layers) + "/" + str(num_qubits) + "/")
    for fname, data in (("loss_values_qng.csv", loss_values),
                        ("thetass_qng.csv", thetass),
                        ("traces_qng.csv", traces),
                        ("fidelities_qng.csv", fidelities)):
        np.savetxt(out_dir + fname, data, delimiter=",")
if __name__ == "__main__":
    # Spawn one worker process per (layers, qubits) configuration and run
    # them all in parallel.
    layer_counts = [1, 2, 3, 4, 5]
    qubit_counts = [3, 4, 5]
    workers = [
        multiprocessing.Process(target=run_wchain, args=(layers, qubits))
        for layers in layer_counts
        for qubits in qubit_counts
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Done!")
TrayMsg.py | #coding: utf-8
'''以托盘图标的方式发出右下角的提示信息。'''
# 原来源: http://angeloce.iteye.com/blog/493681
import win32gui
import win32con
import time
import threading
from sine.utils import ReStartableThread
class TrayMsg:
    """Shows balloon notifications from a Windows system-tray icon."""
    # Class-level counter used to generate a unique window-class name per
    # instance; `lock` guards the shared `count`.
    count = 0
    lock = threading.Lock()

    def __init__(self, hicon, LC, AC):
        # LC/AC are callbacks invoked from WndProc: LC when the balloon is
        # left-clicked, AC when it auto-closes.
        self.LC=LC
        self.AC=AC
        # Register a window class with a unique name.
        wc = win32gui.WNDCLASS()
        hinst = wc.hInstance = win32gui.GetModuleHandle(None)
        TrayMsg.lock.acquire()
        self.className = wc.lpszClassName = "TrayMsg" + str(TrayMsg.count)
        TrayMsg.count += 1
        TrayMsg.lock.release()
        wc.lpfnWndProc = self.WndProc
        classAtom = win32gui.RegisterClass(wc)
        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
        self.hwnd = win32gui.CreateWindow( classAtom, self.className, style,
            0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT,
            0, 0, hinst, None)
        # Fall back to the stock application icon when none is supplied.
        if hicon == None:
            hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
        self.hicon = hicon
        nid = (self.hwnd, 0, win32gui.NIF_ICON, win32con.WM_USER+20, hicon, self.className)
        win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)

    def showMsg(self, title, msg):
        """Display a balloon tip with *title* and *msg* on the tray icon."""
        # The original author reports that the C-style Shell_NotifyIconA was
        # said to be required instead of the wrapped Shell_NotifyIcon, but
        # the tuple form below works with the win32gui wrapper as well.
        nid = (self.hwnd,          # window handle
               0,                  # tray icon id
               win32gui.NIF_INFO,  # flags
               0,                  # callback message id
               self.hicon,         # tray icon handle
               "TestMessage",      # icon tooltip string
               msg,                # balloon body text
               0,                  # balloon display timeout
               title,              # balloon title
               # win32gui.NIIF_INFO  # balloon icon
               )
        win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, nid)

    def WndProc(self, hwnd, msg, wParam, lParam):
        # The two lParam constants below were discovered experimentally:
        # they arrive when the balloon is clicked (which closes it) and
        # when it closes on its own, respectively.  In both cases the
        # corresponding callback is invoked with this TrayMsg instance.
        if lParam == win32con.WM_PSD_ENVSTAMPRECT:
            self.LC(self)
        if lParam == win32con.WM_PSD_GREEKTEXTRECT:
            self.AC(self)
        if msg == win32con.WM_DESTROY:
            # Remove the tray icon and stop the message pump.
            nid = (self.hwnd, 0)
            win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
            win32gui.PostQuitMessage(0) # Terminate the app.
        return win32gui.DefWindowProc(hwnd, msg, wParam, lParam)
def _nothing(*args, **kw):
pass
def send(title, msg, hicon=None, LC=_nothing, AC=_nothing):
    '''Show a tray balloon notification without blocking the caller.

    The last two arguments are callbacks: LC fires when the message box is
    left-clicked (which closes it), AC when the message disappears on its
    own.  Both receive the TrayMsg instance as their argument.
    '''
    def thread():
        # Runs the whole icon lifecycle on a worker thread so PumpMessages
        # does not block the caller.
        t = TrayMsg(hicon, LC, AC)
        className = t.className
        t.showMsg(title, msg)
        def thread2():
            # Watchdog: force-close the balloon window after 10 seconds so
            # PumpMessages below can return even if the user never clicks.
            try:
                time.sleep(10)
                win32gui.PostMessage(t.hwnd, win32con.WM_CLOSE, 0, 0)
                time.sleep(0.5)
            except Exception as e:
                pass
        ReStartableThread(target=thread2).start()
        win32gui.PumpMessages()
        # Best-effort cleanup of the per-instance window class.
        try:
            win32gui.UnregisterClass(className, 0)
        except Exception as e:
            pass
    ReStartableThread(target=thread).start()
|
task_manager.py | from multiprocessing import Process, Queue, current_process
from enum import Enum
from tqdm import tqdm
from collections import defaultdict
import json
import tweepy
import os
import queue
import pickle
# Module-level state shared with worker code:
# sid - source tweet id for retweeter-timeline tasks (-1 means unset).
# tfp - twitter folder path, mirrored from the TaskManager instance so
#       module-level helpers can build paths.
sid = -1
tfp = ""
class TaskType(Enum):
    """
    This defines the types of tasks that the TaskManager can perform.
    """
    tweet_details = 0
    retweets = 1
    followers = 2
    # NOTE(review): "twohup" looks like a typo for "twohop"; the name is
    # kept as-is because callers may reference it.
    twohup_followers = 3
    followees = 4
    timeline = 5
    rt_timeline = 6
    user_relation = 7
class TaskManager:
    """
    The TaskManager allows scheduling of different type of Twitter data
    tasks in a queue, which are executed in parallel by multiple processes.
    Instance Variables:
    - The tasks_pending queue stores all the pending tasks in a FIFO queue.
    - The tasks_pending_dict is a dictionary that stores the set of tasks
    pending corresponding to each task type.
    - The folder paths corresponding to where the different types of
    information will be stored are also defined.
    """
    def __init__(self, base_folder_path, twitter_folder_path, **args):
        """Create the output folder tree and the empty task queue.

        Extra keyword arguments (**args) are accepted but ignored.
        """
        self.base_folder_path = base_folder_path
        self.twitter_folder_path = twitter_folder_path
        # Mirror the path into the module-level global for helpers that
        # run in worker processes.
        global tfp
        tfp = twitter_folder_path
        self.timeline_folder_path = twitter_folder_path + 'timelines/'
        if not os.path.exists(self.timeline_folder_path):
            os.makedirs(self.timeline_folder_path)
        self.rt_timeline_folder_path = twitter_folder_path + 'rt_timelines/'
        if not os.path.exists(self.rt_timeline_folder_path):
            os.makedirs(self.rt_timeline_folder_path)
        self.follower_folder_path = twitter_folder_path + 'followers/'
        if not os.path.exists(self.follower_folder_path):
            os.makedirs(self.follower_folder_path)
        self.followee_folder_path = twitter_folder_path + 'followees/'
        if not os.path.exists(self.followee_folder_path):
            os.makedirs(self.followee_folder_path)
        self.tweet_details_folder_path = twitter_folder_path + 'tweet_details/'
        if not os.path.exists(self.tweet_details_folder_path):
            os.makedirs(self.tweet_details_folder_path)
        self.retweets_folder_path = twitter_folder_path + 'retweets/'
        if not os.path.exists(self.retweets_folder_path):
            os.makedirs(self.retweets_folder_path)
        self.user_relation_folder_path = twitter_folder_path + 'user_relation/'
        if not os.path.exists(self.user_relation_folder_path):
            os.makedirs(self.user_relation_folder_path)
        self.tasks_pending = Queue()
        self.tasks_pending_dict = defaultdict(set)

    def do_task(self, api):
        """
        Queries the tasks_pending queue to fetch a new task if available,
        and executes the tasks based on the TaskType.
        """
        while True:
            try:
                object_id, task_type = self.tasks_pending.get_nowait()
            except queue.Empty:
                # No more work: let this worker finish.
                break
            else:
                try:
                    if task_type == TaskType.tweet_details:
                        self._get_tweet_details(object_id, api)
                    elif task_type == TaskType.retweets:
                        self._get_retweets(object_id, api)
                    elif task_type == TaskType.followers:
                        self._get_followers(object_id, api)
                    elif task_type == TaskType.followees:
                        self._get_followees(object_id, api)
                    elif task_type == TaskType.timeline:
                        self._get_timelines(object_id, api)
                    elif task_type == TaskType.rt_timeline:
                        self._get_rt_timelines(object_id, api)
                    elif task_type == TaskType.user_relation:
                        self._get_user_relation(object_id, api)
                except Exception as e:
                    # A failing task is logged and skipped; the worker
                    # keeps draining the queue.
                    print("\nError: Unable to complete " + str(task_type) +
                          " for id: " + str(object_id) + " - " + str(e) + '\n')
                    continue
                finally:
                    print("\nProcessed: " + str(task_type) + " for id " +
                          str(object_id) + " is processed by " +
                          current_process().name + ".\nTasks left: " +
                          str(self.tasks_pending.qsize()) + '\n')
                    # NOTE(review): tasks_pending_dict is a plain dict, so
                    # this removal is only visible in the current process,
                    # not across the worker processes — confirm whether
                    # cross-process dedup was intended.
                    if object_id in self.tasks_pending_dict[task_type]:
                        self.tasks_pending_dict[task_type].remove(object_id)
        return True

    def run_tasks(self, apis):
        """
        Create processes for parallel execution - each process will use
        one API key to accomplish one task at a time.
        """
        processes = []
        for idx in range(len(apis)):
            current_api = apis[idx]
            # api_object = dill.dumps(current_api)
            p = Process(target=self.do_task, args=(current_api,))
            processes.append(p)
            p.start()
        # Avoiding deadlock
        # NOTE(review): the parent also drains the queue here in a busy
        # loop while children are alive — presumably to avoid the known
        # join-before-queue-drained deadlock; verify CPU usage is
        # acceptable.
        for idx, p in enumerate(processes):
            while True:
                running = p.is_alive()
                if not self.tasks_pending.empty():
                    self.do_task(apis[idx])
                else:
                    if not running:
                        break
        # Completing processes
        print("Waiting for processes to finish...")
        for p in processes:
            p.join()

    def _get_user_relation(self, tweet_id, api):
        """Fetch and pickle the friendship status between two users.

        *tweet_id* is (despite the name) a pair of (id, screen_name)
        tuples: ((source_id, source_name), (target_id, target_name)).
        """
        source_id = tweet_id[0][0]
        source_name = tweet_id[0][1]
        target_id = tweet_id[1][0]
        target_name = tweet_id[1][1]
        print("Getting user_relation between {} and {}".format(source_id,target_id))
        relation_details = api.show_friendship(source_id,source_name,target_id,target_name)
        print(relation_details)
        print("Writing the details to file...")
        with open(self.user_relation_folder_path + '/' + str(source_id) +"_"+ str(target_id)+'.pkl', 'wb') as fw:
            fw.write(pickle.dumps(relation_details))
        return relation_details

    def _get_tweet_details(self, tweet_id, api):
        """Fetch a single tweet and store its raw JSON to disk."""
        print("Getting tweet details of tweet {}".format(tweet_id))
        tweet_details = api.get_status(tweet_id)
        print("Writing the details of {} to file...".format(tweet_id))
        with open(self.tweet_details_folder_path + '/' + str(tweet_id) +
                  '.json', 'w') as fw:
            json.dump(tweet_details._json, fw)
        return tweet_details

    def _get_retweets(self, tweet_id, api):
        """Fetch up to 200 retweets of a tweet and store them as JSON."""
        print("Getting retweets of tweet {}".format(tweet_id))
        retweets = api.retweets(tweet_id, 200)
        print("Writing the {0} retweets of {1} to file".format(
            len(retweets), tweet_id))
        retweets_arr = []
        for retweet in retweets:
            retweets_arr.append(json.dumps(retweet._json))
        with open(self.retweets_folder_path + str(tweet_id) +
                  '.json', 'w') as fw:
            json.dump(retweets_arr, fw)

    def _get_followers(self, user_id, api):
        """Fetch a user's follower ids and store the delta vs. history.

        Only the ids added/removed since previous timesteps are written.
        """
        user_obj = api.get_user(user_id)
        if self.add_user_to_ignore_list(user_obj):
            return
        user_id = user_obj.id_str
        all_followers = self.get_all_followers(user_id)
        followers_added = []
        followers_subtracted = []
        followers_current = set()
        print("Getting followers of user {}".format(user_id))
        try:
            for follower in tqdm(tweepy.Cursor(
                    api.followers_ids, id=user_id).items(), unit="followers"):
                followers_current.add(follower)
        except Exception as e:
            print("Error while fetching user followers: " + str(e))
        else:
            followers_added = [item for item in followers_current
                               if item not in all_followers]
            followers_subtracted = [item for item in all_followers
                                    if item not in followers_current]
            followers = {'followers_added': followers_added,
                         'followers_subtracted': followers_subtracted}
            print("Writing followers for user {}. Added: {}, Subtracted: {}"
                  .format(user_id, len(followers_added),
                          len(followers_subtracted)))
            with open(self.follower_folder_path + str(user_id) +
                      '.json', 'w') as fw:
                json.dump(followers, fw)

    def _get_followees(self, user_id, api):
        """Fetch a user's followee ids and store the delta vs. history."""
        user_obj = api.get_user(user_id)
        if self.add_user_to_ignore_list(user_obj):
            return
        user_id = user_obj.id_str
        all_followees = self.get_all_followees(user_id)
        followees_added = []
        followees_subtracted = []
        followees_current = set()
        print("Getting followees of user {}".format(user_id))
        try:
            for followee in tqdm(tweepy.Cursor(
                    api.friends_ids, id=user_id).items(), unit="followees"):
                followees_current.add(followee)
        except Exception as e:
            print("Error while fetching user followees: " + str(e))
        else:
            followees_added = [item for item in followees_current
                               if item not in all_followees]
            followees_subtracted = [item for item in all_followees
                                    if item not in followees_current]
            followees = {'followees_added': followees_added,
                         'followees_subtracted': followees_subtracted}
            print("Writing followees for user {}. Added: {}, Subtracted: {}"
                  .format(user_id, len(followees_added),
                          len(followees_subtracted)))
            with open(self.followee_folder_path + str(user_id) +
                      '.json', 'w') as fw:
                json.dump(followees, fw)

    def _get_timelines(self, user_id, api):
        """Fetch a user's timeline, incrementally since the last saved tweet."""
        user_obj = api.get_user(user_id)
        if self.add_user_to_ignore_list(user_obj):
            return
        user_id = user_obj.id_str
        last_tweet_id = self.get_last_tweet_id(user_id)
        tweets_arr = []
        print("Fetching timelines for user {}".format(user_id))
        try:
            if last_tweet_id != -1:
                # Incremental fetch: only tweets newer than the last seen.
                for tweet in tqdm(tweepy.Cursor(
                        api.user_timeline, id=user_id,
                        since_id=int(last_tweet_id))
                        .items(), unit="tweets"):
                    tweets_arr.append(json.dumps(tweet._json))
            else:
                for tweet in tweepy.Cursor(
                        api.user_timeline, id=user_id).items():
                    tweets_arr.append(json.dumps(tweet._json))
        except Exception as e:
            print("Error while fetching user timeline: " + str(e))
        else:
            print("Writing {} tweets of user {}"
                  .format(len(tweets_arr), user_id))
            if (len(tweets_arr) != 0):
                with open(self.timeline_folder_path + str(user_id) +
                          '.json', 'w') as fw:
                    json.dump(tweets_arr, fw)

    def _get_rt_timelines(self, user_id, api):
        """Fetch a retweeter's timeline into the per-source-tweet subfolder.

        Uses the module-level `sid`/`tfp` globals set by get_rt_timelines.
        """
        rt_source_folder = tfp + 'rt_timelines/' + str(sid) + '/'
        if not os.path.exists(rt_source_folder):
            os.makedirs(rt_source_folder)
        user_obj = api.get_user(user_id)
        if self.add_user_to_ignore_list(user_obj):
            return
        user_id = user_obj.id_str
        last_tweet_id = self.get_last_tweet_id(user_id)
        tweets_arr = []
        print("Fetching timelines for user (retweeters) {}".format(user_id))
        try:
            if last_tweet_id != -1:
                for tweet in tqdm(tweepy.Cursor(
                        api.user_timeline, id=user_id,
                        since_id=int(last_tweet_id))
                        .items(), unit="tweets"):
                    tweets_arr.append(json.dumps(tweet._json))
            else:
                for tweet in tweepy.Cursor(
                        api.user_timeline, id=user_id).items():
                    tweets_arr.append(json.dumps(tweet._json))
        except Exception as e:
            print("Error while fetching user (retweeters) timeline: " + str(e))
        else:
            print("Writing {} tweets of user (retweeters) {}"
                  .format(len(tweets_arr), user_id))
            if (len(tweets_arr) != 0):
                with open(self.rt_timeline_folder_path + str(sid) + '/' + str(user_id) +
                          '.json', 'w') as fw:
                    json.dump(tweets_arr, fw)

    def get_tweet_details(self, tweet_ids):
        """Queue tweet-detail tasks for ids not already fetched or pending."""
        for tweet_id in tweet_ids:
            if not os.path.exists(self.tweet_details_folder_path +
                                  str(tweet_id) + '.json') \
                    and tweet_id not in \
                    self.tasks_pending_dict[TaskType.tweet_details]:
                self.tasks_pending_dict[TaskType.tweet_details].add(tweet_id)
                self.tasks_pending.put((tweet_id, TaskType.tweet_details))

    def get_retweets(self, tweet_ids):
        """Queue retweet tasks for ids not already fetched or pending."""
        for tweet_id in tweet_ids:
            if not os.path.exists(self.retweets_folder_path +
                                  str(tweet_id) + ".json") \
                    and tweet_id not in \
                    self.tasks_pending_dict[TaskType.retweets]:
                self.tasks_pending_dict[TaskType.retweets].add(tweet_id)
                self.tasks_pending.put((tweet_id, TaskType.retweets))

    def get_followers(self, user_ids):
        """Queue follower tasks for users not already fetched or pending."""
        for user_id in user_ids:
            if not os.path.exists(self.follower_folder_path +
                                  str(user_id) + ".json") \
                    and user_id not in \
                    self.tasks_pending_dict[TaskType.followers]:
                self.tasks_pending_dict[TaskType.followers].add(user_id)
                self.tasks_pending.put((user_id, TaskType.followers))

    def get_followees(self, user_ids):
        """Queue followee tasks for users not already fetched or pending."""
        for user_id in user_ids:
            if not os.path.exists(self.followee_folder_path +
                                  str(user_id) + ".json") \
                    and user_id not in \
                    self.tasks_pending_dict[TaskType.followees]:
                self.tasks_pending_dict[TaskType.followees].add(user_id)
                self.tasks_pending.put((user_id, TaskType.followees))

    def get_timelines(self, user_ids):
        """Queue timeline tasks for users not already fetched or pending."""
        for user_id in user_ids:
            if not os.path.exists(self.timeline_folder_path +
                                  str(user_id) + ".json") \
                    and user_id not in \
                    self.tasks_pending_dict[TaskType.timeline]:
                self.tasks_pending_dict[TaskType.timeline].add(user_id)
                self.tasks_pending.put((user_id, TaskType.timeline))

    def get_rt_timelines(self, user_ids, source_id):
        """Queue retweeter-timeline tasks for one source tweet.

        Stores *source_id* in the module-level `sid` so worker-side
        `_get_rt_timelines` can build the per-source output folder.
        """
        global sid
        sid = source_id
        for user_id in user_ids:
            if not os.path.exists(self.rt_timeline_folder_path +
                                  str(source_id) + "/" + str(user_id) + ".json") \
                    and user_id not in \
                    self.tasks_pending_dict[TaskType.rt_timeline]:
                self.tasks_pending_dict[TaskType.rt_timeline].add(user_id)
                self.tasks_pending.put((user_id, TaskType.rt_timeline))

    def get_user_relation(self, ids_combination):
        """Queue user-relation tasks for pairs not already fetched or pending.

        NOTE(review): the existence check looks in tweet_details_folder_path
        for a '.json' file, while _get_user_relation writes a '.pkl' file
        into user_relation_folder_path — so this check can never match and
        pairs may be re-fetched; confirm which path/extension was intended.
        """
        for id_pair in ids_combination:
            # combined_ids = id_pair[0]+id_pair[1]
            if not os.path.exists(self.tweet_details_folder_path +
                                  str(id_pair[0]) +"_"+str(id_pair[1])+'.json') \
                    and id_pair not in \
                    self.tasks_pending_dict[TaskType.user_relation]:
                self.tasks_pending_dict[TaskType.user_relation].add(id_pair)
                self.tasks_pending.put((id_pair, TaskType.user_relation))

    def get_all_followers(self, user_id):
        """
        Parses through the already fetched followers at previous timesteps
        to find the complete list of followers of a user.
        """
        all_followers = set()
        # Newest timestep folders first.
        time_folder_list = sorted(os.listdir(self.base_folder_path),
                                  reverse=True)
        for time_folder in time_folder_list:
            if '.txt' in time_folder:
                continue
            followers_folder = self.base_folder_path + time_folder + \
                '/twitter/followers/'
            followers_file = followers_folder + str(user_id) + '.json'
            if os.path.exists(followers_file):
                print("Existing file found for user" + str(user_id) +
                      "in folder " + str(time_folder))
                with open(followers_file) as f:
                    data = json.load(f)
                    # Replay the add/remove deltas recorded per timestep.
                    all_followers.update(data['followers_added'])
                    for item in data['followers_subtracted']:
                        if item in all_followers:
                            all_followers.remove(item)
        return all_followers

    def get_all_followees(self, user_id):
        """
        Parses through the already fetched followees at previous timesteps
        to find the complete list of followees of a user.
        """
        all_followees = set()
        time_folder_list = sorted(os.listdir(self.base_folder_path),
                                  reverse=True)
        for time_folder in time_folder_list:
            if '.txt' in time_folder:
                continue
            followees_folder = self.base_folder_path + time_folder + \
                '/twitter/followees/'
            followees_file = followees_folder + str(user_id) + '.json'
            if os.path.exists(followees_file):
                print("Existing file found for user" + str(user_id) +
                      "in folder " + str(time_folder))
                with open(followees_file) as f:
                    data = json.load(f)
                    all_followees.update(data['followees_added'])
                    for item in data['followees_subtracted']:
                        if item in all_followees:
                            all_followees.remove(item)
        return all_followees

    def get_last_tweet_id(self, user_id):
        """
        Parses through the already fetched timelines at previous timesteps
        to find the last tweet_id fetched from a user's timeline.

        Returns -1 when no previous timeline file exists.
        """
        time_folder_list = sorted(os.listdir(self.base_folder_path),
                                  reverse=True)
        for time_folder in time_folder_list:
            if '.txt' in time_folder:
                continue
            timelines_folder = self.base_folder_path + time_folder + \
                '/twitter/timelines/'
            timelines_file = timelines_folder + str(user_id) + '.json'
            if os.path.exists(timelines_file):
                print("Timeline file found for user" + str(user_id) +
                      "in folder " + str(time_folder))
                with open(timelines_file) as f:
                    data = json.load(f)
                    # Timelines are stored newest-first, so entry 0 is the
                    # most recent tweet.
                    last_tweet = json.loads(data[0])
                    last_tweet_id = int(last_tweet["id"])
                    return last_tweet_id
        return -1

    def add_user_to_ignore_list(self, user_obj):
        """
        A user-level ignore list is maintained so that celebrity like users
        are not processed, thereby avoiding exceedance of Twitter API rate
        limits quickly.

        Returns True (and appends the user id to the ignore file) when the
        user has more than 20000 followers or followees.
        """
        if user_obj.followers_count > 20000 or user_obj.friends_count > 20000:
            print("IgnoreList: The user has more than 20000 " +
                  "followers/followees, ignoring.")
            with open(self.base_folder_path +
                      'user_ignore_list.txt', 'a+') as fw:
                fw.write(str(user_obj.id_str) + '\n')
            return True
        return False
|
test_functools.py | import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
    """Temporarily install *replacement* in sys.modules under *name*,
    restoring the original module on exit (even on error)."""
    saved = sys.modules[name]
    sys.modules[name] = replacement
    try:
        yield
    finally:
        sys.modules[name] = saved
def capture(*args, **kw):
    """Return every positional and keyword argument as an (args, kwargs) pair."""
    captured = (args, kw)
    return captured
def signature(part):
    """Return the (func, args, keywords, __dict__) tuple identifying a
    partial object."""
    attrs = (part.func, part.args, part.keywords, part.__dict__)
    return attrs
class MyTuple(tuple):
    # Trivial tuple subclass used to check how operations treat subclasses.
    pass
class BadTuple(tuple):
    """Tuple subclass whose __add__ misbehaves by returning a list."""
    def __add__(self, other):
        return [*self, *other]
class MyDict(dict):
    # Trivial dict subclass used to check how operations treat subclasses.
    pass
class TestPartial:
    """Behavioral tests shared by every functools.partial implementation.

    Concrete subclasses supply ``partial`` (the implementation under test)
    and ``AllowPickle`` (a context manager that makes pickling of partial
    objects work for that implementation).
    """
    def test_basic_examples(self):
        p = self.partial(capture, 1, 2, a=10, b=20)
        self.assertTrue(callable(p))
        self.assertEqual(p(3, 4, b=30, c=40),
                         ((1, 2, 3, 4), dict(a=10, b=30, c=40)))
        p = self.partial(map, lambda x: x*10)
        self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
    def test_attributes(self):
        p = self.partial(capture, 1, 2, a=10, b=20)
        # attributes should be readable
        self.assertEqual(p.func, capture)
        self.assertEqual(p.args, (1, 2))
        self.assertEqual(p.keywords, dict(a=10, b=20))
    def test_argument_checking(self):
        self.assertRaises(TypeError, self.partial) # need at least a func arg
        try:
            self.partial(2)()
        except TypeError:
            pass
        else:
            self.fail('First arg not checked for callability')
    def test_protection_of_callers_dict_argument(self):
        # a caller's dictionary should not be altered by partial
        def func(a=10, b=20):
            return a
        d = {'a':3}
        p = self.partial(func, a=5)
        self.assertEqual(p(**d), 3)
        self.assertEqual(d, {'a':3})
        p(b=7)
        self.assertEqual(d, {'a':3})
    def test_kwargs_copy(self):
        # Issue #29532: Altering a kwarg dictionary passed to a constructor
        # should not affect a partial object after creation
        d = {'a': 3}
        p = self.partial(capture, **d)
        self.assertEqual(p(), ((), {'a': 3}))
        d['a'] = 5
        self.assertEqual(p(), ((), {'a': 3}))
    def test_arg_combinations(self):
        # exercise special code paths for zero args in either partial
        # object or the caller
        p = self.partial(capture)
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(1,2), ((1,2), {}))
        p = self.partial(capture, 1, 2)
        self.assertEqual(p(), ((1,2), {}))
        self.assertEqual(p(3,4), ((1,2,3,4), {}))
    def test_kw_combinations(self):
        # exercise special code paths for no keyword args in
        # either the partial object or the caller
        p = self.partial(capture)
        self.assertEqual(p.keywords, {})
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(a=1), ((), {'a':1}))
        p = self.partial(capture, a=1)
        self.assertEqual(p.keywords, {'a':1})
        self.assertEqual(p(), ((), {'a':1}))
        self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
        # keyword args in the call override those in the partial object
        self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
    def test_positional(self):
        # make sure positional arguments are captured correctly
        for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
            p = self.partial(capture, *args)
            expected = args + ('x',)
            got, empty = p('x')
            self.assertTrue(expected == got and empty == {})
    def test_keyword(self):
        # make sure keyword arguments are captured correctly
        for a in ['a', 0, None, 3.5]:
            p = self.partial(capture, a=a)
            expected = {'a':a,'x':None}
            empty, got = p(x=None)
            self.assertTrue(expected == got and empty == ())
    def test_no_side_effects(self):
        # make sure there are no side effects that affect subsequent calls
        p = self.partial(capture, 0, a=1)
        args1, kw1 = p(1, b=2)
        self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
        args2, kw2 = p()
        self.assertTrue(args2 == (0,) and kw2 == {'a':1})
    def test_error_propagation(self):
        # exceptions raised by the wrapped callable must propagate unchanged
        def f(x, y):
            x / y
        self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
        self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
        self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
        self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
    def test_weakref(self):
        # partial objects must be weak-referenceable
        f = self.partial(int, base=16)
        p = proxy(f)
        self.assertEqual(f.func, p.func)
        f = None
        self.assertRaises(ReferenceError, getattr, p, 'func')
    def test_with_bound_and_unbound_methods(self):
        data = list(map(str, range(10)))
        join = self.partial(str.join, '')
        self.assertEqual(join(data), '0123456789')
        join = self.partial(''.join)
        self.assertEqual(join(data), '0123456789')
    def test_nested_optimization(self):
        # partial-of-partial should flatten to a single equivalent partial
        partial = self.partial
        inner = partial(signature, 'asdf')
        nested = partial(inner, bar=True)
        flat = partial(signature, 'asdf', bar=True)
        self.assertEqual(signature(nested), signature(flat))
    def test_nested_partial_with_attribute(self):
        # see issue 25137
        partial = self.partial
        def foo(bar):
            return bar
        p = partial(foo, 'first')
        p2 = partial(p, 'second')
        p2.new_attr = 'spam'
        self.assertEqual(p2.new_attr, 'spam')
    def test_repr(self):
        args = (object(), object())
        args_repr = ', '.join(repr(a) for a in args)
        kwargs = {'a': object(), 'b': object()}
        # kwargs repr order is not guaranteed, so accept either ordering
        kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
                        'b={b!r}, a={a!r}'.format_map(kwargs)]
        if self.partial in (c_functools.partial, py_functools.partial):
            name = 'functools.partial'
        else:
            name = self.partial.__name__
        f = self.partial(capture)
        self.assertEqual(f'{name}({capture!r})', repr(f))
        f = self.partial(capture, *args)
        self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
        f = self.partial(capture, **kwargs)
        self.assertIn(repr(f),
                      [f'{name}({capture!r}, {kwargs_repr})'
                       for kwargs_repr in kwargs_reprs])
        f = self.partial(capture, *args, **kwargs)
        self.assertIn(repr(f),
                      [f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
                       for kwargs_repr in kwargs_reprs])
    def test_recursive_repr(self):
        # a partial that (directly or via args/keywords) contains itself
        # must render '...' instead of recursing; __setstate__ is used to
        # build the cycle and to break it again in the finally blocks
        if self.partial in (c_functools.partial, py_functools.partial):
            name = 'functools.partial'
        else:
            name = self.partial.__name__
        f = self.partial(capture)
        f.__setstate__((f, (), {}, {}))
        try:
            self.assertEqual(repr(f), '%s(...)' % (name,))
        finally:
            f.__setstate__((capture, (), {}, {}))
        f = self.partial(capture)
        f.__setstate__((capture, (f,), {}, {}))
        try:
            self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
        finally:
            f.__setstate__((capture, (), {}, {}))
        f = self.partial(capture)
        f.__setstate__((capture, (), {'a': f}, {}))
        try:
            self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
        finally:
            f.__setstate__((capture, (), {}, {}))
    def test_pickle(self):
        with self.AllowPickle():
            f = self.partial(signature, ['asdf'], bar=[True])
            f.attr = []
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                f_copy = pickle.loads(pickle.dumps(f, proto))
                self.assertEqual(signature(f_copy), signature(f))
    def test_copy(self):
        # copy.copy must share (not duplicate) the partial's state
        f = self.partial(signature, ['asdf'], bar=[True])
        f.attr = []
        f_copy = copy.copy(f)
        self.assertEqual(signature(f_copy), signature(f))
        self.assertIs(f_copy.attr, f.attr)
        self.assertIs(f_copy.args, f.args)
        self.assertIs(f_copy.keywords, f.keywords)
    def test_deepcopy(self):
        # copy.deepcopy must duplicate the partial's state recursively
        f = self.partial(signature, ['asdf'], bar=[True])
        f.attr = []
        f_copy = copy.deepcopy(f)
        self.assertEqual(signature(f_copy), signature(f))
        self.assertIsNot(f_copy.attr, f.attr)
        self.assertIsNot(f_copy.args, f.args)
        self.assertIsNot(f_copy.args[0], f.args[0])
        self.assertIsNot(f_copy.keywords, f.keywords)
        self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
    def test_setstate(self):
        # __setstate__ accepts None for the keywords and the instance dict
        f = self.partial(signature)
        f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
        self.assertEqual(signature(f),
                         (capture, (1,), dict(a=10), dict(attr=[])))
        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
        f.__setstate__((capture, (1,), dict(a=10), None))
        self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
        f.__setstate__((capture, (1,), None, None))
        #self.assertEqual(signature(f), (capture, (1,), {}, {}))
        self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
        self.assertEqual(f(2), ((1, 2), {}))
        self.assertEqual(f(), ((1,), {}))
        f.__setstate__((capture, (), {}, None))
        self.assertEqual(signature(f), (capture, (), {}, {}))
        self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
        self.assertEqual(f(2), ((2,), {}))
        self.assertEqual(f(), ((), {}))
    def test_setstate_errors(self):
        # malformed state tuples must raise TypeError
        f = self.partial(signature)
        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
        self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
        self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
    def test_setstate_subclasses(self):
        # args/keywords given as tuple/dict subclasses are converted to the
        # exact base types, so e.g. BadTuple.__add__ is never invoked
        f = self.partial(signature)
        f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
        s = signature(f)
        self.assertEqual(s, (capture, (1,), dict(a=10), {}))
        self.assertIs(type(s[1]), tuple)
        self.assertIs(type(s[2]), dict)
        r = f()
        self.assertEqual(r, ((1,), {'a': 10}))
        self.assertIs(type(r[0]), tuple)
        self.assertIs(type(r[1]), dict)
        f.__setstate__((capture, BadTuple((1,)), {}, None))
        s = signature(f)
        self.assertEqual(s, (capture, (1,), {}, {}))
        self.assertIs(type(s[1]), tuple)
        r = f(2)
        self.assertEqual(r, ((1, 2), {}))
        self.assertIs(type(r[0]), tuple)
    def test_recursive_pickle(self):
        # pickling a self-referential partial: direct self-reference must
        # raise RecursionError; cycles through args/keywords must round-trip
        with self.AllowPickle():
            f = self.partial(capture)
            f.__setstate__((f, (), {}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    with self.assertRaises(RecursionError):
                        pickle.dumps(f, proto)
            finally:
                f.__setstate__((capture, (), {}, {}))
            f = self.partial(capture)
            f.__setstate__((capture, (f,), {}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    try:
                        self.assertIs(f_copy.args[0], f_copy)
                    finally:
                        f_copy.__setstate__((capture, (), {}, {}))
            finally:
                f.__setstate__((capture, (), {}, {}))
            f = self.partial(capture)
            f.__setstate__((capture, (), {'a': f}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    try:
                        self.assertIs(f_copy.keywords['a'], f_copy)
                    finally:
                        f_copy.__setstate__((capture, (), {}, {}))
            finally:
                f.__setstate__((capture, (), {}, {}))
    # Issue 6083: Reference counting bug
    def test_setstate_refcount(self):
        class BadSequence:
            def __len__(self):
                return 4
            def __getitem__(self, key):
                if key == 0:
                    return max
                elif key == 1:
                    return tuple(range(1000000))
                elif key in (2, 3):
                    return {}
                raise IndexError
        f = self.partial(object)
        self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
    """Run the shared partial tests against the C implementation, plus
    C-implementation-specific checks (read-only attributes, repr safety)."""
    if c_functools:
        partial = c_functools.partial
    class AllowPickle:
        # The C partial pickles as-is; no module swapping is required.
        def __enter__(self):
            return self
        def __exit__(self, type, value, tb):
            return False
    def test_attributes_unwritable(self):
        # attributes should not be writable
        p = self.partial(capture, 1, 2, a=10, b=20)
        self.assertRaises(AttributeError, setattr, p, 'func', map)
        self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
        self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
        p = self.partial(hex)
        try:
            del p.__dict__
        except TypeError:
            pass
        else:
            self.fail('partial object allowed __dict__ to be deleted')
    def test_manually_adding_non_string_keyword(self):
        # repr must tolerate a non-string key injected into p.keywords,
        # while calling the partial still fails with TypeError
        p = self.partial(capture)
        # Adding a non-string/unicode keyword to partial kwargs
        p.keywords[1234] = 'value'
        r = repr(p)
        self.assertIn('1234', r)
        self.assertIn("'value'", r)
        with self.assertRaises(TypeError):
            p()
    def test_keystr_replaces_value(self):
        p = self.partial(capture)
        class MutatesYourDict(object):
            def __str__(self):
                p.keywords[self] = ['sth2']
                return 'astr'
        # Replacing the value during key formatting should keep the original
        # value alive (at least long enough).
        p.keywords[MutatesYourDict()] = ['sth']
        r = repr(p)
        self.assertIn('astr', r)
        self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
    """Run the shared partial tests against the pure-Python implementation."""
    partial = py_functools.partial
    class AllowPickle:
        # Pickling the pure-Python partial only works while
        # sys.modules['functools'] is the pure-Python module, so swap it
        # in for the duration of the with-block.
        def __init__(self):
            self._cm = replaced_module("functools", py_functools)
        def __enter__(self):
            return self._cm.__enter__()
        def __exit__(self, type, value, tb):
            return self._cm.__exit__(type, value, tb)
# Trivial partial subclasses, used by the *Subclass test cases below to
# verify that subclasses of partial behave like partial itself.
if c_functools:
    class CPartialSubclass(c_functools.partial):
        pass
class PyPartialSubclass(py_functools.partial):
    pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
    """Re-run the C partial tests using a partial subclass."""
    if c_functools:
        partial = CPartialSubclass
    # partial subclasses are not optimized for nested calls
    # (overriding the inherited test with None keeps the loader
    # from collecting it)
    test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
    """Re-run the pure-Python partial tests using a partial subclass."""
    partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
    """Tests for functools.partialmethod, including its interaction with
    staticmethod/classmethod and its descriptor binding behavior."""
    class A(object):
        nothing = functools.partialmethod(capture)
        positional = functools.partialmethod(capture, 1)
        keywords = functools.partialmethod(capture, a=2)
        both = functools.partialmethod(capture, 3, b=4)
        nested = functools.partialmethod(positional, 5)
        over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
        static = functools.partialmethod(staticmethod(capture), 8)
        cls = functools.partialmethod(classmethod(capture), d=9)
    a = A()
    def test_arg_combinations(self):
        # self is always prepended to the frozen positional args
        self.assertEqual(self.a.nothing(), ((self.a,), {}))
        self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
        self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
        self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
        self.assertEqual(self.a.positional(), ((self.a, 1), {}))
        self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
        self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
        self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
        self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
        self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
        self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
        self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
        self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
        self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
        self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
        self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
    def test_nested(self):
        # partialmethod wrapping another partialmethod
        self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
        self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
        self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
        self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
    def test_over_partial(self):
        # partialmethod wrapping a plain functools.partial
        self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
        self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
        self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
        self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
        self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
    def test_bound_method_introspection(self):
        obj = self.a
        self.assertIs(obj.both.__self__, obj)
        self.assertIs(obj.nested.__self__, obj)
        self.assertIs(obj.over_partial.__self__, obj)
        self.assertIs(obj.cls.__self__, self.A)
        self.assertIs(self.A.cls.__self__, self.A)
    def test_unbound_method_retrieval(self):
        obj = self.A
        self.assertFalse(hasattr(obj.both, "__self__"))
        self.assertFalse(hasattr(obj.nested, "__self__"))
        self.assertFalse(hasattr(obj.over_partial, "__self__"))
        self.assertFalse(hasattr(obj.static, "__self__"))
        self.assertFalse(hasattr(self.a.static, "__self__"))
    def test_descriptors(self):
        # staticmethod/classmethod wrappers bind (or don't bind) as expected
        for obj in [self.A, self.a]:
            with self.subTest(obj=obj):
                self.assertEqual(obj.static(), ((8,), {}))
                self.assertEqual(obj.static(5), ((8, 5), {}))
                self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
                self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
                self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
                self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
                self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
                self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
    def test_overriding_keywords(self):
        self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
        self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
    def test_invalid_args(self):
        with self.assertRaises(TypeError):
            class B(object):
                method = functools.partialmethod(None, 1)
    def test_repr(self):
        self.assertEqual(repr(vars(self.A)['both']),
                         'functools.partialmethod({}, 3, b=4)'.format(capture))
    def test_abstract(self):
        # __isabstractmethod__ must propagate through partialmethod
        class Abstract(abc.ABCMeta):
            @abc.abstractmethod
            def add(self, x, y):
                pass
            add5 = functools.partialmethod(add, 5)
        self.assertTrue(Abstract.add.__isabstractmethod__)
        self.assertTrue(Abstract.add5.__isabstractmethod__)
        for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
            self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
    """Tests for functools.update_wrapper."""
    def check_wrapper(self, wrapper, wrapped,
                      assigned=functools.WRAPPER_ASSIGNMENTS,
                      updated=functools.WRAPPER_UPDATES):
        # Shared helper: verify update_wrapper copied/merged the expected
        # attributes and set __wrapped__.
        # Check attributes were assigned
        for name in assigned:
            self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
        # Check attributes were updated
        for name in updated:
            wrapper_attr = getattr(wrapper, name)
            wrapped_attr = getattr(wrapped, name)
            for key in wrapped_attr:
                if name == "__dict__" and key == "__wrapped__":
                    # __wrapped__ is overwritten by the update code
                    continue
                self.assertIs(wrapped_attr[key], wrapper_attr[key])
        # Check __wrapped__
        self.assertIs(wrapper.__wrapped__, wrapped)
    def _default_update(self):
        # Build a (wrapper, wrapped) pair using update_wrapper's defaults.
        def f(a:'This is a new annotation'):
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        f.__wrapped__ = "This is a bald faced lie"
        def wrapper(b:'This is the prior annotation'):
            pass
        functools.update_wrapper(wrapper, f)
        return wrapper, f
    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertIs(wrapper.__wrapped__, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')
        self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
        self.assertNotIn('b', wrapper.__annotations__)
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, f = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')
    def test_no_update(self):
        # empty assigned/updated tuples: nothing is copied except __wrapped__
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        functools.update_wrapper(wrapper, f, (), ())
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.__annotations__, {})
        self.assertFalse(hasattr(wrapper, 'attr'))
    def test_selective_update(self):
        # custom assigned/updated tuples copy only the named attributes
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        functools.update_wrapper(wrapper, f, assign, update)
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
    def test_missing_attributes(self):
        def f():
            pass
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        # Missing attributes on wrapped object are ignored
        functools.update_wrapper(wrapper, f, assign, update)
        self.assertNotIn('attr', wrapper.__dict__)
        self.assertEqual(wrapper.dict_attr, {})
        # Wrapper must have expected attributes for updating
        del wrapper.dict_attr
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)
        wrapper.dict_attr = 1
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)
    @support.requires_docstrings
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_builtin_update(self):
        # Test for bug #1576241
        def wrapper():
            pass
        functools.update_wrapper(wrapper, max)
        self.assertEqual(wrapper.__name__, 'max')
        self.assertTrue(wrapper.__doc__.startswith('max('))
        self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
    """Re-run the update_wrapper tests through the functools.wraps
    decorator form."""
    def _default_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        f.__wrapped__ = "This is still a bald faced lie"
        @functools.wraps(f)
        def wrapper():
            pass
        return wrapper, f
    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, _ = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')
    def test_no_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        @functools.wraps(f, (), ())
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))
    def test_selective_update(self):
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def add_dict_attr(f):
            f.dict_attr = {}
            return f
        assign = ('attr',)
        update = ('dict_attr',)
        @functools.wraps(f, assign, update)
        @add_dict_attr
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduce(unittest.TestCase):
    """Tests for functools.reduce (C implementation)."""
    if c_functools:
        func = c_functools.reduce
    def test_reduce(self):
        class Squares:
            # Lazily-computed sequence of squares; exercises the old
            # __getitem__-based iteration protocol.
            def __init__(self, max):
                self.max = max
                self.sofar = []
            def __len__(self):
                return len(self.sofar)
            def __getitem__(self, i):
                if not 0 <= i < self.max: raise IndexError
                n = len(self.sofar)
                while n <= i:
                    self.sofar.append(n*n)
                    n += 1
                return self.sofar[i]
        def add(x, y):
            return x + y
        self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            self.func(add, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            self.func(lambda x, y: x*y, range(2,21), 1),
            2432902008176640000
        )
        self.assertEqual(self.func(add, Squares(10)), 285)
        self.assertEqual(self.func(add, Squares(10), 0), 285)
        self.assertEqual(self.func(add, Squares(0), 0), 0)
        self.assertRaises(TypeError, self.func)
        self.assertRaises(TypeError, self.func, 42, 42)
        self.assertRaises(TypeError, self.func, 42, 42, 42)
        self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
        self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
        self.assertRaises(TypeError, self.func, 42, (42, 42))
        self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
        self.assertRaises(TypeError, self.func, add, "")
        self.assertRaises(TypeError, self.func, add, ())
        self.assertRaises(TypeError, self.func, add, object())
        class TestFailingIter:
            def __iter__(self):
                raise RuntimeError
        self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
        self.assertEqual(self.func(add, [], None), None)
        self.assertEqual(self.func(add, [], 42), 42)
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, self.func, 42, BadSeq())
    # Test reduce()'s use of iterators.
    def test_iterator_usage(self):
        class SequenceClass:
            def __init__(self, n):
                self.n = n
            def __getitem__(self, i):
                if 0 <= i < self.n:
                    return i
                else:
                    raise IndexError
        from operator import add
        self.assertEqual(self.func(add, SequenceClass(5)), 10)
        self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
        self.assertRaises(TypeError, self.func, add, SequenceClass(0))
        self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
        self.assertEqual(self.func(add, SequenceClass(1)), 0)
        self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
        d = {"one": 1, "two": 2, "three": 3}
        self.assertEqual(self.func(add, d), "".join(d.keys()))
class TestCmpToKey:
    """Shared tests for functools.cmp_to_key; subclasses supply the
    ``cmp_to_key`` implementation (C or pure Python)."""
    def test_cmp_to_key(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(cmp1)
        self.assertEqual(key(3), key(3))
        self.assertGreater(key(3), key(1))
        self.assertGreaterEqual(key(3), key(3))
        def cmp2(x, y):
            return int(x) - int(y)
        key = self.cmp_to_key(cmp2)
        self.assertEqual(key(4.0), key('4'))
        self.assertLess(key(2), key('35'))
        self.assertLessEqual(key(2), key('35'))
        self.assertNotEqual(key(2), key('35'))
    def test_cmp_to_key_arguments(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(mycmp=cmp1)
        self.assertEqual(key(obj=3), key(obj=3))
        self.assertGreater(key(obj=3), key(obj=1))
        with self.assertRaises((TypeError, AttributeError)):
            key(3) > 1 # rhs is not a K object
        with self.assertRaises((TypeError, AttributeError)):
            1 < key(3) # lhs is not a K object
        with self.assertRaises(TypeError):
            key = self.cmp_to_key() # too few args
        with self.assertRaises(TypeError):
            key = self.cmp_to_key(cmp1, None) # too many args
        key = self.cmp_to_key(cmp1)
        with self.assertRaises(TypeError):
            key() # too few args
        with self.assertRaises(TypeError):
            key(None, None) # too many args
    def test_bad_cmp(self):
        # exceptions raised inside the comparison function must propagate
        def cmp1(x, y):
            raise ZeroDivisionError
        key = self.cmp_to_key(cmp1)
        with self.assertRaises(ZeroDivisionError):
            key(3) > key(1)
        class BadCmp:
            def __lt__(self, other):
                raise ZeroDivisionError
        def cmp1(x, y):
            return BadCmp()
        with self.assertRaises(ZeroDivisionError):
            key(3) > key(1)
    def test_obj_field(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(mycmp=cmp1)
        self.assertEqual(key(50).obj, 50)
    def test_sort_int(self):
        def mycmp(x, y):
            return y - x
        self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
                         [4, 3, 2, 1, 0])
    def test_sort_int_str(self):
        def mycmp(x, y):
            x, y = int(x), int(y)
            return (x > y) - (x < y)
        values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
        values = sorted(values, key=self.cmp_to_key(mycmp))
        self.assertEqual([int(value) for value in values],
                         [0, 1, 1, 2, 3, 4, 5, 7, 10])
    def test_hash(self):
        # K objects are deliberately unhashable
        def mycmp(x, y):
            return y - x
        key = self.cmp_to_key(mycmp)
        k = key(10)
        self.assertRaises(TypeError, hash, k)
        self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
    """Run the cmp_to_key tests against the C implementation."""
    if c_functools:
        cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
    """Run the cmp_to_key tests against the pure-Python implementation."""
    # staticmethod keeps the function from being bound as a method
    cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
    """Tests for the functools.total_ordering class decorator."""
    def test_total_ordering_lt(self):
        # __lt__ + __eq__ should yield all six comparisons
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __lt__(self, other):
                return self.value < other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) > A(2))
    def test_total_ordering_le(self):
        # __le__ + __eq__ should yield all six comparisons
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __le__(self, other):
                return self.value <= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) >= A(2))
    def test_total_ordering_gt(self):
        # __gt__ + __eq__ should yield all six comparisons
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __gt__(self, other):
                return self.value > other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) < A(1))
    def test_total_ordering_ge(self):
        # __ge__ + __eq__ should yield all six comparisons
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __ge__(self, other):
                return self.value >= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) <= A(1))
    def test_total_ordering_no_overwrite(self):
        # new methods should not overwrite existing
        @functools.total_ordering
        class A(int):
            pass
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
    def test_no_operations_defined(self):
        # decorating a class with no ordering operation is an error
        with self.assertRaises(ValueError):
            @functools.total_ordering
            class A:
                pass
    def test_type_error_when_not_implemented(self):
        # bug 10042; ensure stack overflow does not occur
        # when decorated types return NotImplemented
        @functools.total_ordering
        class ImplementsLessThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value < other.value
                return NotImplemented
        @functools.total_ordering
        class ImplementsGreaterThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value == other.value
                return False
            def __gt__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value > other.value
                return NotImplemented
        @functools.total_ordering
        class ImplementsLessThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value == other.value
                return False
            def __le__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value <= other.value
                return NotImplemented
        @functools.total_ordering
        class ImplementsGreaterThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value == other.value
                return False
            def __ge__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value >= other.value
                return NotImplemented
        @functools.total_ordering
        class ComparatorNotImplemented:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ComparatorNotImplemented):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                return NotImplemented
        with self.subTest("LT < 1"), self.assertRaises(TypeError):
            ImplementsLessThan(-1) < 1
        with self.subTest("LT < LE"), self.assertRaises(TypeError):
            ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
        with self.subTest("LT < GT"), self.assertRaises(TypeError):
            ImplementsLessThan(1) < ImplementsGreaterThan(1)
        with self.subTest("LE <= LT"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
        with self.subTest("LE <= GE"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
        with self.subTest("GT > GE"), self.assertRaises(TypeError):
            ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
        with self.subTest("GT > LT"), self.assertRaises(TypeError):
            ImplementsGreaterThan(5) > ImplementsLessThan(5)
        with self.subTest("GE >= GT"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
        with self.subTest("GE >= LE"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
        with self.subTest("GE when equal"):
            a = ComparatorNotImplemented(8)
            b = ComparatorNotImplemented(8)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a >= b
        with self.subTest("LE when equal"):
            a = ComparatorNotImplemented(9)
            b = ComparatorNotImplemented(9)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a <= b
    def test_pickle(self):
        # methods synthesized by total_ordering must pickle by reference
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in '__lt__', '__gt__', '__le__', '__ge__':
                with self.subTest(method=name, proto=proto):
                    method = getattr(Orderable_LT, name)
                    method_copy = pickle.loads(pickle.dumps(method, proto))
                    self.assertIs(method_copy, method)
# Defined at module level so its (total_ordering-synthesized) methods can be
# pickled by reference in TestTotalOrdering.test_pickle.
@functools.total_ordering
class Orderable_LT:
    def __init__(self, value):
        self.value = value
    def __lt__(self, other):
        return self.value < other.value
    def __eq__(self, other):
        return self.value == other.value
class TestLRU:
    def test_lru(self):
        """End-to-end check of lru_cache: hit/miss statistics, cache_clear,
        __wrapped__ bypass, and the maxsize=0, 1 and 2 special cases."""
        def orig(x, y):
            return 3 * x + y
        f = self.module.lru_cache(maxsize=20)(orig)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(maxsize, 20)
        self.assertEqual(currsize, 0)
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)
        # 1000 calls over only 25 distinct argument pairs: mostly hits,
        # and the cache fills to its maximum of 20 entries
        domain = range(5)
        for i in range(1000):
            x, y = choice(domain), choice(domain)
            actual = f(x, y)
            expected = orig(x, y)
            self.assertEqual(actual, expected)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertTrue(hits > misses)
        self.assertEqual(hits + misses, 1000)
        self.assertEqual(currsize, 20)
        f.cache_clear() # test clearing
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)
        self.assertEqual(currsize, 0)
        f(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)
        # Test bypassing the cache
        self.assertIs(f.__wrapped__, orig)
        f.__wrapped__(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)
        # test size zero (which means "never-cache")
        @self.module.lru_cache(0)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        self.assertEqual(f.cache_info().maxsize, 0)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 5)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 5)
        self.assertEqual(currsize, 0)
        # test size one
        @self.module.lru_cache(1)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        self.assertEqual(f.cache_info().maxsize, 1)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 1)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 4)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)
        # test size two
        @self.module.lru_cache(2)
        def f(x):
            nonlocal f_cnt
            f_cnt += 1
            return x*10
        self.assertEqual(f.cache_info().maxsize, 2)
        f_cnt = 0
        for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            #    *  *              *                          *
            self.assertEqual(f(x), x*10)
        self.assertEqual(f_cnt, 4)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 12)
        self.assertEqual(misses, 4)
        self.assertEqual(currsize, 2)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
# LRU cache guarantees that it will only call __hash__
# only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown-off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can be not equal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
    # Module-level function cached by the pure-Python lru_cache; defined at
    # module scope so the pickle/copy tests in TestLRU can find it by name.
    return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
    # Module-level function cached by the C lru_cache; defined at module
    # scope so the pickle/copy tests in TestLRU can find it by name.
    return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
    """Run the shared TestLRU suite against the pure-Python implementation."""
    module = py_functools
    cached_func = py_cached_func,  # trailing comma: one-element tuple

    # Decorated in the class body so the cache wraps the plain function
    # before it becomes a method.
    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y

    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
    """Run the shared TestLRU suite against the C implementation."""
    module = c_functools
    cached_func = c_cached_func,  # trailing comma: one-element tuple

    # Decorated in the class body so the cache wraps the plain function
    # before it becomes a method.
    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y

    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
    """Tests for functools.singledispatch: registration forms, MRO-based
    dispatch (including virtual ABC subclasses), ambiguity detection, and
    the dispatch cache's invalidation behavior."""

    def test_simple_overloads(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        def g_int(i):
            return "integer"
        g.register(int, g_int)
        self.assertEqual(g("str"), "base")
        self.assertEqual(g(1), "integer")
        self.assertEqual(g([1,2,3]), "base")

    def test_mro(self):
        """Dispatch follows the argument class's MRO order."""
        @functools.singledispatch
        def g(obj):
            return "base"
        class A:
            pass
        class C(A):
            pass
        class B(A):
            pass
        class D(C, B):
            pass
        def g_A(a):
            return "A"
        def g_B(b):
            return "B"
        g.register(A, g_A)
        g.register(B, g_B)
        self.assertEqual(g(A()), "A")
        self.assertEqual(g(B()), "B")
        self.assertEqual(g(C()), "A")
        # D's MRO reaches B (registered) before A.
        self.assertEqual(g(D()), "B")

    def test_register_decorator(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        @g.register(int)
        def g_int(i):
            return "int %s" % (i,)
        self.assertEqual(g(""), "base")
        self.assertEqual(g(12), "int 12")
        self.assertIs(g.dispatch(int), g_int)
        self.assertIs(g.dispatch(object), g.dispatch(str))
        # Note: in the assert above this is not g.
        # @singledispatch returns the wrapper.

    def test_wrapping_attributes(self):
        @functools.singledispatch
        def g(obj):
            "Simple test"
            return "Test"
        self.assertEqual(g.__name__, "g")
        if sys.flags.optimize < 2:
            # -OO strips docstrings, so only check when they survive.
            self.assertEqual(g.__doc__, "Simple test")

    @unittest.skipUnless(decimal, 'requires _decimal')
    @support.cpython_only
    def test_c_classes(self):
        """Registration also works for C-implemented classes."""
        @functools.singledispatch
        def g(obj):
            return "base"
        @g.register(decimal.DecimalException)
        def _(obj):
            return obj.args
        subn = decimal.Subnormal("Exponent < Emin")
        rnd = decimal.Rounded("Number got rounded")
        self.assertEqual(g(subn), ("Exponent < Emin",))
        self.assertEqual(g(rnd), ("Number got rounded",))
        @g.register(decimal.Subnormal)
        def _(obj):
            return "Too small to care."
        self.assertEqual(g(subn), "Too small to care.")
        self.assertEqual(g(rnd), ("Number got rounded",))

    def test_compose_mro(self):
        # None of the examples in this test depend on haystack ordering.
        c = collections.abc
        mro = functools._compose_mro
        bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
        for haystack in permutations(bases):
            m = mro(dict, haystack)
            self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
                                 c.Collection, c.Sized, c.Iterable,
                                 c.Container, object])
        bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
        for haystack in permutations(bases):
            m = mro(collections.ChainMap, haystack)
            self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
                                 c.Collection, c.Sized, c.Iterable,
                                 c.Container, object])

        # If there's a generic function with implementations registered for
        # both Sized and Container, passing a defaultdict to it results in an
        # ambiguous dispatch which will cause a RuntimeError (see
        # test_mro_conflicts).
        bases = [c.Container, c.Sized, str]
        for haystack in permutations(bases):
            m = mro(collections.defaultdict, [c.Sized, c.Container, str])
            self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
                                 c.Container, object])

        # MutableSequence below is registered directly on D. In other words, it
        # precedes MutableMapping which means single dispatch will always
        # choose MutableSequence here.
        class D(collections.defaultdict):
            pass
        c.MutableSequence.register(D)
        bases = [c.MutableSequence, c.MutableMapping]
        for haystack in permutations(bases):
            m = mro(D, bases)
            self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
                                 collections.defaultdict, dict, c.MutableMapping, c.Mapping,
                                 c.Collection, c.Sized, c.Iterable, c.Container,
                                 object])

        # Container and Callable are registered on different base classes and
        # a generic function supporting both should always pick the Callable
        # implementation if a C instance is passed.
        class C(collections.defaultdict):
            def __call__(self):
                pass
        bases = [c.Sized, c.Callable, c.Container, c.Mapping]
        for haystack in permutations(bases):
            m = mro(C, haystack)
            self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
                                 c.Collection, c.Sized, c.Iterable,
                                 c.Container, object])

    def test_register_abc(self):
        """Progressively more specific registrations (ABCs, then concrete
        classes) must win over earlier, more general ones."""
        c = collections.abc
        d = {"a": "b"}
        l = [1, 2, 3]
        s = {object(), None}
        f = frozenset(s)
        t = (1, 2, 3)
        @functools.singledispatch
        def g(obj):
            return "base"
        self.assertEqual(g(d), "base")
        self.assertEqual(g(l), "base")
        self.assertEqual(g(s), "base")
        self.assertEqual(g(f), "base")
        self.assertEqual(g(t), "base")
        g.register(c.Sized, lambda obj: "sized")
        self.assertEqual(g(d), "sized")
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableMapping, lambda obj: "mutablemapping")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(collections.ChainMap, lambda obj: "chainmap")
        self.assertEqual(g(d), "mutablemapping")  # irrelevant ABCs registered
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableSequence, lambda obj: "mutablesequence")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableSet, lambda obj: "mutableset")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.Mapping, lambda obj: "mapping")
        self.assertEqual(g(d), "mutablemapping")  # not specific enough
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.Sequence, lambda obj: "sequence")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sequence")
        g.register(c.Set, lambda obj: "set")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(dict, lambda obj: "dict")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(list, lambda obj: "list")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(set, lambda obj: "concrete-set")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(frozenset, lambda obj: "frozen-set")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "frozen-set")
        self.assertEqual(g(t), "sequence")
        g.register(tuple, lambda obj: "tuple")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "frozen-set")
        self.assertEqual(g(t), "tuple")

    def test_c3_abc(self):
        """functools._c3_mro merges implicit ABCs into a consistent MRO."""
        c = collections.abc
        mro = functools._c3_mro
        class A(object):
            pass
        class B(A):
            def __len__(self):
                return 0   # implies Sized
        @c.Container.register
        class C(object):
            pass
        class D(object):
            pass   # unrelated
        class X(D, C, B):
            def __call__(self):
                pass   # implies Callable
        expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
        for abcs in permutations([c.Sized, c.Callable, c.Container]):
            self.assertEqual(mro(X, abcs=abcs), expected)
        # unrelated ABCs don't appear in the resulting MRO
        many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
        self.assertEqual(mro(X, abcs=many_abcs), expected)

    def test_false_meta(self):
        # see issue23572
        class MetaA(type):
            def __len__(self):
                return 0
        class A(metaclass=MetaA):
            pass
        class AA(A):
            pass
        @functools.singledispatch
        def fun(a):
            return 'base A'
        @fun.register(A)
        def _(a):
            return 'fun A'
        aa = AA()
        # The metaclass's __len__ must not make AA look Sized.
        self.assertEqual(fun(aa), 'fun A')

    def test_mro_conflicts(self):
        """Ambiguous dispatch between sibling virtual ABCs raises
        RuntimeError; explicit bases in __mro__ take precedence."""
        c = collections.abc
        @functools.singledispatch
        def g(arg):
            return "base"
        class O(c.Sized):
            def __len__(self):
                return 0
        o = O()
        self.assertEqual(g(o), "base")
        g.register(c.Iterable, lambda arg: "iterable")
        g.register(c.Container, lambda arg: "container")
        g.register(c.Sized, lambda arg: "sized")
        g.register(c.Set, lambda arg: "set")
        self.assertEqual(g(o), "sized")
        c.Iterable.register(O)
        self.assertEqual(g(o), "sized")   # because it's explicitly in __mro__
        c.Container.register(O)
        self.assertEqual(g(o), "sized")   # see above: Sized is in __mro__
        c.Set.register(O)
        self.assertEqual(g(o), "set")     # because c.Set is a subclass of
                                          # c.Sized and c.Container
        class P:
            pass
        p = P()
        self.assertEqual(g(p), "base")
        c.Iterable.register(P)
        self.assertEqual(g(p), "iterable")
        c.Container.register(P)
        with self.assertRaises(RuntimeError) as re_one:
            g(p)
        self.assertIn(
            str(re_one.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Iterable'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
              "or <class 'collections.abc.Container'>")),
        )
        class Q(c.Sized):
            def __len__(self):
                return 0
        q = Q()
        self.assertEqual(g(q), "sized")
        c.Iterable.register(Q)
        self.assertEqual(g(q), "sized")   # because it's explicitly in __mro__
        c.Set.register(Q)
        self.assertEqual(g(q), "set")     # because c.Set is a subclass of
                                          # c.Sized and c.Iterable
        @functools.singledispatch
        def h(arg):
            return "base"
        @h.register(c.Sized)
        def _(arg):
            return "sized"
        @h.register(c.Container)
        def _(arg):
            return "container"
        # Even though Sized and Container are explicit bases of MutableMapping,
        # this ABC is implicitly registered on defaultdict which makes all of
        # MutableMapping's bases implicit as well from defaultdict's
        # perspective.
        with self.assertRaises(RuntimeError) as re_two:
            h(collections.defaultdict(lambda: 0))
        self.assertIn(
            str(re_two.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Sized'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Sized'> "
              "or <class 'collections.abc.Container'>")),
        )
        class R(collections.defaultdict):
            pass
        c.MutableSequence.register(R)
        @functools.singledispatch
        def i(arg):
            return "base"
        @i.register(c.MutableMapping)
        def _(arg):
            return "mapping"
        @i.register(c.MutableSequence)
        def _(arg):
            return "sequence"
        r = R()
        self.assertEqual(i(r), "sequence")
        class S:
            pass
        class T(S, c.Sized):
            def __len__(self):
                return 0
        t = T()
        self.assertEqual(h(t), "sized")
        c.Container.register(T)
        self.assertEqual(h(t), "sized")   # because it's explicitly in the MRO
        class U:
            def __len__(self):
                return 0
        u = U()
        self.assertEqual(h(u), "sized")   # implicit Sized subclass inferred
                                          # from the existence of __len__()
        c.Container.register(U)
        # There is no preference for registered versus inferred ABCs.
        with self.assertRaises(RuntimeError) as re_three:
            h(u)
        self.assertIn(
            str(re_three.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Sized'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Sized'> "
              "or <class 'collections.abc.Container'>")),
        )
        class V(c.Sized, S):
            def __len__(self):
                return 0
        @functools.singledispatch
        def j(arg):
            return "base"
        @j.register(S)
        def _(arg):
            return "s"
        @j.register(c.Container)
        def _(arg):
            return "container"
        v = V()
        self.assertEqual(j(v), "s")
        c.Container.register(V)
        self.assertEqual(j(v), "container")   # because it ends up right after
                                              # Sized in the MRO

    def test_cache_invalidation(self):
        """The dispatch cache must be dropped on new registrations and on
        ABC registry changes; a TracingDict stands in for the cache."""
        from collections import UserDict
        import weakref

        class TracingDict(UserDict):
            # Records every key read and write so the test can observe
            # exactly when the dispatch cache is consulted or refilled.
            def __init__(self, *args, **kwargs):
                super(TracingDict, self).__init__(*args, **kwargs)
                self.set_ops = []
                self.get_ops = []
            def __getitem__(self, key):
                result = self.data[key]
                self.get_ops.append(key)
                return result
            def __setitem__(self, key, value):
                self.set_ops.append(key)
                self.data[key] = value
            def clear(self):
                self.data.clear()

        td = TracingDict()
        with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
            c = collections.abc
            @functools.singledispatch
            def g(arg):
                return "base"
            d = {}
            l = []
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "base")
            self.assertEqual(len(td), 1)
            self.assertEqual(td.get_ops, [])
            self.assertEqual(td.set_ops, [dict])
            self.assertEqual(td.data[dict], g.registry[object])
            self.assertEqual(g(l), "base")
            self.assertEqual(len(td), 2)
            self.assertEqual(td.get_ops, [])
            self.assertEqual(td.set_ops, [dict, list])
            self.assertEqual(td.data[dict], g.registry[object])
            self.assertEqual(td.data[list], g.registry[object])
            self.assertEqual(td.data[dict], td.data[list])
            self.assertEqual(g(l), "base")
            self.assertEqual(g(d), "base")
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(td.set_ops, [dict, list])
            g.register(list, lambda arg: "list")
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "base")
            self.assertEqual(len(td), 1)
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict])
            self.assertEqual(td.data[dict],
                             functools._find_impl(dict, g.registry))
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 2)
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict, list])
            self.assertEqual(td.data[list],
                             functools._find_impl(list, g.registry))
            class X:
                pass
            c.MutableMapping.register(X)   # Will not invalidate the cache,
                                           # not using ABCs yet.
            self.assertEqual(g(d), "base")
            self.assertEqual(g(l), "list")
            self.assertEqual(td.get_ops, [list, dict, dict, list])
            self.assertEqual(td.set_ops, [dict, list, dict, list])
            g.register(c.Sized, lambda arg: "sized")
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "sized")
            self.assertEqual(len(td), 1)
            self.assertEqual(td.get_ops, [list, dict, dict, list])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 2)
            self.assertEqual(td.get_ops, [list, dict, dict, list])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
            self.assertEqual(g(l), "list")
            self.assertEqual(g(d), "sized")
            self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
            g.dispatch(list)
            g.dispatch(dict)
            self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
                                          list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
            c.MutableSet.register(X)       # Will invalidate the cache.
            self.assertEqual(len(td), 2)   # Stale cache.
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 1)
            g.register(c.MutableMapping, lambda arg: "mutablemapping")
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "mutablemapping")
            self.assertEqual(len(td), 1)
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 2)
            g.register(dict, lambda arg: "dict")
            self.assertEqual(g(d), "dict")
            self.assertEqual(g(l), "list")
            g._clear_cache()
            self.assertEqual(len(td), 0)

    def test_annotations(self):
        """Plain @register reads the dispatch type from the annotation,
        including string (forward-reference) annotations."""
        @functools.singledispatch
        def i(arg):
            return "base"
        @i.register
        def _(arg: collections.abc.Mapping):
            return "mapping"
        @i.register
        def _(arg: "collections.abc.Sequence"):
            return "sequence"
        self.assertEqual(i(None), "base")
        self.assertEqual(i({"a": 1}), "mapping")
        self.assertEqual(i([1, 2, 3]), "sequence")
        self.assertEqual(i((1, 2, 3)), "sequence")
        self.assertEqual(i("str"), "sequence")

        # Registering classes as callables doesn't work with annotations,
        # you need to pass the type explicitly.
        @i.register(str)
        class _:
            def __init__(self, arg):
                self.arg = arg

            def __eq__(self, other):
                return self.arg == other
        self.assertEqual(i("str"), "str")

    def test_invalid_registrations(self):
        """register() rejects non-types and unannotated plain functions
        with a descriptive TypeError."""
        msg_prefix = "Invalid first argument to `register()`: "
        msg_suffix = (
            ". Use either `@register(some_class)` or plain `@register` on an "
            "annotated function."
        )
        @functools.singledispatch
        def i(arg):
            return "base"
        with self.assertRaises(TypeError) as exc:
            @i.register(42)
            def _(arg):
                return "I annotated with a non-type"
        self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
        self.assertTrue(str(exc.exception).endswith(msg_suffix))
        with self.assertRaises(TypeError) as exc:
            @i.register
            def _(arg):
                return "I forgot to annotate"
        self.assertTrue(str(exc.exception).startswith(msg_prefix +
            "<function TestSingleDispatch.test_invalid_registrations.<locals>._"
        ))
        self.assertTrue(str(exc.exception).endswith(msg_suffix))

        # FIXME: The following will only work after PEP 560 is implemented.
        return

        with self.assertRaises(TypeError) as exc:
            @i.register
            def _(arg: typing.Iterable[str]):
                # At runtime, dispatching on generics is impossible.
                # When registering implementations with singledispatch, avoid
                # types from `typing`. Instead, annotate with regular types
                # or ABCs.
                return "I annotated with a generic collection"
        self.assertTrue(str(exc.exception).startswith(msg_prefix +
            "<function TestSingleDispatch.test_invalid_registrations.<locals>._"
        ))
        self.assertTrue(str(exc.exception).endswith(msg_suffix))

    def test_invalid_positional_argument(self):
        @functools.singledispatch
        def f(*args):
            pass
        msg = 'f requires at least 1 positional argument'
        with self.assertRaisesRegex(TypeError, msg):
            f()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
background.py | # Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from .runner import Runner
def dumb_print(message: str) -> None:
    """Default telemetry sink: emit *message* on standard output.

    Used as the initial value of ``DumbHandler.tel_output`` until
    ``launch_local_server`` swaps in the runner's writer.
    """
    print(message)
class DumbHandler(BaseHTTPRequestHandler):
    """
    HTTP handler that returns success for any HEAD request
    """

    # Log sink callable; launch_local_server() replaces this with
    # Runner.write so handler output flows through the runner.
    tel_output = dumb_print

    def do_HEAD(self) -> None:
        "Handle head"
        # Any HEAD request succeeds unconditionally; this is a liveness probe.
        self.send_response(200)
        self.end_headers()

    # NOTE: parameter renamed from "format" to "fmt" to avoid shadowing the
    # builtin; the base class invokes log_message positionally, so this is
    # backward compatible.
    def log_message(self, fmt: str, *args: typing.Any) -> None:
        """
        Make sure log messages go to the right place
        """
        message = fmt % args
        if message == '"HEAD / HTTP/1.1" 200 -':
            # Collapse the noisy per-probe access-log line into a terse note.
            message = "(proxy checking local liveness)"
        DumbHandler.tel_output(message)
def launch_local_server(runner: Runner, port: int) -> None:
    """
    Make a dumb web server for the proxy pod to poll.
    """
    # Route all handler log output through the runner from now on.
    DumbHandler.tel_output = runner.write
    httpd = HTTPServer(("127.0.0.1", port), DumbHandler)
    server_thread = Thread(target=httpd.serve_forever, daemon=True)
    server_thread.start()
    label = "Web server for proxy poll"
    runner.write(f"Launching {label}")
    # Ensure the server is shut down when the runner cleans up.
    runner.add_cleanup(f"Kill {label}", httpd.shutdown)
|
DockerRunner.py | import docker
import os
import json
from threading import Thread
from time import time as _time
from time import sleep as _sleep
import sys
class DockerRunner:
    """
    This class provides the container interface for Docker.
    """

    def __init__(self, logger=None):
        """
        Inputs: an optional logger object providing log_lines(), log()
        and error() methods; when None, log output is dropped.
        """
        self.docker = docker.from_env()
        self.logger = logger
        self.containers = []   # containers started via run() and not yet reaped
        self.threads = []      # shepherd threads, one per running container

    def _sort_logs(self, sout, serr):
        """
        This is an internal function to sort and interlace output for NJS.
        This is not fully implemented yet and should be rethought.

        Returns a list of {'line': str, 'is_error': 0|1} dicts: all stdout
        lines first, then all stderr lines; empty lines are dropped.
        """
        # TODO: Fix sorting (stdout/stderr are not interleaved by timestamp)
        lines = []
        if sout:
            for line in sout.decode("utf-8").split('\n'):
                if line:
                    lines.append({'line': line, 'is_error': 0})
        if serr:
            for line in serr.decode("utf-8").split('\n'):
                if line:
                    lines.append({'line': line, 'is_error': 1})
        return lines

    def _shepherd(self, c, job_id, subjob, queues):
        """
        Thread body: poll container *c* while it runs, forward incremental
        logs to the logger, then remove the container and notify *queues*
        with a ['finished', job_id, None] message.
        """
        last = 1
        try:
            while c.status in ['created', 'running']:
                c.reload()
                now = int(_time())
                # Fetch only the log window since the previous poll.
                sout = c.logs(stdout=True, stderr=False, since=last, until=now,
                              timestamps=True)
                serr = c.logs(stdout=False, stderr=True, since=last, until=now,
                              timestamps=True)
                lines = self._sort_logs(sout, serr)
                if self.logger is not None:
                    self.logger.log_lines(lines)
                last = now
                _sleep(1)
            c.remove()
            self.containers.remove(c)
            for q in queues:
                q.put(['finished', job_id, None])
        except Exception:
            # Was a bare "except:" that also dereferenced self.logger
            # unconditionally, which itself raised when logger was None.
            if self.logger is not None:
                self.logger.error("Unexpected failure")

    def get_image(self, image):
        """Return the image id for *image*, pulling from the hub if it is
        not already present locally."""
        image_id = None
        for im in self.docker.images.list():
            if image in im.tags:
                image_id = im.id
                break
        if image_id is None:
            if self.logger is not None:
                self.logger.log("Pulling image {}".format(image))
            image_id = self.docker.images.pull(image).id
        return image_id

    def run(self, job_id, image, env, vols, labels, subjob, queues):
        """Start a detached container running 'async' and a shepherd thread
        that watches it; returns the container object."""
        c = self.docker.containers.run(image, 'async',
                                       environment=env,
                                       detach=True,
                                       labels=labels,
                                       volumes=vols)
        self.containers.append(c)
        # Start a thread to monitor output and handle finished containers
        t = Thread(target=self._shepherd, args=[c, job_id, subjob, queues])
        self.threads.append(t)
        t.start()
        return c

    def remove(self, c):
        """Best-effort kill and removal of container *c*; failures (already
        stopped, already removed) are deliberately ignored."""
        try:
            c.kill()
        except Exception:
            pass
        try:
            c.remove()
        except Exception:
            pass
|
trainer.py | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""Trainer.
To run locally:
.. code-block:: bash
$ bazel build -c opt //lingvo:trainer
$ bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=sync --logdir=/tmp/lenet5 \
--run_locally=cpu
To use GPU, add `--config=cuda` to build command and set `--run_locally=gpu`.
"""
# pylint: enable=line-too-long
import os
import re
import sys
import threading
import time
from lingvo import base_trial
from lingvo import datasets
from lingvo import executor
from lingvo import model_imports
from lingvo import model_registry
from lingvo import trainer_impl
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import checkpointer
from lingvo.core import cluster_factory
from lingvo.core import inference_graph_exporter
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.core import summary_utils
import numpy as np
from lingvo import base_runner
from google.protobuf import text_format
# pylint:disable=g-direct-tensorflow-import
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop as tpu_training_loop
from tensorflow.python.tpu.ops import tpu_ops
# pylint:enable=g-direct-tensorflow-import
# --- Model selection and run mode -------------------------------------------
tf.flags.DEFINE_string(
    'model', None, 'Name of the model class to train.'
    'Must be a model defined in the model_registry.')
tf.flags.DEFINE_string(
    'model_task_name', '', 'For multitask models: '
    'select task to train/evaluate/decode. '
    'Empty means to sample a task (training only).')
tf.flags.DEFINE_string('logdir', '', 'Log directory.')
tf.flags.DEFINE_bool(
    'interactive', False,
    'If True, enter interactive IPython for the controller job.')
tf.flags.DEFINE_string(
    'run_locally', '',
    'Can be empty, cpu, or gpu. If not empty, ignores cluster configuration '
    'flags and runs controller and trainer in a single local process.')
tf.flags.DEFINE_string('tf_master', '', 'TF runtime.')
tf.flags.DEFINE_string(
    'cluster_spec', '', 'A tf.train.ClusterSpec to override the master. '
    'The dict is specified as: job=host1:port1,host2:port2,'
    'host3:port3@job2=host3:port4,...')
tf.flags.DEFINE_string(
    'mode', 'async', 'How this trainer binary is used. '
    'async: used in an async training setup; '
    'sync: used in a sync training setup; '
    'shell: an interactive shell for development; '
    'inspect_evaler: print evaler dataset names; '
    'inspect_decoder: print decoder dataset names; '
    'write_inference_graph: write inference graphs to logdir.')
tf.flags.DEFINE_string('job', '', 'trainer/controller/eval, etc.')
tf.flags.DEFINE_integer('task', 0, 'Task id within the job.')

# --- Cluster topology: one job-name/replica/device group per role -----------
tf.flags.DEFINE_string('controller_job', '/job:controller', 'Job name.')
tf.flags.DEFINE_integer('controller_gpus', 0, 'Number of controller GPUs.')
tf.flags.DEFINE_string('worker_job', '/job:trainer', 'Job name.')
tf.flags.DEFINE_list('additional_worker_jobs', [],
                     'Additional worker job names.')
tf.flags.DEFINE_integer('worker_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('worker_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_integer('worker_tpus', 0, 'Number of tpus to use per replica.')
tf.flags.DEFINE_integer('worker_num_tpu_hosts', 0, 'Number of tpu hosts.')
tf.flags.DEFINE_integer('worker_split_size', 1,
                        'Number of devices for one split.')
tf.flags.DEFINE_string('ps_job', '/job:ps', 'Job name')
tf.flags.DEFINE_integer('ps_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('ps_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('input_job', '/job:input', 'Job name')
tf.flags.DEFINE_integer('input_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_string(
    'input_targets', '', 'Target network addresses for the '
    'input job. E.g., a single ip:port, or a list of '
    'comma-separated grpc://ip:port, etc.')
tf.flags.DEFINE_string('evaler_job', '/job:evaler', 'Job name')
tf.flags.DEFINE_integer('evaler_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('evaler_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('decoder_job', '/job:decoder', 'Job name')
tf.flags.DEFINE_integer('decoder_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('decoder_gpus', 0, 'Number of gpus to use per replica.')

# --- Inference graph export --------------------------------------------------
tf.flags.DEFINE_integer(
    'inference_graph_random_seed', None,
    'Random seed to fix when exporting inference graph. '
    'Not fixed when set to None.')
tf.flags.DEFINE_string(
    'inference_graph_filename', None,
    'Output inference graph filename. If unspecified, output two inference '
    'graphs, one for CPU and one for TPU using the default settings.')
tf.flags.DEFINE_string(
    'inference_graph_device', None,
    'Type of device the output inference graph is for. This flag is applicable '
    'only when FLAGS.inference_graph_filename is specified.')

# --- Evaluation / trial (vizier) reporting -----------------------------------
tf.flags.DEFINE_bool(
    'evaler_in_same_address_as_controller', False,
    'Whether or not evaler is in the same address space as '
    'controller. This flag is meant for unittest only.')
tf.flags.DEFINE_string(
    'vizier_reporting_job', 'evaler',
    'Job responsible for reporting metrics. This specifies a '
    'job prefix, evaler will match all evaler jobs, while '
    'evaler_dev and decoder_dev will only match the corresponding '
    'jobs that are on the dev set.')
tf.flags.DEFINE_bool(
    'add_summary', None,
    'Whether we should output summaries. The default value "None", enables '
    'summaries based on the job type.')
@tf.flags.validator('vizier_reporting_job')
def _ValidateVizierReportingJob(value):
  """Flag validator: accept evaler/decoder jobs, incl. dataset-suffixed ones."""
  if value in ('evaler', 'decoder') or value.startswith(('evaler_', 'decoder_')):
    return True
  raise tf.flags.ValidationError('Invalid value %s for vizier_reporting_job' %
                                 value)
# --- Enqueue / checkpointing controls ----------------------------------------
tf.flags.DEFINE_integer(
    'enqueue_max_steps', None, 'Max enqueue steps. -1 meaning no limit.'
    ' This flag should be set for unit-test only.')
tf.flags.DEFINE_integer('saver_max_to_keep', None,
                        'Maximum number of recent checkpoints to keep.')
tf.flags.DEFINE_float('saver_keep_checkpoint_every_n_hours', None,
                      'How often to keep a checkpoint.')
tf.flags.DEFINE_bool(
    'checkpoint_in_trainer_tpu', False,
    'Whether to enable checkpointing in TrainerTpu, allowing for '
    'operation without a separate Controller task.'
    'This flag also disables checkpointing from the Controller, '
    'but still allows it to write summaries.')

# --- Cloud TPU resolution (TPUClusterResolver inputs) ------------------------
tf.flags.DEFINE_string(
    'tpu', None,
    'The Cloud TPU on GCP to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
    'url. If set, other cluster parameters (such as --cluster_spec) will be '
    'configured automatically with TPUClusterResolver.')
tf.flags.DEFINE_string(
    'gcp_project', None,
    'Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
tf.flags.DEFINE_string(
    'tpu_zone', None,
    'GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')

# Please consider adding model params instead of adding flags.

FLAGS = tf.flags.FLAGS
# useful for debugging.
def _StartShell(local_ns=None):
  """Open an interactive IPython shell seeded with locals and globals."""
  # An interactive shell is useful for debugging/development.
  import IPython  # pylint: disable=g-import-not-at-top

  shell_ns = {}
  if local_ns:
    shell_ns.update(local_ns)
  shell_ns.update(globals())
  IPython.start_ipython(argv=[], user_ns=shell_ns)
class Controller(base_runner.BaseRunner):
  """Controller for a training cluster.

  Builds the full FProp/BProp graph under the cluster placer, owns the
  summary writer for the `control` directory, and (unless
  --checkpoint_in_trainer_tpu is set) owns the checkpointer for train_dir.
  """

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._job_name = 'controller'
    assert not self._model_task_name, 'Controller needs all tasks!'
    self._control_dir = os.path.join(self._logdir, 'control')
    tf.io.gfile.makedirs(self._control_dir)
    self._summary_writer = self._CreateSummaryWriter(self._control_dir)
    # When the TPU trainer checkpoints for itself, the controller must not,
    # or both would write checkpoints into the same train_dir.
    self._checkpoint_in_controller = True
    if FLAGS.checkpoint_in_trainer_tpu:
      self._checkpoint_in_controller = False

    with self._graph.as_default(), tf.container(self._container_id):
      with self._cluster, tf.device(self._cluster.GetPlacer()):
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._model.ConstructFPropBPropGraph()
      self._summary_op = tf.summary.merge_all()
      self._initialize_tables = tf.tables_initializer()
      self._initialize_local_vars = tf.local_variables_initializer()
      self._initialize_global_vars = tf.global_variables_initializer()
      # Input initialization ops across all tasks of the (multitask) model.
      self._init_input_ops = [
          task.input.InitOps() for task in self._model.tasks
      ]
      self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
      if self._checkpoint_in_controller:
        self.checkpointer = self._CreateCheckpointer(
            self._train_dir,
            self._model,
            init_op=self._initialize_global_vars)

    self._ExportMetrics(params=self.params)
    self._model_analysis, self._total_num_params = summary_utils.ModelAnalysis(
        self._model)
    py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
    # Persist analysis, params (text and proto) and graph def for debugging.
    self._WriteToLog(self._model_analysis, self._control_dir,
                     'model_analysis.txt')
    self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
    self._WriteToLog(
        text_format.MessageToString(self.params.ToProto(), as_utf8=True),
        self._control_dir, 'params.pbtxt')
    tf.io.write_graph(self._graph.as_graph_def(), self._control_dir,
                      'train.pbtxt')

  def _CreateCheckpointer(self, train_dir, model, init_op=None):
    """Wrapper method for override purposes."""
    return checkpointer.Checkpointer(train_dir, model, init_op)

  def Start(self):
    """Run the controller loop (blocks the calling thread)."""
    self._RunLoop('controller', self._Loop)

  def StartEnqueueOp(self, op):
    """Run enqueue op `op` on its own loop thread."""
    self._RunLoop(
        'controller/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])

  def _Loop(self):
    """Main loop: initialize, then periodically checkpoint and summarize."""
    self._summary_writer.add_graph(self._graph)
    with tf.container(self._container_id), self._GetSession() as sess:
      if FLAGS.interactive:
        # Into interactive debugging mode.
        _StartShell(locals())
        return
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      # This initializes any ops the input generator depends on.
      sess.run(self._init_input_ops)

      # TODO(zhifengc): Moves these options into params.
      tp = self.params.train
      summary_interval_steps = tp.summary_interval_steps
      save_interval_seconds = tp.save_interval_seconds
      next_summary_step = 1

      if not self._checkpoint_in_controller:
        # The TPU trainer owns the checkpoint; just wait for variables.
        global_step = self._WaitUntilInit(sess)

      while True:
        now = time.time()
        next_iteration_seconds = now + min(
            10, save_interval_seconds)  # 10 seconds or less

        if self._checkpoint_in_controller:
          # Init/restore variable if needed.
          self.checkpointer.RestoreIfNeeded(sess)

        global_step = sess.run(self._model.global_step)
        if self._trial.ShouldStop() or self._ShouldStop(sess, global_step):
          tf.logging.info('Training finished.')
          if self._checkpoint_in_controller:
            # Save a final checkpoint before exiting.
            self.checkpointer.Save(sess, global_step)
          sess.close()
          self._DequeueThreadComplete()
          return

        if self._checkpoint_in_controller:
          # Checkpoint if it's time.
          self.checkpointer.MaybeSave(sess, self._model.global_step)

        # Summary.
        if self._summary_op is not None and global_step >= next_summary_step:
          global_step, summary_str = sess.run(
              [self._model.global_step, self._summary_op])
          next_summary_step = global_step + summary_interval_steps

          if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
            tf.logging.info('Skipping summary: %s', summary_str)
          else:
            self._summary_writer.add_summary(summary_str, global_step)
          tf.logging.info('Write summary @%s', global_step)
          self._SummarizeValue(global_step, 'total_num_params',
                               self._total_num_params)
          tf.logging.info('Write summary done: step %d', global_step)

        # Sleep out the remainder of this iteration's time budget.
        now = time.time()
        if now < next_iteration_seconds:
          time.sleep(next_iteration_seconds - now)

  def _SummarizeValue(self, step, tag, value):
    """Write the scalar summary `tag`=`value` at `step`."""
    self._summary_writer.add_summary(
        metrics.CreateScalarSummary(tag, value), step)
Trainer = trainer_impl.Trainer
class TrainerTpu(base_runner.BaseRunner):
  """Trainer on TPU.

  Initializes the TPU system, builds an on-device training loop that runs
  `tpu_steps_per_loop` steps per session call, and streams per-example
  tensors back through the outfeed.
  """

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._job_name = 'trainer_tpu'

    # Multiple TPU trainer tasks not tested/implemented.
    assert self._cluster.num_replicas == 1
    data_parallelism = self._cluster.num_splits_per_client
    assert data_parallelism
    num_devices_per_split = self._cluster.num_devices_per_split
    tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
                    data_parallelism, num_devices_per_split)
    self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
                               self.params.train.max_steps)
    self._step_rate_tracker = summary_utils.StepRateTracker()

    self._cluster_def = self._cluster.worker_cluster_def

    self._compile_op = None

    # Set once variables are initialized in _Loop; gates _LoopEnqueue.
    self._initialized = threading.Event()

    tf.logging.info(
        'Creating TrainerTpu using data parallelism %s '
        'and %s steps_per_loop', data_parallelism, self._steps_per_loop)

    @py_utils.RetryOnTransientTfError()
    def _WaitUntilInitTpu():
      """Wait until the model is ready."""
      try:
        # tpu.initialize_system() is called with None as embedding_config, as
        # embedding_config is not available yet. Later in _Loop, it is called
        # with the correct embedding_config. Since it cannot be called twice in
        # the same graph with different embedding_config, we use a dummy_graph
        # here.
        dummy_graph = tf.Graph()
        with dummy_graph.as_default():
          tpu_initialize_system_op = tf.tpu.initialize_system(
              embedding_config=None, job=None)

        with self._GetSession(graph=dummy_graph) as sess:
          topology = sess.run(tpu_initialize_system_op)

        if self.params.train.tpu_device_order_mode is None:
          device_assignment = device_assignment_lib.device_assignment(
              topology,
              computation_shape=py_utils.ComputationShape(
                  num_devices_per_split, topology),
              num_replicas=data_parallelism)
        else:
          device_assignment = device_assignment_lib.device_assignment(
              topology,
              computation_shape=py_utils.ComputationShape(
                  num_devices_per_split, topology),
              num_replicas=data_parallelism,
              device_order_mode=self.params.train.tpu_device_order_mode)
        py_utils.SetTpuDeviceAssignment(device_assignment)
        tf.logging.info('device_assignment.core_assignment: %s',
                        str(device_assignment.core_assignment))
        tf.logging.info('device_assignment.topology.device_coordinates: %s',
                        str(device_assignment.topology.device_coordinates))
      except py_utils.transient_tf_errors as e:
        tf.logging.info('TPU initialization failed: %s', e)
        raise

    _WaitUntilInitTpu()

    with self._graph.as_default(), tf.container(self._container_id):
      with self._cluster, tf.device(self._cluster.job_spec.name):
        with cluster_factory.SetImmediatelyInstantiateVariables(False):
          self._model = self.params.Instantiate()
        self._task = self._model.GetTask()
        self._task.input.InstantiateVariables()
        self._task.input.CreateTpuEnqueueOps()
        self._init_input_ops = self._task.input.InitOps()
        self._eval_metrics = metrics.TpuEvalMetrics()
        # Needed due to the AddExtraTheta() reference to global_step when
        # instantiating the InputGenerator.
        _ = py_utils.GetOrCreateGlobalStepVar()

        def TpuTrainStep(*args):
          """Train a shard of a batch on a single TPU core.

          Args:
            *args: metrics values from previous steps.

          Returns:
            New summed metrics values and a train_op.
          """
          self._model.InstantiateVariables()
          self._model.ConstructFPropBPropGraph()
          self._load_ops = tf.get_collection(py_utils.TPU_EMBEDDING_LOAD_OPS)
          self._retrieve_ops = tf.get_collection(
              py_utils.TPU_EMBEDDING_RETRIEVE_OPS)
          tpu_embedding_collection = tf.get_collection(py_utils.TPU_EMBEDDING)
          self._tpu_embedding = (
              tpu_embedding_collection[0] if tpu_embedding_collection else None)
          per_step_eval_metrics = self._eval_metrics.SetMetrics(
              self._task.eval_metrics, args)
          outfeed_op = self._OutfeedEnqueue(self._task.per_example_tensors)
          summed_metrics = []
          assert len(per_step_eval_metrics) == len(args)
          # Sum this step's metrics into the running totals, after the
          # outfeed enqueue has been issued.
          with tf.control_dependencies([outfeed_op]):
            for x, y in zip(per_step_eval_metrics, args):
              summed_metrics.append(x + y)
          return summed_metrics + [self._task.train_op]

        @tpu_function.on_device_training_loop
        def TpuTrain():
          loop_result = tpu_training_loop.repeat(
              self._steps_per_loop,
              TpuTrainStep,
              inputs=self._eval_metrics.initial_values,
              name='train_loop')
          # Final metrics are the avg across self._steps_per_loop steps.
          return self._eval_metrics.FinalizeMetrics(loop_result)

        self._compile_op, batch_parallel_res = tpu.split_compile_and_shard(
            TpuTrain,
            num_shards=data_parallelism,
            device_assignment=py_utils.GetTpuDeviceAssignment())
        outfeed_dequeue_op = self._OutfeedDequeueLoop(
            self._task.per_example_tensors, self._steps_per_loop,
            self._cluster.num_splits_per_client)
        self._task.input.CreateTpuEmbeddingEnqueueOps()

        def _ConstructPostTrainingLoop(train_loop_op, outfeed_dequeue_op):
          """Returns the op for tpu training with tail cpu computation."""
          # Adds a tail computation that is run after the tpu_training loop
          # step finishes. This allows us to run certain computation that
          # acts on the variable between tpu_train_loop iterations and
          # amortizing the cost of the operations. Alternative of running
          # tpu.outside_compilation & using tf.cond is expensive.
          with tf.control_dependencies(train_loop_op):
            self._model.ConstructPostTrainingLoop(outfeed_dequeue_op)
            with tf.control_dependencies([self._task.post_training_loop_op]):
              return ([[tf.identity(o) for o in train_loop_op],
                       outfeed_dequeue_op])

        # Get metric result from a single replica; they are all same here.
        all_tpu_ops = [t[0] for t in batch_parallel_res]
        self._tpu_train_ops = (
            _ConstructPostTrainingLoop(all_tpu_ops, outfeed_dequeue_op))

      self._initialize_local_vars = tf.local_variables_initializer()
      self._initialize_global_vars = tf.global_variables_initializer()
      self._initialize_tables = tf.tables_initializer()

      if FLAGS.checkpoint_in_trainer_tpu:
        self.checkpointer = checkpointer.Checkpointer(
            self._train_dir, self._model, init_op=self._initialize_global_vars)

      self.enqueue_ops = self._task.input.tpu_infeed_op
      tf.logging.info('Trainer number of enqueue ops: %d',
                      len(self.enqueue_ops))

    self._summary_writer = self._CreateSummaryWriter(self._train_dir)

    if FLAGS.checkpoint_in_trainer_tpu:
      self._model_analysis, self._total_num_params = (
          summary_utils.ModelAnalysis(self._model))
      py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
      self._WriteToLog(self._model_analysis, self._train_dir,
                       'model_analysis.txt')

    # Saves the graph def.
    tf.io.write_graph(self._graph.as_graph_def(), self._train_dir,
                      'train.pbtxt')

    # Saves the trainer params.
    self._WriteToLog(self.params.ToText(), self._train_dir,
                     'trainer_params.txt')

  def _GetSession(self, **kwargs):
    """Build a session targeting the worker cluster definition."""
    return super()._GetSession(cluster_def=self._cluster_def, **kwargs)

  def _OutfeedEnqueue(self, per_example_tensors):
    # Enqueue per-example tensors on the TPU outfeed; no-op when the task
    # produces none.
    if not per_example_tensors:
      return tf.no_op()
    per_example_tensors = py_utils.NestedMap(per_example_tensors)
    return tpu_ops.outfeed_enqueue_tuple(per_example_tensors.Flatten())

  def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
    """Process all per-example tensor outfeed data for a TPU sess.run.

    Args:
      per_example_tensors: dict of key -> tensor as generated by TpuTrainStep.
      num_loops: number of times that TpuTrainStep will be executed by TpuTrain.
      num_devices: number of TPU cores assigned to this process.

    Returns:
      A dict of per-example tensors from the latest TpuTrainStep.
    """
    if not per_example_tensors:
      return tf.no_op()

    # Shapes/dtypes in sorted-key order; the same ordering is used when
    # zipping results back into a dict below.
    tensor_shapes = [
        py_utils.GetShape(per_example_tensors[key])
        for key in sorted(per_example_tensors)
    ]
    tensor_types = [
        tf.as_dtype(per_example_tensors[key].dtype)
        for key in sorted(per_example_tensors)
    ]

    def LoopBody(i, *input_arrays):
      """Process outfeed data for a single TpuTrainStep.

      Args:
        i: current loop index.
        *input_arrays: One tf.TensorArray per outfeed tensor.

      Returns:
        i+1 (new index) plus post-write tf.TensorArray handles.
      """
      # Outfeed ops execute on each JF node, so they must be located on the
      # nodes.
      outfeed_devices = []
      device_assignment = py_utils.GetTpuDeviceAssignment()
      assert device_assignment
      for replica in range(device_assignment.num_replicas):
        for core in range(device_assignment.num_cores_per_replica):
          with tf.device(device_assignment.host_device(replica, core)):
            outfeed_devices.append(
                tpu_ops.outfeed_dequeue_tuple(
                    tensor_types,
                    tensor_shapes,
                    device_ordinal=device_assignment.tpu_ordinal(replica,
                                                                 core)))
      offset = i * num_devices
      output_arrays = list(input_arrays)
      # Each output_array holds a different per-example tensor. We get results
      # for each tensor from each TPU for each TpuTrainStep call.
      for j in range(len(output_arrays)):
        for k in range(len(outfeed_devices)):
          output_arrays[j] = output_arrays[j].write(offset + k,
                                                    outfeed_devices[k][j])

      return tuple([i + 1] + output_arrays)

    def LoopCond(i, *output_arrays):
      del output_arrays
      return i < num_loops

    output_arrays = [
        tf.TensorArray(
            tensor_types[i],
            size=num_loops * num_devices,
            element_shape=tensor_shapes[i]) for i in range(len(tensor_shapes))
    ]
    # Loop once for each time that TpuTrainStep runs.
    output_arrays = tf.while_loop(
        LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
    concatenated_arrays = [array.concat() for array in output_arrays]
    return dict(zip(sorted(per_example_tensors), concatenated_arrays))

  def _CleanUp(self):
    # If there's an exception, we want _LoopEnqueue to wait until
    # everything is initialized before starting up.
    self._initialized.clear()

  def Start(self):
    # Run training.
    self._RunLoop('trainer', self._Loop, cleanup_func=self._CleanUp)

  def _InfeedLoop(self, sess):
    """Run the infeed enqueue ops once per step of the device loop."""
    tf.logging.info('_InfeedLoop start')
    for _ in range(self._steps_per_loop):
      sess.run(self.enqueue_ops)

  def StartEnqueueOp(self, op):
    # When retrieve ops for TPU embedding is present, we use _InfeedLoop above
    # instead to make sure enqueue and retrieve does not happen at the same
    # time as required by TPU embedding.
    # We can remove this by using a tf.while_loop driven infeed op.
    if self._retrieve_ops:
      return
    self._RunLoop(
        'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])

  def _SummarizeValue(self, steps, tag, value):
    """Write the scalar summary `tag`=`value` at step `steps`."""
    self._summary_writer.add_summary(
        metrics.CreateScalarSummary(tag, value), steps)

  def _LoopEnqueue(self, op):
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      return
    # Wait for _Loop to initialize variables first before attempting to infeed.
    tf.logging.info('_LoopEnqueue waiting for _initialized...')
    self._initialized.wait()
    tf.logging.info('_LoopEnqueue proceeding.')

    # The global step may not be initialized in this thread if the target server
    # uses session state isolation (e.g. Cloud TPUs).
    sess = self._GetSession()
    if FLAGS.checkpoint_in_trainer_tpu:
      self.checkpointer.RestoreGlobalStepIfNeeded(sess)

    return super()._LoopEnqueue(op, sess)

  def _Loop(self):
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      self._DequeueThreadComplete()
      return
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      config_proto = (
          self._tpu_embedding.config_proto
          if self._tpu_embedding is not None else None)
      # Real TPU-system init (the __init__ one used a dummy graph and a
      # None embedding_config).
      sess.run(
          tf.tpu.initialize_system(embedding_config=config_proto, job=None))
      sess.run(self._initialize_tables)
      sess.run(self._initialize_local_vars)
      sess.run(self._init_input_ops)

      if FLAGS.run_locally == 'tpu':
        sess.run(self._initialize_global_vars)

      # Compile the TPU program once up front; fail fast on errors.
      self._SetStatusMessage('Compiling ...')
      compilation_result = sess.run(self._compile_op)
      comp_result_proto = tpu_compilation_result.CompilationResultProto()
      comp_result_proto.ParseFromString(compilation_result)
      if comp_result_proto.status_error_message:
        tf.logging.fatal('Compilation failed: {}'.format(
            comp_result_proto.status_error_message))
      self._SetStatusMessage('Compiling done.')

      if FLAGS.checkpoint_in_trainer_tpu:
        # For b/134415393 -- better to initialize to a known state than
        # rely on what's in the session on the trainer/TPU worker.
        tf.logging.info('TrainerTpu: Force restore or initialize.')
        self.checkpointer.Restore(sess, force_reinitialize=True)

      global_step = sess.run(self._model.global_step)
      # Unblock _LoopEnqueue now that variables exist.
      self._initialized.set()
      eval_metrics = None

      sess.run(self._load_ops)
      while True:
        train_steps_start = time.perf_counter()
        if FLAGS.checkpoint_in_trainer_tpu:
          # Init/restore variable if needed.
          self.checkpointer.RestoreIfNeeded(sess)

        if self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics):
          # Early terminate gracefully by setting a new max step horizon: three
          # more TPU steps to ensure that the enqueue ops can gracefully
          # terminate as well.
          if self._max_steps is None:
            self._max_steps = global_step + 3 * self._steps_per_loop
            tf.logging.info('Early stopping at step: %d', self._max_steps)

        if self._ShouldStop(sess, global_step):
          tf.logging.info('Training finished.')
          if FLAGS.checkpoint_in_trainer_tpu:
            self.checkpointer.Save(sess, global_step)
          self._DequeueThreadComplete()
          return

        if self._retrieve_ops:
          # TPU-embedding mode: drive the infeed from a dedicated thread so
          # enqueue and retrieve never overlap (see StartEnqueueOp).
          infeed_loop_thread = threading.Thread(
              target=self._InfeedLoop, args=(sess,))
          infeed_loop_thread.start()

        tpu_train_op_start = time.perf_counter()
        values, outfeeds = sess.run(self._tpu_train_ops)
        tpu_train_op_secs = time.perf_counter() - tpu_train_op_start

        if self._retrieve_ops:
          infeed_loop_thread.join()
          tf.logging.info('Retrieve params.')
          sess.run(self._retrieve_ops)
          tf.logging.info('Retrieve params done.')

        self._eval_metrics.PackMetricsValues(values)
        eval_metrics = self._eval_metrics.metrics

        # Note: global_step is incremented by self._steps_per_loop by the
        # previous sess.run call.
        task_global_step = sess.run(self._task.global_step)
        global_step = sess.run(self._model.global_step)

        if not self._task.per_example_tensors:
          outfeeds = {}
        self._task.ProcessFPropResults(sess, task_global_step, eval_metrics,
                                       outfeeds)
        self._model.ProcessFPropResults(sess, global_step, eval_metrics,
                                        outfeeds)

        step_rate, example_rate, total_examples = (
            self._step_rate_tracker.ComputeStepRate(
                global_step,
                eval_metrics['num_samples_in_batch'][0] * self._steps_per_loop))
        self._SummarizeValue(global_step, 'global_step/sec', step_rate)
        self._SummarizeValue(global_step, 'examples/sec', example_rate)
        self._SummarizeValue(global_step, 'total_samples', total_examples)
        if FLAGS.checkpoint_in_trainer_tpu:
          self._SummarizeValue(global_step, 'total_num_params',
                               self._total_num_params)
        msg = 'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' % (
            global_step, step_rate, example_rate)
        for key, (val, _) in sorted(eval_metrics.items()):
          msg += ' %s:%.8g' % (key, val)
          self._SummarizeValue(global_step, key, val)
        self._SetStatusMessage(msg)

        checkpoint_write_secs = 0.0
        if FLAGS.checkpoint_in_trainer_tpu:
          checkpoint_write_start = time.perf_counter()
          checkpoint_saved = self.checkpointer.MaybeSave(
              sess, self._model.global_step)
          if checkpoint_saved:
            checkpoint_write_secs = time.perf_counter() - checkpoint_write_start
        train_steps_secs = time.perf_counter() - train_steps_start
        self._ExportMetrics(
            # Metrics expects python int, but global_step is numpy.int64.
            global_step=int(global_step),
            step_rate=step_rate,
            example_rate=example_rate,
            tpu_train_op_secs=tpu_train_op_secs,
            checkpoint_write_secs=checkpoint_write_secs,
            total_train_steps_secs=train_steps_secs)
class Evaler(base_runner.BaseRunner):
"""Evaler."""
  def __init__(self, eval_type, *args, **kwargs):
    """Set up evaluation of the `eval_type` dataset (e.g. 'dev', 'test')."""
    super().__init__(*args, **kwargs)
    self._job_name = 'evaler_' + eval_type
    self._output_name = 'eval_' + eval_type
    # Evaluation runs with the eval cluster configuration.
    self.params.cluster.do_eval = True
    self._cluster = cluster_factory.Cluster(self.params.cluster)
    self._eval_dir = os.path.join(self._logdir, self._output_name)
    if self._model_task_name:
      self._eval_dir += '_' + str(self._model_task_name)
    tf.io.gfile.makedirs(self._eval_dir)

    # When set, evaluate exactly this checkpoint instead of polling.
    self._eval_path = None
    # Multitask params doesn't have 'task'.
    if 'task' in self.params:
      self._eval_path = checkpointer.GetSpecificCheckpoint(
          self.params.task.eval.load_checkpoint_from)

    self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
    # Report trial metrics only when this job matches the reporting prefix.
    self._should_report_metrics = self._job_name.startswith(
        self.params.reporting_job)

    with self._graph.as_default(), tf.container(self._container_id):
      with self._cluster, tf.device(self._cluster.GetPlacer()):
        self._model = self.params.Instantiate()
        self._params = self._model.params
        # Forward-only graph; evaluation never backprops.
        self._model.ConstructFPropGraph()
        self._task = self._model.GetTask(self._model_task_name)
        self._init_input_ops = self._task.input.InitOps()
        self._summary_op = tf.summary.merge_all()
        self._initialize_tables = tf.tables_initializer()
        self._initialize_local_vars = tf.local_variables_initializer()
        # No queues are allowed for eval models.
        self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
        assert not self.enqueue_ops
      self.checkpointer = self._CreateCheckpointer(self._train_dir, self._model)

    # Saves the graph def.
    self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
    if self.params.cluster.task == 0:
      tf.io.write_graph(self._graph.as_graph_def(), self._eval_dir,
                        '%s.pbtxt' % self._output_name)
  def _CreateCheckpointer(self, train_dir, model):
    """Wrapper method for override purposes."""
    return checkpointer.Checkpointer(train_dir, model)
  def Start(self):
    """Run the evaluation loop (blocks the calling thread)."""
    self._RunLoop(self._job_name, self._Loop)
  def _Loop(self):
    """The main loop."""
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      # This initializes input generator ops
      sess.run(self._init_input_ops)

      if self._eval_path:
        # A specific checkpoint was requested: evaluate it exactly once.
        self._EvalOnce(self._eval_path, sess)
      else:
        # Otherwise poll train_dir for new checkpoints until told to stop.
        path = None
        while True:
          path = self._FindNewCheckpoint(path, sess)
          if not path or self._EvalOnce(path, sess):
            break

    # Maybe evaluate the last checkpoint if we are not given a specific
    # checkpoint to evaluate.
    if self._eval_path is None:
      self.EvalLatestCheckpoint(path)

    if self._should_report_metrics:
      tf.logging.info('Reporting trial done.')
      self._trial.ReportDone()
    tf.logging.info('Evaluation finished.')
  def EvalLatestCheckpoint(self, last_path=None):
    """Runs eval once on the latest checkpoint.

    Args:
      last_path: if not None, the checkpoint path most recently evaluated;
        evaluation is skipped when the latest checkpoint equals this path.
    """
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      # This initializes input generator ops
      sess.run(self._init_input_ops)
      path = tf.train.latest_checkpoint(self._train_dir)
      if not path:
        tf.logging.info('No checkpoint available.')
        return
      elif path == last_path:
        tf.logging.info('Latest checkpoint was already evaluated.')
        return
      self._EvalOnce(path, sess)
  def EvalCheckpoint(self, ckpt_id):
    """Runs eval on one specific checkpoint identified by its numeric id.

    Args:
      ckpt_id: integer checkpoint id; resolved to the file
        '<train_dir>/ckpt-<8-digit zero-padded id>'.
    """
    with tf.container(self._container_id), self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      # This initializes input generator ops
      sess.run(self._init_input_ops)
      path = '{}/ckpt-{:08d}'.format(self._train_dir, ckpt_id)
      self._EvalOnce(path, sess)
def _RemoveScalarSummaries(self, summaries):
proto = summary_pb2.Summary()
proto.ParseFromString(summaries)
for i, value in enumerate(proto.value):
if value.HasField('simple_value'):
del proto.value[i]
return proto.SerializeToString()
  def _EvalOnce(self, path, sess):
    """Runs evaluation for a batch of samples.

    Restores `path` (unless the controller shares this process and already
    restored it), accumulates `eval_metrics` over batches until
    `samples_per_summary` samples are seen (or the input is exhausted for
    resettable inputs), then writes aggregate summaries.

    Args:
      path: checkpoint path.
      sess: the tf Session.

    Returns:
      should_stop: True when `global_step` has reached `train.max_steps` or
        the trial reports it should stop.
    """
    if not FLAGS.evaler_in_same_address_as_controller:
      self.checkpointer.RestoreFromPath(sess, path)

    global_step = sess.run(py_utils.GetGlobalStep())
    # Check after how many steps checkpoint got saved.
    # And decide whether to run an evaluation.
    if global_step < self._task.params.eval.start_eval_after:
      return False

    if self._task.params.input.resettable:
      tf.logging.info('Resetting input_generator.')
      self._task.input_generator.Reset(sess)

    metrics_dict = {
        name: metrics.AverageMetric() for name in self._task.eval_metrics
    }
    num_samples_metric = metrics_dict['num_samples_in_batch']
    samples_per_summary = self._task.params.eval.samples_per_summary
    # samples_per_summary == 0 means "evaluate one full epoch", which only
    # terminates if the input pipeline can signal OutOfRange (resettable).
    if samples_per_summary == 0:
      assert self._task.params.input.resettable
    while samples_per_summary == 0 or (num_samples_metric.total_value <
                                       samples_per_summary):
      try:
        # NOTE: We intentionally do not let FProp generate scalar summaries by
        # default, because evaler calls FProp multiple times for each
        # checkpoint. Multiple summaries at the same step is often confusing.
        # Instead, models should update eval_metrics and generate aggregate
        # summaries. Other types of summaries (images, audio etc.) will be
        # generated for the first eval batch.
        if num_samples_metric.total_value == 0 and self._summary_op is not None:
          ans, summaries = sess.run([self._task.eval_metrics, self._summary_op])
          summaries = self._RemoveScalarSummaries(summaries)

          # Add non-scalar summaries only for the first batch of data.
          self._summary_writer.add_summary(summaries, global_step)
        else:
          ans = sess.run(self._task.eval_metrics)

        for name, (value, weight) in ans.items():
          metrics_dict[name].Update(value, weight)
        tf.logging.info('Total examples done: %d/%d',
                        num_samples_metric.total_value, samples_per_summary)
      except tf.errors.OutOfRangeError:
        if not self._task.params.input.resettable:
          raise
        break

    # Replace average values with total values for certain metrics.
    if 'num_predictions' in metrics_dict:
      metrics_dict['num_predictions'].total_weight = 1.0
    if 'num_words' in metrics_dict:
      metrics_dict['num_words'].total_weight = 1.0

    summaries = {k: v.Summary(k) for k, v in metrics_dict.items()}
    summaries['total_samples'] = metrics.CreateScalarSummary(
        'total_samples', num_samples_metric.total_value)

    # When we have evaluated so many samples, generate a summary.
    self._WriteSummaries(
        self._summary_writer,
        os.path.basename(self._eval_dir),
        global_step,
        summaries,
        text_filename=os.path.join(self._eval_dir,
                                   'score-{:08d}.txt'.format(global_step)))

    should_stop = global_step >= self.params.train.max_steps
    if self._should_report_metrics:
      tf.logging.info('Reporting eval measure for step %d.' % global_step)
      trial_should_stop = self._trial.ReportEvalMeasure(global_step,
                                                        metrics_dict, path)
      should_stop = should_stop or trial_should_stop
    return should_stop
# Re-export the Decoder runner and its helper from the shared trainer
# implementation module so this module exposes the full set of runner classes.
Decoder = trainer_impl.Decoder
GetDecoderDir = trainer_impl.GetDecoderDir
def _GetClusterSpecDict():
  """Parses the cluster_spec flag and returns a dict.

  Returns:
    Mapping from job name to its list of 'host:port' machine strings, parsed
    from the 'job1=host1:p1,host2:p2@job2=...' format of --cluster_spec.

  Raises:
    ValueError: if any '@'-separated entry is not of the form 'name=machines'.
  """
  cluster_spec_dict = {}
  for job_spec in FLAGS.cluster_spec.split('@'):
    # Each entry looks like: ps_host=worker1:1231,worker2:1234
    parts = job_spec.split('=')
    if len(parts) != 2:
      raise ValueError(f'Invalid job specification: {job_spec}')
    job_name, machines = parts
    cluster_spec_dict[job_name] = machines.split(',')
  return cluster_spec_dict
class RunnerManager:
  """Helper class for managing runners.

  Configures flags/cluster params for the selected execution mode
  (local, distributed, Cloud TPU), constructs the per-job runner objects,
  and drives them to completion.
  """

  # This is a hack so these classes can be overridded with internal
  # non-public implementations.
  # pylint: disable=invalid-name
  inference_graph_exporter = inference_graph_exporter
  model_registry = model_registry
  Controller = Controller
  Trainer = Trainer
  TrainerTpu = TrainerTpu
  Evaler = Evaler
  Decoder = Decoder
  ExecutorTpu = executor.ExecutorTpu
  # pylint: enable=invalid-name

  def __init__(self, model):
    # Name of the registered model this manager builds runners for.
    self._model_name = model

  def MaybeLaunchTensorFlow(self):
    """Starts TF machinary in this process."""
    if FLAGS.run_locally or FLAGS.tpu:
      return

    tf.logging.info('Launching tensorflow.')

    target = FLAGS.tf_master
    if not target.startswith('localhost'):
      # E.g., trainer_client is configured w/ FLAGS.tf_master pointing to
      # another job. In that case, start a local server.
      cluster_spec_dict = _GetClusterSpecDict()
      self._tf_server = tf.distribute.Server(
          tf.train.ClusterSpec(cluster_spec_dict),
          job_name=FLAGS.job,
          task_index=FLAGS.task)
      target = self._tf_server.target
    if not FLAGS.tf_master:
      FLAGS.tf_master = target
    # Smoke-test the session so a broken TF runtime fails fast and loudly.
    with tf.Session(target).as_default():
      value = (tf.constant(1.) + tf.constant(1.)).eval()
    assert value == 2.0, 'Something is really wrong.'
    tf.logging.info('Launched tensorflow.')

  def GetExecutorParams(self):
    """Get the params needed to instantiate the ExecutorTpu.

    Returns:
      Tuple (dict, params):

      - ps_params_dict: high_level task_name -> ProgramScheduleParams
      - train_cfg: Either a SingleTaskModelParams or MultiTaskModelParams.
    """
    cluster = cluster_factory.Current()
    self.UpdateClusterParamsFromFlags(cluster.params, 'executor_tpu')
    ps_params_dict, train_cfg = executor.GetExecutorParams(
        self._model_name, cluster.params, self.model_registry)

    return ps_params_dict, train_cfg

  def GetParamsForDataset(self, job_name, dataset_name):
    """Returns params for job `job_name` on the dataset `dataset_name`."""
    # Get the current cluster and update its params from flags.
    cluster = cluster_factory.Current()
    self.UpdateClusterParamsFromFlags(cluster.params, job_name)
    with cluster_factory.Cluster(cluster.params):
      try:
        cfg = self.model_registry.GetParams(self._model_name, dataset_name)
      except base_model_params.DatasetError as e:
        # Retry with a title-cased name, e.g. 'train' -> 'Train'.
        dataset_name_retry = dataset_name.title()
        tf.logging.warning(
            'Exception configuring dataset %s, retrying as %s: %s',
            dataset_name, dataset_name_retry, e)
        cfg = self.model_registry.GetParams(self._model_name,
                                            dataset_name_retry)
        tf.logging.warning('Succeeded after retrying as %s.' %
                           dataset_name_retry)
    cfg.cluster = cluster.params

    # Updates a few params based on flags.
    if FLAGS.enqueue_max_steps is not None:
      cfg.train.enqueue_max_steps = FLAGS.enqueue_max_steps
    if FLAGS.saver_max_to_keep is not None:
      cfg.train.save_max_to_keep = FLAGS.saver_max_to_keep
    if FLAGS.saver_keep_checkpoint_every_n_hours is not None:
      cfg.train.save_keep_checkpoint_every_n_hours = FLAGS.saver_keep_checkpoint_every_n_hours
    cfg.reporting_job = FLAGS.vizier_reporting_job
    return cfg

  def MaybeConfigRunDistributed(self):
    """If given a `FLAGS.cluster_spec`, update flags for running distributed."""
    if not FLAGS.cluster_spec:
      return
    job_specs = FLAGS.cluster_spec.split('@')
    cluster_spec_dict = _GetClusterSpecDict()
    if FLAGS.job == 'trainer_client':
      FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]
    for job in cluster_spec_dict:
      if job.startswith('decoder_'):
        assert len(job_specs) == 1, 'Decoder jobs must run on their own'
        assert ',' not in job_specs[0], 'Only single machine supported'
        FLAGS.decoder_job = '/job:%s' % job
        FLAGS.decoder_replicas = 1
      if job.startswith('evaler_'):
        assert len(job_specs) == 1, 'Evaler jobs must run on their own'
        assert ',' not in job_specs[0], 'Only single machine supported'
        FLAGS.evaler_job = '/job:%s' % job
        FLAGS.evaler_replicas = 1
    if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',
                                              'worker', 'executor_tpu'):
      FLAGS.worker_job = '/job:worker'
      FLAGS.worker_replicas = len(cluster_spec_dict['worker'])
      FLAGS.ps_job = '/job:worker'
      FLAGS.ps_replicas = FLAGS.worker_replicas
    if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):
      FLAGS.worker_job = '/job:trainer'
      FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])
      FLAGS.ps_job = '/job:ps'
      FLAGS.ps_replicas = len(cluster_spec_dict['ps'])

  def MaybeConfigCloudTpu(self):
    """If given `FLAGS.tpu`, update flags for running on a Cloud TPU."""
    if not FLAGS.tpu:
      return

    if not FLAGS.job:
      FLAGS.job = 'trainer_client'

    if FLAGS.job not in ('trainer_client', 'executor_tpu'):
      raise ValueError('Only trainer_client and executor_tpu jobs are '
                       'supported on TPU.')

    cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu=FLAGS.tpu,
        project=FLAGS.gcp_project,
        zone=FLAGS.tpu_zone,
        job_name=FLAGS.job)
    cluster_spec_dict = cluster_resolver.cluster_spec().as_dict()

    FLAGS.mode = 'sync'
    FLAGS.tf_master = cluster_resolver.master()

    FLAGS.worker_job = '/job:{}'.format(FLAGS.job)
    FLAGS.worker_replicas = 1
    FLAGS.worker_num_tpu_hosts = len(cluster_spec_dict[FLAGS.job])
    FLAGS.worker_tpus = (
        cluster_resolver.num_accelerators()['TPU'] * FLAGS.worker_num_tpu_hosts)
    FLAGS.ps_job = FLAGS.worker_job
    if FLAGS.job == 'trainer_client':
      FLAGS.ps_replicas = FLAGS.worker_replicas

    # Rebuild the cluster_spec flag in 'job=h1,h2@job2=...' format.
    FLAGS.cluster_spec = ('@'.join('{}={}'.format(job, ','.join(hosts))
                                   for job, hosts in cluster_spec_dict.items()))

    FLAGS.xla_device = 'tpu'
    FLAGS.enable_asserts = False
    FLAGS.checkpoint_in_trainer_tpu = True

  def UpdateClusterParamsFromFlags(self, cluster, job_name):
    """Update `cluster` with a training cluster configuration from flags."""
    cluster.mode = FLAGS.mode
    cluster.job = job_name
    cluster.task = FLAGS.task
    cluster.do_eval = job_name in ['evaler', 'decoder']
    cluster.logdir = FLAGS.logdir

    cluster.controller.name = FLAGS.controller_job
    cluster.controller.gpus_per_replica = FLAGS.controller_gpus

    cluster.worker.name = FLAGS.worker_job
    cluster.worker.replicas = FLAGS.worker_replicas
    cluster.worker.gpus_per_replica = FLAGS.worker_gpus
    cluster.worker.tpus_per_replica = FLAGS.worker_tpus
    cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts
    cluster.worker.devices_per_split = FLAGS.worker_split_size
    if FLAGS.additional_worker_jobs:
      for additional_job in FLAGS.additional_worker_jobs:
        cluster.worker.additional_worker_names.append(additional_job)

    if FLAGS.tpu:
      job_name = cluster.worker.name.replace('/job:', '', 1)
      worker_hosts = _GetClusterSpecDict()[job_name]
      if FLAGS.additional_worker_jobs:
        for additional_job in cluster.worker.additional_worker_names:
          additional_job_name = additional_job.replace('/job:', '', 1)
          worker_hosts.extend(_GetClusterSpecDict()[additional_job_name])
      cluster.worker.targets = ','.join(
          'grpc://{}'.format(host) for host in worker_hosts)

    cluster.ps.name = FLAGS.ps_job
    cluster.ps.replicas = FLAGS.ps_replicas
    cluster.ps.gpus_per_replica = FLAGS.ps_gpus

    cluster.input.name = FLAGS.input_job
    cluster.input.replicas = FLAGS.input_replicas
    cluster.input.targets = FLAGS.input_targets

    cluster.evaler.name = FLAGS.evaler_job
    cluster.evaler.replicas = FLAGS.evaler_replicas
    cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus

    cluster.decoder.name = FLAGS.decoder_job
    cluster.decoder.replicas = FLAGS.decoder_replicas
    cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus

    cluster.add_summary = FLAGS.add_summary

  def _CreateRunner(self, job, model_task_name, logdir, tf_master, trial):
    """Create a runner."""
    evaler_job_name_prefix = 'evaler_'
    decoder_job_name_prefix = 'decoder_'

    tf.logging.info('Job %s start', job)
    common_args = (model_task_name, logdir, tf_master, trial)
    if job == 'controller':
      cfg = self.GetParamsForDataset('controller', 'Train')
      return self.Controller(cfg, *common_args)
    elif job == 'trainer':
      cfg = self.GetParamsForDataset('trainer', 'Train')
      return self.Trainer(cfg, *common_args)
    elif job == 'trainer_client':
      cfg = self.GetParamsForDataset('trainer_client', 'Train')
      if py_utils.use_tpu():
        return self.TrainerTpu(cfg, *common_args)
      else:
        return self.Trainer(cfg, *common_args)
    elif job.startswith(evaler_job_name_prefix):
      dataset_name = job[len(evaler_job_name_prefix):]
      cfg = self.GetParamsForDataset('evaler', dataset_name)
      return self.Evaler(dataset_name.lower(), cfg, *common_args)
    elif job.startswith(decoder_job_name_prefix):
      dataset_name = job[len(decoder_job_name_prefix):]
      cfg = self.GetParamsForDataset('decoder', dataset_name)
      return self.Decoder(dataset_name.lower(), cfg, *common_args)
    elif job in ('ps', 'worker', 'input'):
      # These jobs only host the in-process TF server; block until it exits.
      self._tf_server.join()
    elif job == 'executor_tpu':
      ps_cfg_dict, train_cfg = self.GetExecutorParams()
      return self.ExecutorTpu(train_cfg, ps_cfg_dict, model_task_name, logdir,
                              tf_master)
    else:
      raise ValueError('job %s is not supported' % job)

  def CreateRunners(self, jobs, logdir, trial=base_trial.NoOpTrial()):
    """Creates a list of runners based on `FLAGS.mode`.

    Args:
      jobs: a list of runner jobs.
      logdir: the directory used for logging, usually on CNS.
      trial: optional `Trial` object, used for reporting measures and early
        stopping.

    Returns:
      A list of `.BaseRunner`, one per job in `jobs`.
    """
    runners = []
    for j in jobs:
      tf_master = FLAGS.tf_master
      # Ensure that decoder or evaler threads do not clobber variables being
      # updated by trainer by forcing them to use independent sessions.
      if ('trainer' in jobs and
          (j.startswith('decoder') or j.startswith('evaler'))):
        tf_master = ''

      runner = self._CreateRunner(j, FLAGS.model_task_name, logdir, tf_master,
                                  trial)
      runners.append(runner)
    return runners

  def StartRunners(self, runners):
    """Runs `runners` in parallel threads.

    Returns when all of them finish.

    Args:
      runners: a list of `.BaseRunner`.

    Returns:
      None.
    """
    threads = []
    tf.logging.info('Starting runners')
    for runner in runners:
      runner_class_name = str(runner)
      t = threading.Thread(target=runner.Start, name=runner_class_name)
      t.daemon = True
      t.start()
      threads.append(t)
      if runner.enqueue_ops:
        tf.logging.info('Total num runner.enqueue_ops: %d',
                        len(runner.enqueue_ops))
        for i, enqueue_op in enumerate(runner.enqueue_ops):

          def StartEnqueue(runner, op):
            tf.logging.info('Starting enqueue op %s', op.name)
            return lambda: runner.StartEnqueueOp(op)

          enqueue_name = '%s-enqueue-%d' % (runner_class_name, i)
          tq = threading.Thread(
              target=StartEnqueue(runner, enqueue_op), name=enqueue_name)
          tq.start()
          threads.append(tq)
    tf.logging.info('Waiting for runners to finish...')
    for t in threads:
      tf.logging.info('Waiting for thread to finish: %s' % t.name)
      while True:
        # Join with a timeout so this loop stays responsive to interrupts.
        t.join(1)
        # Thread.isAlive() was removed in Python 3.9; use is_alive().
        if not t.is_alive():
          break
    tf.logging.info('All runners done.')

  def RunTrial(self, job, logdir, trial):
    """A wrapper function for running a trial."""
    # Run each job in separate process/task
    # TODO(rpang): add support for running evaler_test and decoder.
    self.StartRunners(self.CreateRunners([job], logdir, trial))

  def MaybeConfigRunLocally(self):
    """Update flags if configured to run locally."""
    if not FLAGS.run_locally:
      # Do nothing
      return

    FLAGS.tf_master = tf.distribute.Server.create_local_server().target

    if not FLAGS.mode:
      FLAGS.mode = 'sync'

    if not FLAGS.job:
      if FLAGS.run_locally == 'tpu':
        FLAGS.job = 'trainer_client'
      elif FLAGS.mode == 'async':
        FLAGS.job = 'controller,trainer'
      else:
        FLAGS.job = 'controller,trainer_client'

    FLAGS.task = 0

    local_job = '/job:localhost'
    FLAGS.controller_job = local_job

    FLAGS.worker_job = local_job
    FLAGS.worker_replicas = 1
    if FLAGS.run_locally == 'gpu':
      if not FLAGS.worker_gpus:
        FLAGS.worker_gpus = 1
    else:
      FLAGS.worker_gpus = 0
    if FLAGS.run_locally == 'tpu':
      FLAGS.xla_device = 'tpu'
      FLAGS.enable_asserts = False
    else:
      FLAGS.worker_tpus = 0

    if not FLAGS.worker_split_size:
      FLAGS.worker_split_size = 1

    FLAGS.ps_job = local_job
    FLAGS.ps_replicas = 1
    FLAGS.ps_gpus = 0

    FLAGS.input_job = local_job
    FLAGS.input_replicas = 0

    FLAGS.evaler_job = local_job
    FLAGS.evaler_replicas = 1
    if FLAGS.run_locally == 'gpu':
      FLAGS.evaler_gpus = 1
    else:
      FLAGS.evaler_gpus = 0

    FLAGS.decoder_job = local_job
    FLAGS.decoder_replicas = 1
    if FLAGS.run_locally == 'gpu':
      FLAGS.decoder_gpus = 1
    else:
      FLAGS.decoder_gpus = 0

  def InspectModel(self):
    """Prints out model analysis for the model."""
    FLAGS.mode = 'sync'
    p = self.GetParamsForDataset('controller', 'Train')
    c = cluster_factory.Cluster(p.cluster)
    with tf.Graph().as_default(), c, tf.device(c.GetPlacer()):
      analysis, _ = summary_utils.ModelAnalysis(p.Instantiate())
    print(analysis)

  def InspectDatasets(self):
    """Prints out datasets configured for the model."""
    cls = self.model_registry.GetClass(self._model_name)
    print(','.join([dataset.lower() for dataset in datasets.GetDatasets(cls)]))

  def InspectDecoder(self):
    """Prints out datasets configured for the decoder."""
    cls = self.model_registry.GetClass(self._model_name)

    params = cls()
    has_decoder = False
    # A model "has a decoder" when any of its tasks overrides
    # CreateDecoderMetrics from the base class.
    if issubclass(cls, base_model_params.SingleTaskModelParams):
      has_decoder = params.Task(
      ).cls.CreateDecoderMetrics != base_model.BaseTask.CreateDecoderMetrics
    else:
      for _, task_param in params.Model().task_params.IterParams():
        has_decoder |= (
            task_param.cls.CreateDecoderMetrics !=
            base_model.BaseTask.CreateDecoderMetrics)
    if has_decoder:
      # We assume that the proper decoder is implemented.
      self.InspectDatasets()
    else:
      print('')

  def SetModelName(self, model_name):
    """Sets the model name."""
    self._model_name = model_name

  def WriteInferenceGraph(self):
    """Generates the inference graphs for a given model."""
    inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')
    tf.io.gfile.makedirs(inference_graph_dir)
    tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)

    cfg = self.model_registry.GetParams(self._model_name, 'Test')
    task_names = [FLAGS.model_task_name]
    if (issubclass(cfg.cls, base_model.MultiTaskModel) and
        not FLAGS.model_task_name):
      task_names = base_model.MultiTaskModel.TaskNames(cfg)

    if FLAGS.inference_graph_filename:
      # Custom inference graph.
      for task_name in task_names:
        filename_prefix = FLAGS.inference_graph_filename
        if task_name:
          filename_prefix = '%s_inference' % task_name
        filename_prefix = os.path.join(inference_graph_dir, filename_prefix)

        device = ''
        var_options = None
        if FLAGS.inference_graph_device == 'tpu':
          device = 'tpu'
          var_options = 'ON_DEVICE'
        device_options = inference_graph_exporter.InferenceDeviceOptions(
            device=device,
            retain_device_placement=False,
            var_options=var_options,
            gen_init_op=True,
            dtype_override=None,
            fprop_dtype_override=None)
        self.inference_graph_exporter.InferenceGraphExporter.Export(
            model_cfg=cfg,
            model_task_name=task_name,
            device_options=device_options,
            export_path=filename_prefix + '.pbtxt',
            random_seed=FLAGS.inference_graph_random_seed)
    else:
      for task_name in task_names:
        filename_prefix = 'inference'
        if task_name:
          filename_prefix = '%s_inference' % task_name
        filename_prefix = os.path.join(inference_graph_dir, filename_prefix)

        # Standard inference graph.
        try:
          self.inference_graph_exporter.InferenceGraphExporter.Export(
              model_cfg=cfg,
              model_task_name=task_name,
              export_path=filename_prefix + '.pbtxt',
              random_seed=FLAGS.inference_graph_random_seed)
        except NotImplementedError as e:
          tf.logging.error('Cannot write inference graph: %s', e)

        # TPU inference graph. Not all models support it so fail silently.
        try:
          device_options = self.inference_graph_exporter.InferenceDeviceOptions(
              device='tpu',
              retain_device_placement=False,
              var_options='ON_DEVICE',
              gen_init_op=True,
              dtype_override=None,
              fprop_dtype_override=None)
          self.inference_graph_exporter.InferenceGraphExporter.Export(
              model_cfg=cfg,
              model_task_name=task_name,
              device_options=device_options,
              export_path=filename_prefix + '_tpu.pbtxt',
              random_seed=FLAGS.inference_graph_random_seed)
        except Exception as e:  # pylint: disable=broad-except
          tf.logging.error('Error exporting TPU inference graph: %s' % e)

  def RunEvalerOnce(self):
    """Run once evaler."""
    m = re.match(r'evaler_once_([^_@]+)@(\d+)', FLAGS.job)
    if m is None:
      # Fail with a clear message instead of an AttributeError on m.group.
      raise ValueError('Invalid --job for a once evaler: %s '
                       '(expected evaler_once_<dataset>@<ckpt_id>)' % FLAGS.job)
    dataset_name, ckpt_id = m.group(1), int(m.group(2))
    cfg = self.GetParamsForDataset('evaler', dataset_name)
    evaler = self.Evaler(dataset_name.lower(), cfg, FLAGS.model_task_name,
                         FLAGS.logdir, FLAGS.tf_master)
    evaler.EvalCheckpoint(ckpt_id)

  def Start(self):
    """Start the process."""
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('tf_api_version: %s', tf.summarize_tf2_status())
    # Inspection modes print information and exit without running jobs.
    if FLAGS.mode == 'inspect_model':
      self.InspectModel()
      return

    if FLAGS.mode == 'inspect_evaler':
      self.InspectDatasets()
      return

    if FLAGS.mode == 'inspect_decoder':
      self.InspectDecoder()
      return

    if FLAGS.mode == 'write_inference_graph':
      self.WriteInferenceGraph()
      return

    if FLAGS.mode == 'shell':
      _StartShell(locals())
      return

    assert FLAGS.mode in ['sync', 'async']

    self.MaybeConfigRunLocally()
    self.MaybeConfigRunDistributed()
    self.MaybeConfigCloudTpu()
    self.MaybeLaunchTensorFlow()

    if FLAGS.job.startswith('evaler_once_'):
      # E.g., trainer --model=foo.bar.Model --logdir=...
      # --run_locally=cpu --mode=sync --job=evaler_once_test@65200
      self.RunEvalerOnce()
      return

    self.StartRunners(self.CreateRunners(FLAGS.job.split(','), FLAGS.logdir))
def main(unused_argv):
  """Entry point for `tf.app.run`: builds a RunnerManager and starts it."""
  RunnerManager(FLAGS.model).Start()
if __name__ == '__main__':
  # Run in TF1 graph mode; the trainer relies on sessions/collections.
  tf.disable_eager_execution()
  tf.flags.mark_flag_as_required('model')
  # Parse known flags early so --model is available to import the model
  # module (which may itself define additional flags), then unparse so
  # tf.app.run can re-parse the complete flag set.
  FLAGS(sys.argv, known_only=True)
  model_imports.ImportParams(FLAGS.model)
  FLAGS.unparse_flags()
  tf.app.run(main)
|
pretrained.py | # Copyright 2017-2021 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes for the :class:`PretrainedPipeline` and downloading Pretrained Models.
"""
import sparknlp.internal as _internal
import threading
import time
from pyspark.sql import DataFrame
from sparknlp.annotator import *
from sparknlp.base import LightPipeline
from pyspark.ml import PipelineModel
from py4j.protocol import Py4JJavaError
def printProgress(stop):
    """Prints a console spinner until `stop()` returns True.

    Runs on a helper thread while a model/pipeline download is in flight and
    prints 'OK!' once the download finishes.

    Args:
        stop: zero-argument callable; the loop exits once it returns True.
    """
    states = [' | ', ' / ', ' — ', ' \\ ']
    nextc = 0
    while True:
        sys.stdout.write('\r[{}]'.format(states[nextc]))
        sys.stdout.flush()
        nextc = nextc + 1 if nextc < 3 else 0
        # Check the stop flag *before* sleeping: the downloader joins this
        # thread after setting the flag, and sleeping first added up to 2.5s
        # of shutdown latency to every download.
        if stop():
            sys.stdout.write('\r[{}]'.format('OK!'))
            sys.stdout.flush()
            break
        time.sleep(2.5)
    sys.stdout.write('\n')
    return
class ResourceDownloader(object):
    """Downloads pretrained models/pipelines via the JVM-side downloader.

    All methods delegate to py4j wrappers in :mod:`sparknlp.internal` and show
    a console spinner (see ``printProgress``) while a download is in flight.
    """

    @staticmethod
    def downloadModel(reader, name, language, remote_loc=None, j_dwn='PythonResourceDownloader'):
        """Downloads the pretrained model `name` and wraps it with `reader`.

        Args:
            reader: annotator class used to wrap the downloaded Java model.
            name: name of the pretrained model.
            language: language code of the model.
            remote_loc: optional remote storage location; default used if None.
            j_dwn: name of the JVM downloader class to use.

        Returns:
            The wrapped model, or None when the resource is not found (only a
            message is printed in that case — callers should be prepared).
        """
        print(name + " download started this may take some time.")
        file_size = _internal._GetResourceSize(name, language, remote_loc).apply()
        if file_size == "-1":
            print("Can not find the model to download please check the name!")
        else:
            print("Approximate size to download " + file_size)
            stop_threads = False
            t1 = threading.Thread(target=printProgress, args=(lambda: stop_threads,))
            t1.start()
            try:
                j_obj = _internal._DownloadModel(reader.name, name, language, remote_loc, j_dwn).apply()
            except Py4JJavaError as e:
                sys.stdout.write("\n" + str(e))
                raise e
            finally:
                # Always stop the spinner thread, success or failure.
                stop_threads = True
                t1.join()

            return reader(classname=None, java_model=j_obj)

    @staticmethod
    def downloadPipeline(name, language, remote_loc=None):
        """Downloads the pretrained pipeline `name` and returns a PipelineModel.

        Py4JJavaError from the JVM download propagates to the caller after the
        spinner thread is stopped.
        """
        print(name + " download started this may take some time.")
        file_size = _internal._GetResourceSize(name, language, remote_loc).apply()
        if file_size == "-1":
            print("Can not find the model to download please check the name!")
        else:
            print("Approx size to download " + file_size)
            stop_threads = False
            t1 = threading.Thread(target=printProgress, args=(lambda: stop_threads,))
            t1.start()
            try:
                j_obj = _internal._DownloadPipeline(name, language, remote_loc).apply()
                jmodel = PipelineModel._from_java(j_obj)
            finally:
                stop_threads = True
                t1.join()

            return jmodel

    @staticmethod
    def clearCache(name, language, remote_loc=None):
        # Removes the locally cached copy of the named resource.
        _internal._ClearCache(name, language, remote_loc).apply()

    @staticmethod
    def showPublicModels():
        # Prints the table of publicly available pretrained models.
        _internal._ShowPublicModels().apply()

    @staticmethod
    def showPublicPipelines():
        # Prints the table of publicly available pretrained pipelines.
        _internal._ShowPublicPipelines().apply()

    @staticmethod
    def showUnCategorizedResources():
        # Prints resources that are not categorized as models or pipelines.
        _internal._ShowUnCategorizedResources().apply()
class PretrainedPipeline:
    """Represents a fully constructed and trained Spark NLP pipeline, ready
    to be used.

    This way, a whole pipeline can be defined in 1 line. Additionally, the
    :class:`.LightPipeline` version of the model can be retrieved with member
    :attr:`.light_model`.

    For more extended examples see the `Pipelines page
    <https://nlp.johnsnowlabs.com/docs/en/pipelines>`_ and our `Github Model
    Repository <https://github.com/JohnSnowLabs/spark-nlp-models>`_ for
    available pipeline models.

    Parameters
    ----------
    name : str
        Name of the PretrainedPipeline. These can be gathered from the Pipelines
        Page.
    lang : str, optional
        Langauge of the model, by default 'en'
    remote_loc : str, optional
        Link to the remote location of the model (if it was already downloaded),
        by default None
    parse_embeddings : bool, optional
        Whether to parse embeddings, by default False
    disk_location : str , optional
        Path to locally stored PretrainedPipeline, by default None
    """

    def __init__(self, name, lang='en', remote_loc=None, parse_embeddings=False, disk_location=None):
        # Either download the pipeline by name or load it from a local path.
        if not disk_location:
            self.model = ResourceDownloader().downloadPipeline(name, lang, remote_loc)
        else:
            self.model = PipelineModel.load(disk_location)
        self.light_model = LightPipeline(self.model, parse_embeddings)

    @staticmethod
    def from_disk(path, parse_embeddings=False):
        """Loads a PretrainedPipeline from a local path instead of downloading."""
        return PretrainedPipeline(None, None, None, parse_embeddings, path)

    def annotate(self, target, column=None):
        """Annotates the data provided, extracting the results.

        The data should be either a list or a str.

        Parameters
        ----------
        target : list or str
            The data to be annotated

        Returns
        -------
        List[dict] or dict
            The result of the annotation

        Examples
        --------
        >>> from sparknlp.pretrained import PretrainedPipeline
        >>> explain_document_pipeline = PretrainedPipeline("explain_document_dl")
        >>> result = explain_document_pipeline.annotate('U.N. official Ekeus heads for Baghdad.')
        >>> result.keys()
        dict_keys(['entities', 'stem', 'checked', 'lemma', 'document', 'pos', 'token', 'ner', 'embeddings', 'sentence'])
        >>> result["ner"]
        ['B-ORG', 'O', 'O', 'B-PER', 'O', 'O', 'B-LOC', 'O']
        """
        if type(target) is DataFrame:
            if not column:
                raise Exception("annotate() column arg needed when targeting a DataFrame")
            return self.model.transform(target.withColumnRenamed(column, "text"))
        elif type(target) is list or type(target) is str:
            pipeline = self.light_model
            return pipeline.annotate(target)
        else:
            raise Exception("target must be either a spark DataFrame, a list of strings or a string")

    def fullAnnotate(self, target, column=None):
        """Annotates the data provided into `Annotation` type results.

        The data should be either a list or a str.

        Parameters
        ----------
        target : list or str
            The data to be annotated

        Returns
        -------
        List[Annotation]
            The result of the annotation

        Examples
        --------
        >>> from sparknlp.pretrained import PretrainedPipeline
        >>> explain_document_pipeline = PretrainedPipeline("explain_document_dl")
        >>> result = explain_document_pipeline.fullAnnotate('U.N. official Ekeus heads for Baghdad.')
        >>> result[0].keys()
        dict_keys(['entities', 'stem', 'checked', 'lemma', 'document', 'pos', 'token', 'ner', 'embeddings', 'sentence'])
        >>> result[0]["ner"]
        [Annotation(named_entity, 0, 2, B-ORG, {'word': 'U.N'}),
        Annotation(named_entity, 3, 3, O, {'word': '.'}),
        Annotation(named_entity, 5, 12, O, {'word': 'official'}),
        Annotation(named_entity, 14, 18, B-PER, {'word': 'Ekeus'}),
        Annotation(named_entity, 20, 24, O, {'word': 'heads'}),
        Annotation(named_entity, 26, 28, O, {'word': 'for'}),
        Annotation(named_entity, 30, 36, B-LOC, {'word': 'Baghdad'}),
        Annotation(named_entity, 37, 37, O, {'word': '.'})]
        """
        if type(target) is DataFrame:
            if not column:
                # Fixed copy-pasted message that previously said "annotate()".
                raise Exception("fullAnnotate() column arg needed when targeting a DataFrame")
            return self.model.transform(target.withColumnRenamed(column, "text"))
        elif type(target) is list or type(target) is str:
            pipeline = self.light_model
            return pipeline.fullAnnotate(target)
        else:
            raise Exception("target must be either a spark DataFrame, a list of strings or a string")

    def transform(self, data):
        """Transforms the input dataset with Spark.

        Parameters
        ----------
        data : :class:`pyspark.sql.DataFrame`
            input dataset

        Returns
        -------
        :class:`pyspark.sql.DataFrame`
            transformed dataset
        """
        return self.model.transform(data)
|
test_add_image_progress.py | '''
New Integration test for add image progress.
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import apibinding.inventory as inventory
import threading
import os
import time
# Woodpecker test-case settings: 30-minute timeout, no parallel execution.
_config_ = {
    'timeout' : 1800,
    'noparallel' : True
}

# Image created by add_image(); kept global so test()/error_cleanup() can
# delete it.
new_image = None
def add_image(bs_uuid):
    """Adds a qcow2 root-volume-template image to the given backup storage.

    Runs on a worker thread; stores the created image in the module-global
    `new_image` so the main test flow can track and delete it.

    Args:
        bs_uuid: uuid of the backup storage to upload the image to.
    """
    global new_image
    image_option = test_util.ImageOption()
    image_option.set_name('test_add_image_progress')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    # The image URL is supplied by the test environment.
    image_option.set_url(os.environ.get('imageUrl_net'))
    image_option.set_backup_storage_uuid_list([bs_uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
def test():
    """Checks that task progress is reported while an image is being added.

    Starts add_image() on a thread, waits briefly, queries the progress of
    the in-flight download, and verifies it lies within [0, 100].
    """
    global new_image
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # Progress reporting is only checked on ceph or imagestore backends.
    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bss[0].type != inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            test_util.test_skip("not find available imagestore or ceph backup storage. Skip test")
    thread = threading.Thread(target=add_image, args=(bss[0].uuid, ))
    thread.start()
    # NOTE(review): assumes the image is still in 'Downloading' state after
    # 5s; a download that finishes faster would leave `image` empty and make
    # image[0] raise IndexError — confirm against the environment.
    time.sleep(5)
    image_cond = res_ops.gen_query_conditions("status", '=', "Downloading")
    image = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
            None, fields=['uuid'])
    progress = res_ops.get_task_progress(image[0].uuid)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
    thread.join()
    new_image.delete()
    if test_lib.lib_get_image_delete_policy() != 'Direct':
        new_image.expunge()
    test_util.test_pass('Add image Progress Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup: deletes (and expunges, if required) the image."""
    global new_image
    if new_image:
        new_image.delete()
        # Non-'Direct' delete policies keep the image recoverable; expunge it.
        if test_lib.lib_get_image_delete_policy() != 'Direct':
            new_image.expunge()
|
message_server.py | # Copyright 2017 Google Inc. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
import Queue
import logging
import threading
import time
import tornado.ioloop
import tornado.web
# Module-global server instance; set elsewhere before the handlers run.
MESSAGE_SERVER = None

# Static HTML served at /blank.html.
BLANK_PAGE = """<html>
<head>
<title>Blank</title>
<style type="text/css">body {background-color: #FFF;}</style>
</head>
<body>
</body>
</html>"""

# Static HTML served at /orange.html (solid orange div used for video/marker
# detection).
ORANGE_PAGE = """<html>
<head>
<title>Orange</title>
<style>
body {background-color: white; margin: 0;}
#wptorange {width:100%; height: 100%; background-color: #DE640D;}
</style>
</head>
<body><div id='wptorange'></div></body>
</html>"""
class TornadoRequestHandler(tornado.web.RequestHandler):
    """Request handler for when we are using tornado.

    Serves a handful of fixed local endpoints (/ping, /blank.html,
    /orange.html, /config, /config.html) and accepts POSTed messages that
    are forwarded to the global MESSAGE_SERVER.
    """

    def get(self):
        """Handle GET requests for the fixed local pages."""
        import ujson as json
        uri = self.request.uri
        content_type = 'text/plain'
        response = None
        if uri == '/ping':
            response = 'pong'
        elif uri == '/blank.html':
            content_type = 'text/html'
            response = BLANK_PAGE
        elif uri == '/orange.html':
            content_type = 'text/html'
            response = ORANGE_PAGE
        elif uri == '/config':
            # JSON config data
            content_type = 'application/json'
            if MESSAGE_SERVER.config is not None:
                response = json.dumps(MESSAGE_SERVER.config)
            else:
                response = '{}'
        elif uri == '/config.html':
            # Orange HTML page that can be queried from the extension for config data
            content_type = 'text/html'
            parts = [
                "<html><head>\n",
                "<style>\n",
                "body {background-color: white; margin: 0;}\n",
                "#wptorange {width:100%; height: 100%; background-color: #DE640D;}\n",
                "</style>\n",
                "</head><body><div id='wptorange'></div>\n",
            ]
            if MESSAGE_SERVER.config is not None:
                import cgi
                parts.append('<div id="wptagentConfig" style="display: none;">')
                parts.append(cgi.escape(json.dumps(MESSAGE_SERVER.config)))
                parts.append('</div>')
            parts.append("</body></html>")
            response = ''.join(parts)
        if response is not None:
            self.set_status(200)
            self.set_header("Content-Type", content_type)
            self.set_header("Referrer-Policy", "no-referrer")
            self.write(response)

    def post(self):
        """Handle POST messages: /log is logged verbatim, everything else is
        parsed as newline-delimited JSON and queued on MESSAGE_SERVER."""
        import ujson as json
        try:
            body = self.request.body
            if body is not None and len(body):
                if self.request.uri == '/log':
                    logging.debug(body)
                else:
                    for raw_line in body.splitlines():
                        raw_line = raw_line.strip()
                        if not len(raw_line):
                            continue
                        message = json.loads(raw_line)
                        # /etw messages keep their original shape; everything
                        # else is normalized to always carry a 'body' key.
                        if 'body' not in message and self.request.uri != '/etw':
                            message['body'] = None
                        MESSAGE_SERVER.handle_message(message)
        except Exception:
            # Malformed payloads are ignored; the extension just retries.
            pass
        self.set_status(200)
class MessageServer(object):
    """Local HTTP server for interacting with the extension.

    Construction registers the instance in the module-level MESSAGE_SERVER
    global, which the tornado request handlers use to reach the message
    queue and config. Messages POSTed by the extension are buffered in a
    thread-safe queue until the agent consumes them.
    """
    def __init__(self):
        global MESSAGE_SERVER
        MESSAGE_SERVER = self
        self.thread = None
        self.messages = Queue.Queue()
        self.config = None
        # Fix: stop() sets this flag but it was never initialized, so any
        # reader before stop() would have hit an AttributeError.
        self.must_exit = False
        self.__is_started = threading.Event()

    def get_message(self, timeout):
        """Get a single message from the queue.

        Blocks for up to `timeout` seconds; raises Queue.Empty on timeout.
        """
        message = self.messages.get(block=True, timeout=timeout)
        self.messages.task_done()
        return message

    def flush_messages(self):
        """Flush all of the pending messages."""
        try:
            while True:
                self.messages.get_nowait()
                self.messages.task_done()
        except Exception:
            # get_nowait raising (queue empty) terminates the drain loop.
            pass

    def handle_message(self, message):
        """Add a received message to the queue."""
        self.messages.put(message)

    def start(self):
        """Start running the server in a background (daemon) thread."""
        self.__is_started.clear()
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()
        # Wait (up to 30s) for the tornado loop to signal that it is up.
        self.__is_started.wait(timeout=30)

    def stop(self):
        """Stop running the server and join the server thread."""
        logging.debug("Shutting down extension server")
        self.must_exit = True
        if self.thread is not None:
            ioloop = tornado.ioloop.IOLoop.instance()
            # IOLoop.stop must be invoked from the loop's own thread.
            ioloop.add_callback(ioloop.stop)
            self.thread.join()
            self.thread = None
        logging.debug("Extension server stopped")

    def is_ok(self):
        """Poll http://127.0.0.1:8888/ping for up to 30 seconds.

        Returns True as soon as the server answers 'pong', False if it
        never does within the window. (Despite the original docstring, no
        restart is attempted here.)
        """
        import requests
        import monotonic
        end_time = monotonic.monotonic() + 30
        server_ok = False
        proxies = {"http": None, "https": None}
        while not server_ok and monotonic.monotonic() < end_time:
            try:
                response = requests.get('http://127.0.0.1:8888/ping', timeout=10, proxies=proxies)
                if response.text == 'pong':
                    server_ok = True
            except Exception:
                pass
            if not server_ok:
                time.sleep(5)
        return server_ok

    def run(self):
        """Main server loop: serve every path via TornadoRequestHandler."""
        logging.debug('Starting extension server on port 8888')
        application = tornado.web.Application([(r"/.*", TornadoRequestHandler)])
        application.listen(8888, '127.0.0.1')
        self.__is_started.set()
        tornado.ioloop.IOLoop.instance().start()
|
wait_for_tests.py | #pylint: disable=import-error
from six.moves import queue
import os, time, threading, socket, signal, shutil, glob
#pylint: disable=import-error
from distutils.spawn import find_executable
import logging
import xml.etree.ElementTree as xmlet
import CIME.utils
from CIME.utils import expect, Timeout, run_cmd_no_fail, safe_copy, CIMEError
from CIME.XML.machines import Machines
from CIME.test_status import *
from CIME.provenance import save_test_success
from CIME.case.case import Case
SIGNAL_RECEIVED = False
E3SM_MAIN_CDASH = "E3SM"
CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest"
SLEEP_INTERVAL_SEC = .1
###############################################################################
def signal_handler(*_):
###############################################################################
    """Record that a termination signal (SIGTERM/SIGINT) arrived.

    Accepts and ignores the (signum, frame) arguments supplied by the
    signal module. Sets the module-level SIGNAL_RECEIVED flag, which the
    wait loops poll so they can stop waiting and report partial results.
    """
    global SIGNAL_RECEIVED
    SIGNAL_RECEIVED = True
###############################################################################
def set_up_signal_handlers():
###############################################################################
    """Install signal_handler for SIGTERM and SIGINT so a kill/Ctrl-C is
    recorded instead of terminating the process immediately."""
    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, signal_handler)
###############################################################################
def get_test_time(test_path):
###############################################################################
    """Return the recorded run-phase wall time (seconds) for a test case.

    Reads the RUN phase comment from the case's TestStatus and parses its
    "time=<seconds>" token. Returns 0 (with a warning) when no timing data
    is available.
    """
    status = TestStatus(test_dir=test_path)
    run_comment = status.get_comment(RUN_PHASE)
    if run_comment is None or "time=" not in run_comment:
        logging.warning("No run-phase time data found in {}".format(test_path))
        return 0

    matches = [token for token in run_comment.split() if token.startswith("time=")]
    return int(matches[0].split("=")[1])
###############################################################################
def get_test_output(test_path):
###############################################################################
    """Return the contents of a test case's TestStatus.log.

    test_path: directory containing the TestStatus.log file.
    Returns "" (with a warning) when the file does not exist.
    """
    output_file = os.path.join(test_path, "TestStatus.log")
    if (os.path.exists(output_file)):
        # Use a context manager so the file handle is closed promptly;
        # the original open(...).read() leaked the handle until GC.
        with open(output_file, 'r') as fd:
            return fd.read()
    else:
        logging.warning("File '{}' not found".format(output_file))
        return ""
###############################################################################
def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit):
###############################################################################
    """Create the <Site> root element and phase sub-element shared by all of
    the fake CDash XML files.

    phase is one of "Configure", "Build", or "Testing" (the "Testing" phase
    uses "StartTestTime" for its start-time tag, per CDash convention).
    Returns (site_elem, phase_elem).
    """
    jenkins_start = os.environ.get("JENKINS_START_TIME")
    if jenkins_start is not None:
        time_info_str = "Total testing time: {:d} seconds".format(int(current_time) - int(jenkins_start))
    else:
        time_info_str = ""

    site_elem = xmlet.Element("Site")
    site_elem.attrib.update({
        "BuildName": cdash_build_name,
        "BuildStamp": "{}-{}".format(utc_time, cdash_build_group),
        "Name": hostname,
        "OSName": "Linux",
        "Hostname": hostname,
        # OSVersion is abused to carry the commit (and total time) info.
        "OSVersion": "Commit: {}{}".format(git_commit, time_info_str),
    })

    phase_elem = xmlet.SubElement(site_elem, phase)
    xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time)
    start_tag = "Start{}Time".format("Test" if phase == "Testing" else phase)
    xmlet.SubElement(phase_elem, start_tag).text = str(int(current_time))

    return site_elem, phase_elem
###############################################################################
def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
    """Write the fake CDash Configure.xml for this batch of results.

    Namelist-diff tests are reported with a "CMake Warning:" prefix and an
    "NML DIFF" status so CDash surfaces them as configure warnings.
    """
    site_elem, config_elem = create_cdash_xml_boiler("Configure", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)

    xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists"

    config_results = []
    for test_name in sorted(results):
        test_status = results[test_name][1]
        nml_diff = test_status == NAMELIST_FAIL_STATUS
        config_results.append("{} {} Config {}".format("CMake Warning:\n" if nml_diff else "", test_name, "NML DIFF" if nml_diff else "PASS"))
    xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results)

    xmlet.SubElement(config_elem, "ConfigureStatus").text = "0"
    xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now

    xmlet.ElementTree(site_elem).write(os.path.join(data_rel_path, "Configure.xml"))
###############################################################################
def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
    """Write the fake CDash Build.xml for this batch of results.

    A test with no recorded run-phase time is reported as a build <Error>
    so it shows up on the dashboard even though nothing ran.
    """
    site_elem, build_elem = create_cdash_xml_boiler("Build", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)

    xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build"

    sorted_names = sorted(results)
    xmlet.SubElement(build_elem, "Log").text = "\n".join(sorted_names)

    for idx, test_name in enumerate(sorted_names):
        test_path = results[test_name][0]
        test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
        if get_test_time(test_norm_path) != 0:
            continue
        error_elem = xmlet.SubElement(build_elem, "Error")
        xmlet.SubElement(error_elem, "Text").text = test_name
        xmlet.SubElement(error_elem, "BuildLogLine").text = str(idx)
        xmlet.SubElement(error_elem, "PreContext").text = test_name
        xmlet.SubElement(error_elem, "PostContext").text = ""
        xmlet.SubElement(error_elem, "RepeatCount").text = "0"

    xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now

    xmlet.ElementTree(site_elem).write(os.path.join(data_rel_path, "Build.xml"))
###############################################################################
def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
    """Write the fake CDash Test.xml for this batch of results.

    Each test is reported as passed / notrun / failed, with named
    measurements (status, exit value, run time, completion status) plus the
    ASCII-filtered contents of its TestStatus.log as the measurement value.
    """
    site_elem, testing_elem = create_cdash_xml_boiler("Testing", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)

    test_list_elem = xmlet.SubElement(testing_elem, "TestList")
    for test_name in sorted(results):
        xmlet.SubElement(test_list_elem, "Test").text = test_name

    for test_name in sorted(results):
        test_path, test_status = results[test_name]
        # Namelist diffs still count as a pass for dashboard purposes.
        test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]
        test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)

        full_test_elem = xmlet.SubElement(testing_elem, "Test")
        if test_passed:
            full_test_elem.attrib["Status"] = "passed"
        elif (test_status == TEST_PEND_STATUS):
            full_test_elem.attrib["Status"] = "notrun"
        else:
            full_test_elem.attrib["Status"] = "failed"

        xmlet.SubElement(full_test_elem, "Name").text = test_name
        xmlet.SubElement(full_test_elem, "Path").text = test_norm_path
        xmlet.SubElement(full_test_elem, "FullName").text = test_name
        xmlet.SubElement(full_test_elem, "FullCommandLine")
        # text ?

        results_elem = xmlet.SubElement(full_test_elem, "Results")

        named_measurements = (
            ("text/string", "Exit Code", test_status),
            ("text/string", "Exit Value", "0" if test_passed else "1"),
            ("numeric_double", "Execution Time", str(get_test_time(test_norm_path))),
            ("text/string", "Completion Status", "Not Completed" if test_status == TEST_PEND_STATUS else "Completed"),
            ("text/string", "Command line", "create_test")
        )

        for type_attr, name_attr, value in named_measurements:
            named_measurement_elem = xmlet.SubElement(results_elem, "NamedMeasurement")
            named_measurement_elem.attrib["type"] = type_attr
            named_measurement_elem.attrib["name"] = name_attr
            xmlet.SubElement(named_measurement_elem, "Value").text = value

        measurement_elem = xmlet.SubElement(results_elem, "Measurement")
        value_elem = xmlet.SubElement(measurement_elem, "Value")
        # Strip non-ASCII characters so the generated XML stays valid.
        value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128])

    xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now

    etree = xmlet.ElementTree(site_elem)
    etree.write(os.path.join(data_rel_path, "Test.xml"))
###############################################################################
def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname):
###############################################################################
    """Generate the fake Configure/Build/Test XML files under
    Testing/<utc_time> that ctest will later submit to CDash."""
    # We assume all cases were created from the same code repo
    first_result_case = os.path.dirname(list(results.values())[0][0])
    try:
        srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case)
    except CIMEError:
        # Use repo containing this script as last resort
        srcroot = CIME.utils.get_cime_root()

    git_commit = CIME.utils.get_current_commit(repo=srcroot)
    data_rel_path = os.path.join("Testing", utc_time)

    common_args = (cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
    for writer in (create_cdash_config_xml, create_cdash_build_xml, create_cdash_test_xml):
        writer(results, *common_args)
###############################################################################
def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload):
###############################################################################
    """Write Testing/<utc_time>/Upload.xml containing a base64-encoded
    tarball of build/run logs for non-passing tests (or all tests when
    force_log_upload is True).

    Log locations are resolved per case via ./xmlquery (EXEROOT for build
    failures, RUNDIR otherwise). The staging log directory is always
    removed when done.
    """
    data_rel_path = os.path.join("Testing", utc_time)

    try:
        log_dir = "{}_logs".format(cdash_build_name)

        need_to_upload = False

        for test_name, test_data in results.items():
            test_path, test_status = test_data

            # Namelist diffs count as passing for upload purposes.
            if test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] or force_log_upload:
                test_case_dir = os.path.dirname(test_path)

                ts = TestStatus(test_case_dir)
                build_status = ts.get_status(SHAREDLIB_BUILD_PHASE)
                build_status = TEST_FAIL_STATUS if build_status == TEST_FAIL_STATUS else ts.get_status(MODEL_BUILD_PHASE)
                run_status = ts.get_status(RUN_PHASE)
                baseline_status = ts.get_status(BASELINE_PHASE)
                if build_status == TEST_FAIL_STATUS or run_status == TEST_FAIL_STATUS or baseline_status == TEST_FAIL_STATUS or force_log_upload:
                    case_dirs = [test_case_dir]
                    case_base = os.path.basename(test_case_dir)
                    # Some tests carry a nested second case; include its logs too.
                    test_case2_dir = os.path.join(test_case_dir, "case2", case_base)
                    if os.path.exists(test_case2_dir):
                        case_dirs.append(test_case2_dir)

                    for case_dir in case_dirs:
                        # Build failures upload EXEROOT logs, otherwise RUNDIR logs.
                        param = "EXEROOT" if build_status == TEST_FAIL_STATUS else "RUNDIR"
                        log_src_dir = run_cmd_no_fail("./xmlquery {} --value".format(param), from_dir=case_dir)
                        log_dst_dir = os.path.join(log_dir, "{}{}_{}_logs".format(test_name, "" if case_dir == test_case_dir else ".case2", param))
                        os.makedirs(log_dst_dir)
                        for log_file in glob.glob(os.path.join(log_src_dir, "*log*")):
                            safe_copy(log_file, log_dst_dir)
                        for log_file in glob.glob(os.path.join(log_src_dir, "*.cprnc.out*")):
                            safe_copy(log_file, log_dst_dir)

                        need_to_upload = True

        if (need_to_upload):
            tarball = "{}.tar.gz".format(log_dir)
            if (os.path.exists(tarball)):
                os.remove(tarball)

            run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball)
            base64 = run_cmd_no_fail("base64 {}".format(tarball))

            xml_text = \
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
<Upload>
<File filename="{}">
<Content encoding="base64">
{}
</Content>
</File>
</Upload>
</Site>
""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64)

            with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
                fd.write(xml_text)

    finally:
        # Always clean up the staging directory, even on error.
        if (os.path.isdir(log_dir)):
            shutil.rmtree(log_dir)
###############################################################################
def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False):
###############################################################################
    """Generate a full fake CDash submission for this batch of results and
    submit it via `ctest -D NightlySubmit`.

    Writes DartConfiguration.tcl, Testing/TAG, and the Configure/Build/Test
    (and possibly Upload) XML files under Testing/<utc_time>/.
    """
    #
    # Create dart config file
    #
    current_time = time.time()

    utc_time_tuple = time.gmtime(current_time)
    cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)

    # Prefer the E3SM machine name; fall back to the short hostname.
    hostname = Machines().get_machine_name()
    if (hostname is None):
        hostname = socket.gethostname().split(".")[0]
        logging.warning("Could not convert hostname '{}' into an E3SM machine name".format(hostname))

    dart_config = \
"""
SourceDirectory: {0}
BuildDirectory: {0}
# Site is something like machine.domain, i.e. pragmatic.crd
Site: {1}
# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: {2}
# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project={3}
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: http
TriggerSite:
ScpCommand: {4}
# Dashboard start time
NightlyStartTime: {5} UTC
""".format(os.getcwd(), hostname, cdash_build_name, cdash_project,
           find_executable("scp"), cdash_timestamp)

    with open("DartConfiguration.tcl", "w") as dart_fd:
        dart_fd.write(dart_config)

    utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple)
    os.makedirs(os.path.join("Testing", utc_time))

    # Make tag file
    with open("Testing/TAG", "w") as tag_fd:
        tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group))

    create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname)

    create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload)

    run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
###############################################################################
def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run):
###############################################################################
    """Poll one test's TestStatus file until it reaches a final state (or
    until waiting is disabled / a signal arrives), then push
    (test_name, test_path, test_status) onto the shared results queue.

    Intended to run in a daemon thread. Status transitions are mirrored
    into a .internal_test_status.log next to the TestStatus file when that
    directory is writable.
    """
    if (os.path.isdir(test_path)):
        test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME)
    else:
        test_status_filepath = test_path
    logging.debug("Watching file: '{}'".format(test_status_filepath))
    test_log_path = os.path.join(os.path.dirname(test_status_filepath), ".internal_test_status.log")
    # We don't want to make it a requirement that wait_for_tests has write access
    # to all case directories
    try:
        fd = open(test_log_path, "w")
        fd.close()
    except (IOError, OSError):
        test_log_path = "/dev/null"

    prior_ts = None
    with open(test_log_path, "w") as log_fd:
        while (True):
            if (os.path.exists(test_status_filepath)):
                ts = TestStatus(test_dir=os.path.dirname(test_status_filepath))
                test_name = ts.get_name()
                test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important
                                                         no_run=no_run,
                                                         check_throughput=check_throughput,
                                                         check_memory=check_memory, ignore_namelists=ignore_namelists,
                                                         ignore_memleak=ignore_memleak)

                # Record each observed status change in the side log.
                if prior_ts is not None and prior_ts != ts:
                    log_fd.write(ts.phase_statuses_dump())
                    log_fd.write("OVERALL: {}\n\n".format(test_status))

                prior_ts = ts

                if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)):
                    time.sleep(SLEEP_INTERVAL_SEC)
                    logging.debug("Waiting for test to finish")
                else:
                    results.put( (test_name, test_path, test_status) )
                    break
            else:
                if (wait and not SIGNAL_RECEIVED):
                    logging.debug("File '{}' does not yet exist".format(test_status_filepath))
                    time.sleep(SLEEP_INTERVAL_SEC)
                else:
                    # Status file never appeared; derive the test name from its path.
                    test_name = os.path.abspath(test_status_filepath).split("/")[-2]
                    results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) )
                    break
###############################################################################
def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False):
###############################################################################
    """Spawn one watcher thread per test path and collect their results.

    Returns {test_name: (test_path, test_status)}. Raises CIMEError when the
    same test name is seen twice with conflicting results; duplicate names
    with matching results only produce a warning.
    """
    results = queue.Queue()

    for test_path in test_paths:
        watcher = threading.Thread(target=wait_for_test,
                                   args=(test_path, results, not no_wait, check_throughput,
                                         check_memory, ignore_namelists, ignore_memleak, no_run))
        watcher.daemon = True
        watcher.start()

    # Wait for every watcher thread to finish.
    while threading.active_count() > 1:
        time.sleep(1)

    test_results = {}
    completed_test_paths = []
    while not results.empty():
        test_name, test_path, test_status = results.get()
        if test_name in test_results:
            prior_path, prior_status = test_results[test_name]
            if test_status != prior_status:
                raise CIMEError("Test name '{}' was found in both '{}' and '{}' with different results".format(test_name, test_path, prior_path))
            logging.warning("Test name '{}' was found in both '{}' and '{}'".format(test_name, test_path, prior_path))

        test_results[test_name] = (test_path, test_status)
        completed_test_paths.append(test_path)

    expect(set(test_paths) == set(completed_test_paths),
           "Missing results for test paths: {}".format(set(test_paths) - set(completed_test_paths)))
    return test_results
###############################################################################
def wait_for_tests(test_paths,
                   no_wait=False,
                   check_throughput=False,
                   check_memory=False,
                   ignore_namelists=False,
                   ignore_memleak=False,
                   cdash_build_name=None,
                   cdash_project=E3SM_MAIN_CDASH,
                   cdash_build_group=CDASH_DEFAULT_BUILD_GROUP,
                   timeout=None,
                   force_log_upload=False,
                   no_run=False,
                   update_success=False):
###############################################################################
    """Wait for the given tests to finish and report their results.

    Returns True iff every test finished with TEST_PASS_STATUS. Optionally
    records pass/fail via save_test_success (update_success) and submits
    results to CDash (when cdash_build_name is given).
    """
    # Set up signal handling, we want to print results before the program
    # is terminated
    set_up_signal_handlers()

    with Timeout(timeout, action=signal_handler):
        test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run)

    all_pass = True
    for test_name, test_data in sorted(test_results.items()):
        test_path, test_status = test_data
        logging.info("Test '{}' finished with status '{}'".format(test_name, test_status))
        logging.info(" Path: {}".format(test_path))
        all_pass &= test_status == TEST_PASS_STATUS
        if update_success:
            caseroot = os.path.dirname(test_data[0])
            with Case(caseroot, read_only=True) as case:
                srcroot = case.get_value("CIMEROOT")
                baseline_root = case.get_value("BASELINE_ROOT")

            # Namelist diffs count as success for provenance purposes.
            save_test_success(baseline_root, srcroot, test_name, test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS])

    if cdash_build_name:
        create_cdash_xml(test_results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload)

    return all_pass
|
test_html.py | from functools import partial
from importlib import reload
from io import BytesIO, StringIO
import os
import re
import threading
from urllib.error import URLError
import numpy as np
from numpy.random import rand
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas.util.testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
    params=[
        "chinese_utf-16.html",
        "chinese_utf-32.html",
        "chinese_utf-8.html",
        "letz_latin1.html",
    ]
)
def html_encoding_file(request, datapath):
    """Parametrized fixture for HTML encoding test filenames."""
    # Each param names an HTML fixture stored under io/data/html_encoding.
    return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
    """Assert two lists of DataFrames are element-wise equal and non-empty.

    Extra positional/keyword arguments are forwarded to
    tm.assert_frame_equal.
    """
    assert len(list1) == len(list2), (
        "lists are not of equal size "
        "len(list1) == {0}, "
        "len(list2) == {1}".format(len(list1), len(list2))
    )
    msg = "not all list elements are DataFrames"
    both_frames = all(
        isinstance(left, DataFrame) and isinstance(right, DataFrame)
        for left, right in zip(list1, list2)
    )
    assert both_frames, msg
    for left, right in zip(list1, list2):
        tm.assert_frame_equal(left, right, *args, **kwargs)
        assert not left.empty, "frames are both empty"
@td.skip_if_no("bs4")
def test_bs4_version_fails(monkeypatch, datapath):
    import bs4

    # Patch in a bs4 version string (4.2) that is too old; the bs4 flavor
    # should refuse to run with an ImportError.
    monkeypatch.setattr(bs4, "__version__", "4.2")
    with pytest.raises(ImportError, match="Pandas requires version"):
        read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
    # An unknown flavor must raise ValueError with the offending set named.
    flavor = "invalid flavor"
    msg = r"\{" + flavor + r"\} is not a valid set of flavors"
    with pytest.raises(ValueError, match=msg):
        read_html("google.com", "google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
def test_same_ordering(datapath):
    # The lxml and bs4 parsers should discover the same tables, in the same
    # order, from the same document.
    filename = datapath("io", "data", "html", "valid_markup.html")
    parsed = {flav: read_html(filename, index_col=0, flavor=[flav]) for flav in ("lxml", "bs4")}
    assert_framelist_equal(parsed["lxml"], parsed["bs4"])
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=td.skip_if_no("lxml")),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
    @pytest.fixture(autouse=True)
    def set_files(self, datapath):
        # Resolve the on-disk HTML fixtures used by most tests in this class.
        self.spam_data = datapath("io", "data", "html", "spam.html")
        self.spam_data_kwargs = {}
        self.spam_data_kwargs["encoding"] = "UTF-8"
        self.banklist_data = datapath("io", "data", "html", "banklist.html")

    @pytest.fixture(autouse=True, scope="function")
    def set_defaults(self, flavor, request):
        # Bind the class-level parametrized flavor so every test's
        # self.read_html call uses it implicitly.
        self.read_html = partial(read_html, flavor=flavor)
        yield
    def test_to_html_compat(self):
        # Round-trip: a float frame rendered with DataFrame.to_html should be
        # recoverable via read_html.
        df = (
            tm.makeCustomDataframe(
                4,
                3,
                data_gen_f=lambda *args: rand(),
                c_idx_names=False,
                r_idx_names=False,
            )
            .applymap("{0:.3f}".format)
            .astype(float)
        )
        out = df.to_html()
        res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
        tm.assert_frame_equal(res, df)
    @tm.network
    def test_banklist_url(self):
        # Live-network test: two different match strings against the same URL
        # should yield the same list of tables.
        url = "http://www.fdic.gov/bank/individual/failed/banklist.html"
        df1 = self.read_html(
            url, "First Federal Bank of Florida", attrs={"id": "table"}
        )
        df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
        assert_framelist_equal(df1, df2)

    @tm.network
    def test_spam_url(self):
        # Live-network test against the raw spam fixture hosted on GitHub.
        url = (
            "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
            "pandas/tests/io/data/html/spam.html"
        )
        df1 = self.read_html(url, ".*Water.*")
        df2 = self.read_html(url, "Unit")
        assert_framelist_equal(df1, df2)
    @pytest.mark.slow
    def test_banklist(self):
        # Local fixture: different match strings should select the same tables.
        df1 = self.read_html(self.banklist_data, ".*Florida.*", attrs={"id": "table"})
        df2 = self.read_html(self.banklist_data, "Metcalf Bank", attrs={"id": "table"})
        assert_framelist_equal(df1, df2)

    def test_spam(self):
        # Spot-check the parsed content of the spam fixture.
        df1 = self.read_html(self.spam_data, ".*Water.*")
        df2 = self.read_html(self.spam_data, "Unit")
        assert_framelist_equal(df1, df2)
        assert df1[0].iloc[0, 0] == "Proximates"
        assert df1[0].columns[0] == "Nutrient"
    def test_spam_no_match(self):
        # With no match pattern, every parsed object is still a DataFrame.
        dfs = self.read_html(self.spam_data)
        for df in dfs:
            assert isinstance(df, DataFrame)

    def test_banklist_no_match(self):
        dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
        for df in dfs:
            assert isinstance(df, DataFrame)

    def test_spam_header(self):
        # header=2 promotes the third row to column labels.
        df = self.read_html(self.spam_data, ".*Water.*", header=2)[0]
        assert df.columns[0] == "Proximates"
        assert not df.empty
    # skiprows should accept an int, range, list, set, slice, or ndarray;
    # anything else raises TypeError.
    def test_skiprows_int(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=1)
        df2 = self.read_html(self.spam_data, "Unit", skiprows=1)
        assert_framelist_equal(df1, df2)

    def test_skiprows_xrange(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=range(2))[0]
        df2 = self.read_html(self.spam_data, "Unit", skiprows=range(2))[0]
        tm.assert_frame_equal(df1, df2)

    def test_skiprows_list(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=[1, 2])
        df2 = self.read_html(self.spam_data, "Unit", skiprows=[2, 1])
        assert_framelist_equal(df1, df2)

    def test_skiprows_set(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows={1, 2})
        df2 = self.read_html(self.spam_data, "Unit", skiprows={2, 1})
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=1)
        df2 = self.read_html(self.spam_data, "Unit", skiprows=1)
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice_short(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=slice(2))
        df2 = self.read_html(self.spam_data, "Unit", skiprows=slice(2))
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice_long(self):
        # A reversed slice selecting the same rows must behave identically.
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=slice(2, 5))
        df2 = self.read_html(self.spam_data, "Unit", skiprows=slice(4, 1, -1))
        assert_framelist_equal(df1, df2)

    def test_skiprows_ndarray(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=np.arange(2))
        df2 = self.read_html(self.spam_data, "Unit", skiprows=np.arange(2))
        assert_framelist_equal(df1, df2)

    def test_skiprows_invalid(self):
        with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
            self.read_html(self.spam_data, ".*Water.*", skiprows="asdf")
    def test_index(self):
        # index_col selects the index column consistently across matches.
        df1 = self.read_html(self.spam_data, ".*Water.*", index_col=0)
        df2 = self.read_html(self.spam_data, "Unit", index_col=0)
        assert_framelist_equal(df1, df2)

    def test_header_and_index_no_types(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", header=1, index_col=0)
        df2 = self.read_html(self.spam_data, "Unit", header=1, index_col=0)
        assert_framelist_equal(df1, df2)

    def test_header_and_index_with_types(self):
        df1 = self.read_html(self.spam_data, ".*Water.*", header=1, index_col=0)
        df2 = self.read_html(self.spam_data, "Unit", header=1, index_col=0)
        assert_framelist_equal(df1, df2)

    def test_infer_types(self):
        # 10892 infer_types removed
        df1 = self.read_html(self.spam_data, ".*Water.*", index_col=0)
        df2 = self.read_html(self.spam_data, "Unit", index_col=0)
        assert_framelist_equal(df1, df2)
    def test_string_io(self):
        # read_html accepts in-memory StringIO buffers.
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            data1 = StringIO(f.read())

        with open(self.spam_data, **self.spam_data_kwargs) as f:
            data2 = StringIO(f.read())

        df1 = self.read_html(data1, ".*Water.*")
        df2 = self.read_html(data2, "Unit")
        assert_framelist_equal(df1, df2)

    def test_string(self):
        # read_html accepts a raw HTML string.
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            data = f.read()

        df1 = self.read_html(data, ".*Water.*")
        df2 = self.read_html(data, "Unit")

        assert_framelist_equal(df1, df2)

    def test_file_like(self):
        # read_html accepts an open file object.
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            df1 = self.read_html(f, ".*Water.*")

        with open(self.spam_data, **self.spam_data_kwargs) as f:
            df2 = self.read_html(f, "Unit")

        assert_framelist_equal(df1, df2)
    @tm.network
    def test_bad_url_protocol(self):
        # Unsupported URL schemes raise URLError.
        with pytest.raises(URLError):
            self.read_html("git://github.com", match=".*Water.*")

    @tm.network
    @pytest.mark.slow
    def test_invalid_url(self):
        # A non-resolvable host should raise URLError, or surface as
        # "No tables found" depending on how the fetch fails.
        try:
            with pytest.raises(URLError):
                self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
        except ValueError as e:
            assert "No tables found" in str(e)
    @pytest.mark.slow
    def test_file_url(self):
        # file:// URLs are supported input.
        url = self.banklist_data
        dfs = self.read_html(
            file_path_to_url(os.path.abspath(url)), "First", attrs={"id": "table"}
        )
        assert isinstance(dfs, list)
        for df in dfs:
            assert isinstance(df, DataFrame)

    @pytest.mark.slow
    def test_invalid_table_attrs(self):
        # attrs that match no table raise ValueError.
        url = self.banklist_data
        with pytest.raises(ValueError, match="No tables found"):
            self.read_html(
                url, "First Federal Bank of Florida", attrs={"id": "tasdfable"}
            )
    def _bank_data(self, *args, **kwargs):
        # Helper: parse the banklist fixture with the standard match/attrs,
        # forwarding any extra read_html arguments.
        return self.read_html(
            self.banklist_data, "Metcalf", attrs={"id": "table"}, *args, **kwargs
        )
    @pytest.mark.slow
    def test_multiindex_header(self):
        # Two header rows produce a MultiIndex on the columns.
        df = self._bank_data(header=[0, 1])[0]
        assert isinstance(df.columns, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_index(self):
        # Two index columns produce a MultiIndex on the rows.
        df = self._bank_data(index_col=[0, 1])[0]
        assert isinstance(df.index, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_header_index(self):
        df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
        assert isinstance(df.columns, MultiIndex)
        assert isinstance(df.index, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_header_skiprows_tuples(self):
        df = self._bank_data(header=[0, 1], skiprows=1)[0]
        assert isinstance(df.columns, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_header_skiprows(self):
        df = self._bank_data(header=[0, 1], skiprows=1)[0]
        assert isinstance(df.columns, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_header_index_skiprows(self):
        # skiprows combined with multi-header and multi-index still yields
        # MultiIndex rows and columns.
        df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
        assert isinstance(df.index, MultiIndex)
        assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
    def test_negative_skiprows(self):
        # skiprows must be non-negative; the error message should pinpoint
        # that a negative value was passed.
        msg = r"\(you passed a negative value\)"
        with pytest.raises(ValueError, match=msg):
            self.read_html(self.spam_data, "Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
@pytest.mark.slow
def test_thousands_macau_stats(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "html", "macau.html")
dfs = self.read_html(macau_data, index_col=0, attrs={"class": "style1"})
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.items())
@pytest.mark.slow
def test_thousands_macau_index_col(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "html", "macau.html")
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.items())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
result = self.read_html(
"""
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
def test_nyse_wsj_commas_table(self, datapath):
data = datapath("io", "data", "html", "nyse_wsj.html")
df = self.read_html(data, index_col=0, header=0, attrs={"class": "mdcTable"})[0]
expected = Index(
[
"Issue(Roll over for charts and headlines)",
"Volume",
"Price",
"Chg",
"% Chg",
]
)
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, "Metcalf", attrs={"id": "table"})[0]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols]._convert(datetime=True, coerce=True)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data, "r") as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(self.banklist_data, "Gold Canyon", attrs={"id": "table"})[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self, datapath):
data = datapath("io", "data", "html", "computer_sales_page.html")
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(data, header=[0, 1])
data = datapath("io", "data", "html", "computer_sales_page.html")
assert self.read_html(data, header=[1, 2])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, "Arizona", header=1)[0]
assert result["sq mi"].dtype == np.dtype("float64")
def test_parser_error_on_empty_header_row(self):
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
    def test_bool_header_arg(self):
        # GH 6114
        # header must be an int or sequence of ints; booleans (which are
        # ints in Python) are explicitly rejected with TypeError.
        for arg in [True, False]:
            with pytest.raises(TypeError):
                self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, ".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
_, encoding = os.path.splitext(os.path.basename(html_encoding_file))[0].split(
"_"
)
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
    def test_parse_failure_rewinds(self):
        # Issue #17975
        # When the first flavor fails to parse, read_html must seek(0) a
        # seekable file object before retrying with the fallback parser,
        # so both the well-formed and malformed inputs succeed here.
        class MockFile:
            # Minimal one-shot file stand-in: read() returns the payload
            # once, then '' until seek() rewinds it.
            def __init__(self, data):
                self.data = data
                self.at_end = False
            def read(self, size=None):
                data = "" if self.at_end else self.data
                self.at_end = True
                return data
            def seek(self, offset):
                self.at_end = False
            def seekable(self):
                return True
        good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
        bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
        assert self.read_html(good)
        assert self.read_html(bad)
    @pytest.mark.slow
    def test_importcheck_thread_safety(self, datapath):
        # see gh-16928
        # Reloading pandas.io.html re-runs its parser-availability import
        # check; two concurrent read_html calls must not race against it.
        class ErrorThread(threading.Thread):
            # Thread subclass that records, rather than swallows, any
            # exception raised in run() so the main thread can assert on it.
            def run(self):
                try:
                    super().run()
                except Exception as err:
                    self.err = err
                else:
                    self.err = None
        # force import check by reinitalising global vars in html.py
        reload(pandas.io.html)
        filename = datapath("io", "data", "html", "valid_markup.html")
        helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
        helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
        helper_thread1.start()
        helper_thread2.start()
        while helper_thread1.is_alive() or helper_thread2.is_alive():
            pass
        assert None is helper_thread1.err is helper_thread2.err
|
tabuadapipe.py | import sys
import os
from multiprocessing import Process, Value, Array, Pipe
argm = int(sys.argv[1])
pipe_pai, pipe_filho = Pipe()
def func_tab(pipe_filho):
    # Child-process worker: receive a list over the pipe, append the first
    # eleven multiples (0..10) of the module-level `argm` (taken from
    # sys.argv[1]), and send the filled list back to the parent.
    ar2 = pipe_filho.recv()
    for i in range(11):
        sm = argm * i
        ar2.append(sm)
    pipe_filho.send(ar2)
ar2 = []
newT = Process(target=func_tab, args=(pipe_filho,))
newT.start()
pipe_pai.send(ar2)
ar2 = pipe_pai.recv()
for i in range(11):
print argm,'*',i,'=',ar2[i]
newT.join()
|
fib.py | import time
import multiprocessing
def profile(func):
    """Decorator that prints the wall-clock time taken by each call.

    The wrapped function's return value is deliberately discarded (the
    wrapper always returns None); it exists purely for timing the
    benchmark functions below.
    """
    def wrapper(*args, **kw):
        import time
        start = time.time()
        # BUG FIX: the original called func(*args, *kw); unary * on a dict
        # unpacks only its KEYS as extra positional arguments, so keyword
        # arguments were never forwarded. ** forwards them correctly.
        func(*args, **kw)
        end = time.time()
        print('Cost:{}'.format(end - start))
        return
    return wrapper
def fib(n):
    """Recursive Fibonacci variant used as a CPU-bound benchmark workload.

    Base case returns n itself for n <= 2 (so fib(1) == 1, fib(2) == 2).
    Intentionally exponential-time; do not memoize — the timing functions
    in this file rely on the heavy recursion.
    """
    return n if n <= 2 else fib(n - 1) + fib(n - 2)
@profile
def nothread():
    # Baseline: run both fib(35) calls sequentially in this process.
    fib(35)
    fib(35)
    return
@profile
def withthread():
    # Despite the name, this spawns multiprocessing.Process workers (not
    # threads), so the two fib(35) calls run truly in parallel and are not
    # serialized by the GIL.
    threads = []
    for i in range(2):
        t = multiprocessing.Process(target=fib, args=(35,))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
nothread()
withthread()
|
hid_reader.py | import selectors
import threading
from datetime import datetime
import evdev
from evdev import InputDevice, categorize, ecodes
from app.readers.exceptions import ReaderNotFound
from app.readers.nfc.cards.mifare_classic import MifareClassicCard
from app.readers.port import Port
from app.utils.time import utc_now
# Mapping from Linux input-event keyboard scancodes to the character (or
# control-key name) emitted by a keyboard-wedge HID card reader.
SCAN_CODES = {
    # Scancode: ASCIICode
    0: None, 1: 'ESC', 2: '1', 3: '2', 4: '3', 5: '4', 6: '5', 7: '6', 8: '7', 9: '8',
    10: '9', 11: '0', 12: '-', 13: '=', 14: 'BKSP', 15: 'TAB', 16: 'Q', 17: 'W', 18: 'E', 19: 'R',
    # BUG FIX: scancode 22 is the letter U (QWERTY top row, codes 16-25 are
    # Q W E R T Y U I O P); the original mapped it to an empty string, which
    # silently dropped every 'U' from scanned card data.
    20: 'T', 21: 'Y', 22: 'U', 23: 'I', 24: 'O', 25: 'P', 26: '[', 27: ']', 28: 'CRLF', 29: 'LCTRL',
    30: 'A', 31: 'S', 32: 'D', 33: 'F', 34: 'G', 35: 'H', 36: 'J', 37: 'K', 38: 'L', 39: ';',
    40: '"', 41: '`', 42: 'LSHFT', 43: '\\', 44: 'Z', 45: 'X', 46: 'C', 47: 'V', 48: 'B', 49: 'N',
    50: 'M', 51: ',', 52: '.', 53: '/', 54: 'RSHFT', 56: 'LALT', 100: 'RALT'
}
class HIDReader:
    """Reads keyboard-wedge HID card readers via evdev and dispatches
    completed scans (terminated by Enter) to registered listeners."""

    @staticmethod
    def find_readers(vendor_id, product_id, serial_number):
        """Return all Ports whose IDs match.

        vendor_id / product_id are hex strings (e.g. "ffff");
        serial_number is compared verbatim (always None under evdev,
        see get_ports).
        """
        try:
            vendor_id = int(vendor_id, 16)
            product_id = int(product_id, 16)
            readers = []
            for port in HIDReader.get_ports():
                if port.vendor_id == vendor_id \
                        and port.product_id == product_id \
                        and port.serial_number == serial_number:
                    readers.append(port)
            return readers
        except StopIteration:
            # NOTE(review): nothing above raises StopIteration; this looks
            # left over from an earlier next()-based lookup — confirm.
            raise ReaderNotFound(vendor_id, product_id, serial_number)

    @staticmethod
    def get_ports():
        """Enumerate every evdev input device as a Port object."""
        ports = []
        for device in [evdev.InputDevice(fn) for fn in evdev.list_devices()]:
            ports.append(Port(
                path=device.fn,
                vendor_id=device.info.vendor,
                product_id=device.info.product,
                serial_number=None  # Not supported with evdev
            ))
        return ports

    def __init__(self, hid_ports, read_delay=1):
        # One parallel slot per port: accumulated characters, last payload
        # seen, and the time of the last accepted read (for rate limiting).
        self.hid_ports = hid_ports
        self.reader_devices = []
        self.selector = selectors.DefaultSelector()
        self.read_delay = read_delay
        self.connected = False
        self._reader_alive = None
        self.receiver_thread = None
        self.reader_listeners = []
        self.reader_buffer_strings = []
        self.reader_last_received = []
        self.reader_last_read_time = []
        for i in range(len(hid_ports)):
            self.reader_buffer_strings.append('')
            self.reader_last_received.append(None)
            self.reader_last_read_time.append(utc_now())

    def add_read_listener(self, listener):
        # Listener is expected to expose handle_card_read(card) and
        # handle_data(str) — see reader() below.
        self.reader_listeners.append(listener)

    def connect(self):
        """Connect to the reader and start the worker threads"""
        self.connected = True
        for port in self.hid_ports:
            reader_device = InputDevice(port.path)
            # Grab the device for exclusive access so scans don't also
            # appear as console keystrokes.
            reader_device.grab()
            self.reader_devices.append(reader_device)
            self.selector.register(reader_device, selectors.EVENT_READ)
        self._start_reader()
        # NOTE(review): busy-wait. reader() currently runs inline and
        # blocks, so this spin only matters if threading is re-enabled
        # in _start_reader.
        while self.connected:
            pass

    def _start_reader(self):
        """Start reader thread"""
        self._reader_alive = True
        # start hid->console thread
        self.reader()
        # TODO: Enable threading when stable
        # self.receiver_thread = threading.Thread(target=self.reader, name='rx')
        # self.receiver_thread.daemon = True
        # self.receiver_thread.start()

    def _stop_reader(self):
        """Stop reader thread only, wait for clean exit of thread"""
        self._reader_alive = False
        self.receiver_thread.join()

    def reader(self):
        """Event loop: decode key-down events into per-device buffers and
        dispatch the buffered string to listeners when Enter arrives."""
        try:
            while self.connected and self._reader_alive:
                for key, mask in self.selector.select():
                    reader_device = key.fileobj
                    reader_index = self.reader_devices.index(reader_device)
                    for event in reader_device.read():
                        if event.type == ecodes.EV_KEY:
                            data = categorize(event)  # Save the event temporarily to introspect it
                            if data.keystate == 1:  # Down events only
                                if data.scancode in SCAN_CODES:
                                    # 42 = left shift (ignored), 28 = Enter
                                    # (terminates the current scan).
                                    if (data.scancode != 42) and (data.scancode != 28):
                                        self.reader_buffer_strings[reader_index] += SCAN_CODES.get(data.scancode)
                                    if data.scancode == 28:
                                        data_string = self.reader_buffer_strings[reader_index]
                                        # TODO: Add more checks
                                        if len(data_string) == 8:
                                            # 8 characters — presumably a Mifare
                                            # Classic UID; verify against readers.
                                            for listener in self.reader_listeners:
                                                listener.handle_card_read(MifareClassicCard(data_string))
                                        else:
                                            for listener in self.reader_listeners:
                                                listener.handle_data(data_string)
                                        self.reader_buffer_strings[reader_index] = ''
        except Exception as e:
            self.connected = False
            # TODO: Handle exception (reconnect?) instead of re-raise
            raise

    def _should_read(self, reader_index):
        # Rate limit: True once read_delay seconds have elapsed since the
        # last accepted read on this device.
        return (utc_now() - self.reader_last_read_time[reader_index]).total_seconds() > self.read_delay
|
augment.py | import os
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import sys
from tqdm import tqdm
from albumentations import *
from multiprocessing import Process
classes = ["0","1","0head","1head"]
imageFolder = "images"
annotationFolder = "annotations"
if len(sys.argv) < 2:
quit()
wd = sys.argv[1]
print(wd)
path_images = "{}/{}".format(wd, imageFolder)
path_annotations = "{}/{}".format(wd, annotationFolder)
files = [f for f in os.listdir(path_annotations) if os.path.isfile(os.path.join(path_annotations, f))]
pbar = tqdm(total=len(files))
def get_aug(aug, min_area=0., min_visibility=0.):
    """Wrap a list of albumentations transforms in a Compose configured for
    pascal_voc-format bounding boxes with 'category_id' labels; boxes below
    min_area/min_visibility after augmentation are dropped by the pipeline."""
    return Compose(aug, bbox_params={'format': 'pascal_voc', 'min_area': min_area, 'min_visibility': min_visibility, 'label_fields': ['category_id']})
def write_annotation(path_annotation, annotations):
    """Write an augmented sample's boxes to a minimal VOC-style XML file.

    Parameters
    ----------
    path_annotation : str
        Destination path (overwritten if it exists).
    annotations : dict
        Albumentations-style dict with keys 'image' (HxWx... array),
        'bboxes' (list of [xmin, ymin, xmax, ymax]) and 'category_id'
        (one label per box, written verbatim into <objName>).
    """
    height, width = annotations['image'].shape[:2]
    # FIX: use a context manager so the handle is closed even if a write
    # fails mid-file (the original open/close pair leaked it on error).
    with open(path_annotation, "w") as annotation_file:
        annotation_file.write("<annotation>\n")
        annotation_file.write("<width>{}</width>\n".format(width))
        annotation_file.write("<height>{}</height>\n".format(height))
        for idx, bbox in enumerate(annotations['bboxes']):
            annotation_file.write("<object>\n")
            annotation_file.write("\t<objName>{}</objName>\n".format(annotations['category_id'][idx]))
            # Coordinates are truncated to int to match the reader, which
            # parses them back with int().
            annotation_file.write("\t<xmin>{}</xmin>\n".format(int(bbox[0])))
            annotation_file.write("\t<xmax>{}</xmax>\n".format(int(bbox[2])))
            annotation_file.write("\t<ymin>{}</ymin>\n".format(int(bbox[1])))
            annotation_file.write("\t<ymax>{}</ymax>\n".format(int(bbox[3])))
            annotation_file.write("</object>\n")
        annotation_file.write("</annotation>\n")
def process_images(annotations):
    """Augment each annotated image named in *annotations* and overwrite it.

    Parameters
    ----------
    annotations : list of str
        Annotation (XML) file names relative to the module-level
        path_annotations directory; the matching image lives under
        path_images with a .jpg extension.

    Files whose augmentation raises, or whose boxes all fall below the
    visibility threshold, are skipped and left untouched on disk.
    """
    # Randomized photometric/noise pipeline; albumentations drops boxes
    # with less than 33% visibility after augmentation.
    aug = get_aug([
        OneOf([
            RandomBrightness(),
            RandomContrast(),
            RandomGamma()
        ], p=0.8),
        RGBShift(p=0.1),
        HueSaturationValue(p=0.1),
        OneOf([
            Blur(),
            MedianBlur(),
            MotionBlur(),
            GaussNoise()
        ], p=0.2),
        OneOf([
            CLAHE(),
            IAAAdditiveGaussianNoise(),
            IAASharpen(),
            IAAEmboss()
        ], p=0.15),
    ], min_visibility=0.33)
    for filu in annotations:
        path_annotation = "{}/{}".format(path_annotations, filu)
        # The image shares the annotation's base name with a .jpg extension.
        path_image = "{}/{}".format(path_images, filu)[:-3] + "jpg"
        image = cv2.imread(path_image)
        # FIX: context manager closes the XML file deterministically; the
        # original leaked the handle on every `continue` path below.
        with open(path_annotation) as annotation_file:
            tree = ET.parse(annotation_file)
        bboxes = []
        cat_id = []
        for obj in tree.getroot().iter('object'):
            clas = obj.find('objName').text
            bboxes.append([
                int(obj.find('xmin').text),
                int(obj.find('ymin').text),
                int(obj.find('xmax').text),
                int(obj.find('ymax').text),
            ])
            cat_id.append(classes.index(clas))
        # FIX: use a new name instead of shadowing the `annotations` param.
        sample = {'image': image, 'bboxes': bboxes, 'category_id': cat_id}
        try:
            augmented = aug(**sample)
        except Exception:
            # FIX: narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit). albumentations can raise on
            # degenerate boxes; skip this file and keep the original.
            continue
        if not augmented['bboxes']:
            continue
        cv2.imwrite(path_image, augmented['image'])
        write_annotation(path_annotation, augmented)
chunks = [files[x:x+100] for x in range(0, len(files), 100)]
thread_count = 16
while chunks:
threads = []
for i in range(0,thread_count):
if not chunks:
break
chunk = chunks.pop()
p = Process(target=process_images, args=([chunk]))
p.start()
threads.append(p)
#process_images(aug, chunk)
for thread in threads:
thread.join()
pbar.update(100)
pbar.close() |
camera.py | import time
import io
import threading
import datetime
import cv2
import imutils
import time
import picamera
import picamera.array
from feed import app
def generate_feed(camera):
    """Yield an endless multipart/x-mixed-replace stream of PNG frames.

    Each chunk is a '--frame' boundary, a Content-Type header, the raw
    PNG bytes from camera.get_frame(), and a trailing CRLF.
    """
    header = b'--frame\r\nContent-Type: image/png\r\n\r\n'
    while True:
        yield header + camera.get_frame() + b'\r\n'
class Camera(object):
    """Shared Pi-camera wrapper (Python 2): a single background thread runs
    motion detection and publishes the newest annotated PNG in Camera.frame;
    clients poll get_frame()."""
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera
    last_pic = 0

    def initialize(self):
        # Start the capture thread on first use, then block until the
        # first frame has been published.
        if Camera.thread is None:
            print 'Camera.thread is None in initalize'
            Camera.thread = threading.Thread(target=self._thread)
            Camera.thread.start()
            while self.frame is None:
                time.sleep(0)

    def get_frame(self):
        """Return the newest PNG-encoded frame, recording the access time so
        the capture thread knows a client is still watching."""
        Camera.last_access = time.time()
        self.initialize()
        return self.frame

    @classmethod
    def _thread(cls):
        """Capture loop (background thread): grab frames, run running-average
        frame differencing for motion, annotate, and publish as cls.frame
        until no client has asked for a frame within the configured timeout."""
        print 'Gonna try for a new cam'
        with picamera.PiCamera() as camera:
            print "New Camera"
            camera.resolution = (320, 240)
            camera.hflip = True
            camera.vflip = True
            camera.start_preview()
            stream = picamera.array.PiRGBArray(camera, size=camera.resolution)
            avg = None  # running-average background model (float image)
            for f in camera.capture_continuous(stream, 'bgr', use_video_port=True):
                frame = f.array
                timestamp = datetime.datetime.now()
                text = "Unoccupied"
                frame = imutils.resize(frame, width=500)
                # Blurred grayscale copy makes the frame differencing
                # robust to sensor noise.
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)
                if avg is None:
                    print "starting avg"
                    avg = gray.copy().astype("float")
                    stream.truncate(0)
                    continue
                # Update the background model, then threshold the diff.
                cv2.accumulateWeighted(gray, avg, 0.5)
                frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
                thresh = cv2.threshold(frameDelta, 5, 255,
                                       cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                im, cnts, other = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
                for c in cnts:
                    # Small contours are noise; a large one means motion.
                    if cv2.contourArea(c) < 1000:
                        continue
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                    text = "Occupied"
                ts = timestamp.strftime("%A %d %B %y %I:%M:%S%p")
                cv2.putText(frame, "Status: {}".format(text), (10, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
                # Publish the annotated frame and recycle the buffer.
                cls.frame = cv2.imencode(".png", frame)[1].tostring()
                stream.seek(0)
                stream.truncate()
                # Stop capturing once clients go quiet for TIMEOUT seconds.
                if time.time() - cls.last_access > app.config['TIMEOUT']:
                    break
            print 'Setting thread to None'
            cls.thread = None
|
pixivwallpaper_gui.py | from uuid import getnode
import hashlib
import webbrowser
import tkinter as tk
import set_wallpaper
import time
import threading
import os
import subprocess
import win32com.client
import win32pdhutil
from threading import Timer
import psutil
url="https://singf.space/pixiv/controls/ranking"
def launch_picker():
    """Open the web-based wallpaper picker in the default browser, using this
    machine's MAC address (uuid.getnode) as the client identifier."""
    uuid=str(getnode())
    webbrowser.open("{}/{}".format(url,uuid))
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack()
self.create_widgets()
master.geometry("500x500")
master.iconbitmap("icon.ico")
master.title("PixivWallpaper")
#master.config(bg="#f8f8f9")
def create_widgets(self):
def set_paper_callback():
self.print_log("Getting wallpaper...\n")
status,_=set_wallpaper.download_and_set()
if status==1:
self.print_log("Download failed, check your internet connection")
if status==0:
self.print_log("Wallpaper was set successfully")
def start_set_paper_thread():
set_paper_thread=threading.Thread(target=set_paper_callback)
set_paper_thread.start()
def enlarge_paper_callback():
self.print_log("Getting and enlarging wallpaper, may take a minute or so based on your cpu...\n")
status,_=set_wallpaper.download_and_set(enlarge=True)
if status==1:
self.print_log("Download failed, check your internet connection")
if status==0:
self.print_log("Wallpaper was set successfully")
def start_enlarge_paper_thread():
enlarge_paper_thread=threading.Thread(target=enlarge_paper_callback)
enlarge_paper_thread.start()
def update_callback():
self.print_log("Checking for updates...\n")
status,msg=set_wallpaper.check_update()
if status==1:
self.print_log("Download failed, check your internet connection")
if status==0:
if msg=="latest":
self.print_log("You're running the latest client")
else:
self.print_log("New version available! Download from here:\n{}".format(set_wallpaper.download_url))
def start_update_thread():
update_thread=threading.Thread(target=update_callback)
update_thread.start()
def add_startup():
self.print_log("Adding daemon to startup...\nYour wallpaper will now refresh automatically when the system boots or when the ranking updates\nPlease do this each time you move or update the application\n")
path_loc=os.path.expandvars(r"%APPDATA%\Microsoft\Windows\Start Menu\Programs\Startup")
path_loc = os.path.join(path_loc, "wallpaper_daemon.lnk")
path_target = os.path.abspath("wallpaper_daemon.exe")
path_icon = os.path.abspath("icon.ico")
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(path_loc)
shortcut.Targetpath = path_target
shortcut.IconLocation = path_icon
shortcut.WindowStyle = 7 # 7 - Minimized, 3 - Maximized, 1 - Normal
shortcut.WorkingDirectory= os.path.abspath("")
shortcut.save()
subprocess.Popen("explorer /select,{}".format(path_loc))
def check_daemon():
status=1
for proc in psutil.process_iter():
if "wallpaper_daemon" in proc.name():
status=0
break
if status==0:
self.daemon_label.config(text="Daemon running",fg="green")
else:
self.daemon_label.config(text="Daemon not running",fg="red")
return status
def switch_daemon():
    # Toggle the background daemon: kill it when running, spawn it when not.
    status = check_daemon()
    if status == 0:
        self.print_log("Stopping daemon...\nTo prevent daemon from running automatically, press Add to startup and delete the shortcut\n")
        # Force-kill by image name (Windows taskkill).
        os.system("taskkill /F /im wallpaper_daemon.exe")
        self.daemon_label.config(text="Daemon not running", fg="red")
    else:
        self.print_log("Running daemon...\nYour wallpaper will now refresh automatically\nTo make daemon run automatically, press Add to startup\n")
        # Detached process, no pipes: the GUI never blocks on the daemon.
        subprocess.Popen([os.path.abspath("wallpaper_daemon.exe")], stdin=None, stdout=None, stderr=None)
        self.daemon_label.config(text="Daemon running", fg="green")
# --- Widget construction and grid layout ---
self.get_btn = tk.Button(self, text="Get wallpaper", command=start_set_paper_thread)
self.get_btn.grid(row=0, column=0, padx=10, pady=10)
self.enlarge_btn = tk.Button(self, text="Enlarge wallpaper", command=start_enlarge_paper_thread)
self.enlarge_btn.grid(row=1, column=0, padx=10, pady=10)
self.pick_btn = tk.Button(self, text="Pick wallpaper", command=launch_picker)
self.pick_btn.grid(row=2, column=0, padx=10, pady=10)
#self.update_btn=tk.Button(self,text="Check update",command=start_update_thread)
#self.update_btn.grid(row=0,column=1,padx=10,pady=10)
self.startup_btn = tk.Button(self, text="Add to startup", command=add_startup)
self.startup_btn.grid(row=0, column=1, padx=10, pady=10)
self.daemon_label = tk.Label(self)
self.daemon_label.grid(row=1, column=1, padx=10, pady=10)
self.daemon_btn = tk.Button(self, text="Start/Stop daemon", command=switch_daemon)
self.daemon_btn.grid(row=2, column=1, padx=10, pady=10)
self.output_text = tk.Text(self, width=62, height=25)
self.output_text.grid(row=50, columnspan=2)
# Bug fix: the original called update_callback() synchronously here, which
# performs a network round-trip on the UI thread and stalls startup; run it
# on a worker thread, as the (commented-out) update button would.
start_update_thread()
check_daemon()
def print_log(self, msg):
    # Prepend msg (plus a newline) at position "1.0" so the newest message
    # always appears at the top of the log box.
    self.output_text.insert("1.0", msg + "\n")
if __name__ == "__main__":
    # Launch the tkinter GUI and enter the event loop.
    root = tk.Tk()
    app = Application(master=root)
    app.mainloop()
|
word2vec.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
# Load the custom skip-gram input ops (compiled separately as
# word2vec_ops.so) from the directory containing this script.
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))

# Command-line flags: model size, training schedule, input/output paths and
# reporting intervals. See each help string for details.
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
                    "training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
                    "E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "File consisting of analogies of four tokens."
    "embedding 2 - embedding 1 + embedding 3 should be close "
    "to embedding 4."
    "See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
                     "Number of training examples processed per step "
                     "(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
    "model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
                     "Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
                     "Save training summary to file every n seconds (rounded "
                     "up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
                     "Checkpoint the model (i.e. save the parameters) every n "
                     "seconds (rounded up to statistics interval).")

# FLAGS is populated when tf.app.run() parses argv in the __main__ block.
FLAGS = flags.FLAGS
class Options(object):
    """Hyper-parameters and paths for the word2vec model, snapshotted from FLAGS."""

    def __init__(self):
        # -- Model hyper-parameters --
        self.emb_dim = FLAGS.embedding_size          # embedding dimension
        self.num_samples = FLAGS.num_neg_samples    # negatives per example
        self.learning_rate = FLAGS.learning_rate    # initial LR (decays to 0)
        self.epochs_to_train = FLAGS.epochs_to_train
        self.batch_size = FLAGS.batch_size
        self.window_size = FLAGS.window_size        # context words each side
        self.min_count = FLAGS.min_count            # vocab inclusion threshold
        self.subsample = FLAGS.subsample            # frequent-word subsampling
        self.concurrent_steps = FLAGS.concurrent_steps
        # -- Bookkeeping intervals (seconds; summary/checkpoint round up to
        #    the statistics interval) --
        self.statistics_interval = FLAGS.statistics_interval
        self.summary_interval = FLAGS.summary_interval
        self.checkpoint_interval = FLAGS.checkpoint_interval
        # -- Input / output locations --
        self.train_data = FLAGS.train_data
        self.eval_data = FLAGS.eval_data
        self.save_path = FLAGS.save_path
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
class Word2Vec(object):
    """Word2Vec model (Skipgram).

    Builds the training graph (custom skipgram input op + NCE loss), an
    analogy/nearby evaluation graph, and drives multi-threaded training.
    """

    def __init__(self, options, session):
        self._options = options
        self._session = session
        self._word2id = {}
        self._id2word = []
        self.build_graph()
        self.build_eval_graph()
        self.save_vocab()

    def read_analogies(self):
        """Reads through the analogy question file.

        Sets self._analogy_questions to a [n, 4] int32 array of word ids;
        questions containing out-of-vocabulary words are skipped.
        """
        questions = []
        questions_skipped = 0
        with open(self._options.eval_data, "rb") as analogy_f:
            for line in analogy_f:
                if line.startswith(b":"):  # Skip comments.
                    continue
                words = line.strip().lower().split(b" ")
                ids = [self._word2id.get(w.strip()) for w in words]
                if None in ids or len(ids) != 4:
                    questions_skipped += 1
                else:
                    questions.append(np.array(ids))
        print("Eval analogy file: ", self._options.eval_data)
        print("Questions: ", len(questions))
        print("Skipped: ", questions_skipped)
        self._analogy_questions = np.array(questions, dtype=np.int32)

    def forward(self, examples, labels):
        """Build the graph for the forward pass; returns (true, sampled) logits."""
        opts = self._options
        # Embedding: [vocab_size, emb_dim]
        init_width = 0.5 / opts.emb_dim
        emb = tf.Variable(
            tf.random_uniform(
                [opts.vocab_size, opts.emb_dim], -init_width, init_width),
            name="emb")
        self._emb = emb
        # Softmax weight: [vocab_size, emb_dim]. Transposed.
        sm_w_t = tf.Variable(
            tf.zeros([opts.vocab_size, opts.emb_dim]),
            name="sm_w_t")
        # Softmax bias: [vocab_size]. (Original comment said [emb_dim].)
        sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
        # Global step: scalar, i.e., shape [].
        self.global_step = tf.Variable(0, name="global_step")
        # Nodes to compute the nce loss w/ candidate sampling.
        labels_matrix = tf.reshape(
            tf.cast(labels,
                    dtype=tf.int64),
            [opts.batch_size, 1])
        # Negative sampling: draw noise words from the unigram^0.75 distribution.
        sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
            true_classes=labels_matrix,
            num_true=1,
            num_sampled=opts.num_samples,
            unique=True,
            range_max=opts.vocab_size,
            distortion=0.75,
            unigrams=opts.vocab_counts.tolist()))
        # Embeddings for examples: [batch_size, emb_dim]
        example_emb = tf.nn.embedding_lookup(emb, examples)
        # Weights for labels: [batch_size, emb_dim]
        true_w = tf.nn.embedding_lookup(sm_w_t, labels)
        # Biases for labels: [batch_size, 1]
        true_b = tf.nn.embedding_lookup(sm_b, labels)
        # Weights for sampled ids: [num_sampled, emb_dim]
        sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
        # Biases for sampled ids: [num_sampled, 1]
        sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
        # True logits: [batch_size, 1]
        # Fix: tf.mul was removed in TF 1.0; the rest of this file already
        # uses the 1.x API (tf.summary.*, labels=/logits= kwargs), so the
        # supported spelling is tf.multiply.
        true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b
        # Sampled logits: [batch_size, num_sampled]
        # We replicate sampled noise labels for all examples in the batch
        # using the matmul.
        sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
        sampled_logits = tf.matmul(example_emb,
                                   sampled_w,
                                   transpose_b=True) + sampled_b_vec
        return true_logits, sampled_logits

    def nce_loss(self, true_logits, sampled_logits):
        """Build the graph for the NCE loss."""
        # cross-entropy(logits, labels)
        opts = self._options
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(true_logits), logits=true_logits)
        sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
        # NCE-loss is the sum of the true and noise (sampled words)
        # contributions, averaged over the batch.
        nce_loss_tensor = (tf.reduce_sum(true_xent) +
                           tf.reduce_sum(sampled_xent)) / opts.batch_size
        return nce_loss_tensor

    def optimize(self, loss):
        """Build the graph to optimize the loss function."""
        # Linear learning rate decay down to a floor of 0.01% of the base LR.
        opts = self._options
        words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
        lr = opts.learning_rate * tf.maximum(
            0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
        self._lr = lr
        optimizer = tf.train.GradientDescentOptimizer(lr)
        # GATE_NONE: allow racy, lock-free updates (standard for word2vec).
        train = optimizer.minimize(loss,
                                   global_step=self.global_step,
                                   gate_gradients=optimizer.GATE_NONE)
        self._train = train

    def build_eval_graph(self):
        """Build the eval graph.

        Each analogy task is to predict the 4th word (d) given three words
        a, b, c (e.g. a=italy, b=rome, c=france => d=paris). The feeds are
        three id vectors of size N (one batch of analogies).
        """
        analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
        analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
        analogy_c = tf.placeholder(dtype=tf.int32)  # [N]
        # Normalized word embeddings of shape [vocab_size, emb_dim].
        nemb = tf.nn.l2_normalize(self._emb, 1)
        # Each row of a_emb, b_emb, c_emb is a word's embedding vector,
        # shape [N, emb_dim].
        a_emb = tf.gather(nemb, analogy_a)  # a's embs
        b_emb = tf.gather(nemb, analogy_b)  # b's embs
        c_emb = tf.gather(nemb, analogy_c)  # c's embs
        # d should be near c_emb + (b_emb - a_emb) on the unit hyper-sphere.
        target = c_emb + (b_emb - a_emb)
        # Cosine distance between each target and every vocab word:
        # dist has shape [N, vocab_size].
        dist = tf.matmul(target, nemb, transpose_b=True)
        # For each question (row in dist), find the top 4 words.
        _, pred_idx = tf.nn.top_k(dist, 4)
        # Nodes for computing neighbors for a given word by cosine distance.
        nearby_word = tf.placeholder(dtype=tf.int32)  # word id
        nearby_emb = tf.gather(nemb, nearby_word)
        nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
        nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                             min(1000, self._options.vocab_size))
        # Handles used by training and evaluation to run/feed/fetch.
        self._analogy_a = analogy_a
        self._analogy_b = analogy_b
        self._analogy_c = analogy_c
        self._analogy_pred_idx = pred_idx
        self._nearby_word = nearby_word
        self._nearby_val = nearby_val
        self._nearby_idx = nearby_idx

    def build_graph(self):
        """Build the graph for the full model."""
        opts = self._options
        # The training data. A text file.
        (words, counts, words_per_epoch, self._epoch, self._words, examples,
         labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
                                              batch_size=opts.batch_size,
                                              window_size=opts.window_size,
                                              min_count=opts.min_count,
                                              subsample=opts.subsample)
        (opts.vocab_words, opts.vocab_counts,
         opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
        opts.vocab_size = len(opts.vocab_words)
        print("Data file: ", opts.train_data)
        print("Vocab size: ", opts.vocab_size - 1, " + UNK")
        print("Words per epoch: ", opts.words_per_epoch)
        self._examples = examples
        self._labels = labels
        self._id2word = opts.vocab_words
        for i, w in enumerate(self._id2word):
            self._word2id[w] = i
        true_logits, sampled_logits = self.forward(examples, labels)
        loss = self.nce_loss(true_logits, sampled_logits)
        # Fix: tf.scalar_summary was removed in TF 1.0 and would not be
        # collected by tf.summary.merge_all() (used in train()); use the
        # 1.x tf.summary.scalar op instead.
        tf.summary.scalar("NCE loss", loss)
        self._loss = loss
        self.optimize(loss)
        # Properly initialize all variables.
        tf.global_variables_initializer().run()
        self.saver = tf.train.Saver()

    def save_vocab(self):
        """Save the vocabulary to a file so the model can be reloaded."""
        opts = self._options
        with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
            for i in xrange(opts.vocab_size):
                # NOTE(review): under Python 3, "%s" of a bytes object writes
                # the b'...' repr; under Python 2 (bytes == str) this is fine.
                # Left unchanged to avoid altering the on-disk format.
                vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
                f.write("%s %d\n" % (vocab_word,
                                     opts.vocab_counts[i]))

    def _train_thread_body(self):
        # Run training steps until the input pipeline rolls into a new epoch.
        initial_epoch, = self._session.run([self._epoch])
        while True:
            _, epoch = self._session.run([self._train, self._epoch])
            if epoch != initial_epoch:
                break

    def train(self):
        """Train the model for one epoch; returns the new epoch number."""
        opts = self._options
        initial_epoch, initial_words = self._session.run([self._epoch, self._words])
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
        workers = []
        for _ in xrange(opts.concurrent_steps):
            t = threading.Thread(target=self._train_thread_body)
            t.start()
            workers.append(t)
        last_words, last_time, last_summary_time = initial_words, time.time(), 0
        last_checkpoint_time = 0
        while True:
            time.sleep(opts.statistics_interval)  # Reports our progress once a while.
            (epoch, step, loss, words, lr) = self._session.run(
                [self._epoch, self.global_step, self._loss, self._words, self._lr])
            now = time.time()
            last_words, last_time, rate = words, now, (words - last_words) / (
                now - last_time)
            print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
                  (epoch, step, lr, loss, rate), end="")
            sys.stdout.flush()
            if now - last_summary_time > opts.summary_interval:
                summary_str = self._session.run(summary_op)
                summary_writer.add_summary(summary_str, step)
                last_summary_time = now
            if now - last_checkpoint_time > opts.checkpoint_interval:
                self.saver.save(self._session,
                                os.path.join(opts.save_path, "model.ckpt"),
                                global_step=step.astype(int))
                last_checkpoint_time = now
            if epoch != initial_epoch:
                break
        for t in workers:
            t.join()
        return epoch

    def _predict(self, analogy):
        """Predict the top 4 answers for analogy questions."""
        idx, = self._session.run([self._analogy_pred_idx], {
            self._analogy_a: analogy[:, 0],
            self._analogy_b: analogy[:, 1],
            self._analogy_c: analogy[:, 2]
        })
        return idx

    def eval(self):
        """Evaluate analogy questions and report precision@1 accuracy."""
        correct = 0
        try:
            total = self._analogy_questions.shape[0]
        except AttributeError:
            raise AttributeError("Need to read analogy questions.")
        start = 0
        while start < total:
            limit = start + 2500  # evaluate in batches of 2500
            sub = self._analogy_questions[start:limit, :]
            idx = self._predict(sub)
            start = limit
            for question in xrange(sub.shape[0]):
                for j in xrange(4):
                    if idx[question, j] == sub[question, 3]:
                        # Bingo! E.g., [italy, rome, france, paris].
                        correct += 1
                        break
                    elif idx[question, j] in sub[question, :3]:
                        # Skip words already in the question.
                        continue
                    else:
                        # The correct label is not the precision@1
                        break
        print()
        print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                                  correct * 100.0 / total))

    def analogy(self, w0, w1, w2):
        """Predict word w3 as in w0:w1 vs w2:w3."""
        wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
        idx = self._predict(wid)
        for c in [self._id2word[i] for i in idx[0, :]]:
            if c not in [w0, w1, w2]:
                print(c)
                break
        else:
            # Fix: the original printed "unknown" unconditionally, even after
            # a successful prediction; the for/else only fires when every
            # candidate was one of the query words.
            print("unknown")

    def nearby(self, words, num=20):
        """Prints out nearby words given a list of words."""
        ids = np.array([self._word2id.get(x, 0) for x in words])
        vals, idx = self._session.run(
            [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
        for i in xrange(len(words)):
            print("\n%s\n=====================================" % (words[i]))
            for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
                print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
    """Drop into an IPython shell seeded with *local_ns* plus module globals."""
    import IPython  # lazy: only needed for --interactive runs
    namespace = dict(local_ns or {})
    # NB: globals are applied second, so they win on name collisions,
    # matching the original behavior.
    namespace.update(globals())
    IPython.start_ipython(argv=[], user_ns=namespace)
def main(_):
    """Train a word2vec model."""
    # All three paths are mandatory; bail out early if any is missing.
    if not all((FLAGS.train_data, FLAGS.eval_data, FLAGS.save_path)):
        print("--train_data --eval_data and --save_path must be specified.")
        sys.exit(1)
    opts = Options()
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.device("/cpu:0"):
            model = Word2Vec(opts, session)
            model.read_analogies()  # Read analogy questions
        for _ in xrange(opts.epochs_to_train):
            model.train()  # Process one epoch
            model.eval()  # Eval analogies.
        # Perform a final save.
        model.saver.save(session,
                         os.path.join(opts.save_path, "model.ckpt"),
                         global_step=model.global_step)
        if FLAGS.interactive:
            # E.g.,
            # [0]: model.analogy(b'france', b'paris', b'russia')
            # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
            _start_shell(locals())
if __name__ == "__main__":
    # tf.app.run() parses the flags defined above and then invokes main().
    tf.app.run()
|
slave_sup_sec.py | import time, random
import hashlib
import socket # Import socket module
import threading
import sys
from itertools import chain, product
import time
# Toy RSA-like parameters: n = 2289 = 3*7*109 (squarefree, NOT two primes --
# the "p=21" of the original note is composite), e = 7, d = 1543;
# e*d = 10801 ≡ 1 (mod λ(n) = lcm(2, 6, 108) = 108), so decrypt inverts encrypt.
# For demonstration only: trivially breakable, provides no real security.
def encrypt(data):
    """Encrypt *data* with the toy RSA public key (e=7, n=2289).

    Each character is encrypted independently; returns a comma-separated
    string of integer ciphertexts ("" for empty input).
    """
    e, n = 7, 2289
    # pow(x, e, n) performs modular exponentiation directly instead of
    # materialising x ** e first.
    return ",".join(str(pow(ord(ch), e, n)) for ch in data)
def decrypt(data):
    """Invert encrypt(): decode a comma-separated ciphertext string.

    Uses the toy RSA private key (d=1543, n=2289). *data* must contain at
    least one number; an empty string raises ValueError (unchanged).
    """
    d, n = 1543, 2289
    # Three-argument pow keeps every intermediate below n; the original
    # (x ** 1543) % n built a multi-thousand-digit integer per character.
    return "".join(chr(pow(int(tok), d, n)) for tok in data.split(','))
def brute_force(charset, maxlength):
    """Yield every string over *charset* of length 1..maxlength, shortest first."""
    for length in range(1, maxlength + 1):
        for combo in product(charset, repeat=length):
            yield ''.join(combo)
def get_sha1(s):
    """Return the hex SHA-1 digest of *s* (UTF-8 encoded)."""
    digest = hashlib.sha1()
    digest.update(s.encode('utf-8'))
    return digest.hexdigest()
class Slave:
    """UDP worker node: challenges its master to authenticate, accepts
    (sha1-digest, charset) brute-force jobs, and reports cracked values.

    Two daemon-like threads run for the object's lifetime: conn() (network
    receive loop) and work() (cracking loop). They share self.tasks without
    locking -- presumably tolerable because appends/pops are atomic in
    CPython; NOTE(review): confirm this is intended.
    """

    def __init__(self, host, port):
        # All traffic is datagram-based; one socket for everything.
        self.slave_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Generous timeout: the master may be idle between jobs.
        self.slave_socket.settimeout(41.0)
        self.limit = 3   # maximum candidate length tried by break_code
        self.hard = 5    # artificial per-attempt delay in seconds
        self.addr = (host, int(port))
        self.rand = 0    # nonce of the pending challenge (0 = none pending)
        self.data = 0    # payload stashed while a challenge is outstanding
        self.trusted = []  # peer addresses that answered a challenge correctly
        self.tasks = []    # queued (sha1-digest, charset) jobs
        self.task = ""       # charset of the job currently being worked
        self.to_break = ""   # sha1 digest currently being cracked
        threading.Thread(target=self.conn).start()
        threading.Thread(target=self.work).start()

    def crsend(self, data, server):
        # Encrypt-then-send helper; all outbound traffic goes through here.
        en = encrypt(data)
        self.slave_socket.sendto(en.encode(), server)

    def challenge(self, server):
        # Ask the peer to echo a random nonce to prove it can decrypt.
        print("Challenging " + str(server))
        self.rand = random.randint(1, 1000)
        # NOTE(review): the nonce is encrypted twice (here and in crsend);
        # the peer must unwrap both layers.
        self.crsend("auth:" + str(encrypt(str(self.rand))), server)

    def authenticate(self, auth, server):
        if int(auth) == self.rand:
            self.rand = 0
            print("Server authenticated : " + str(server))
            self.trusted.append(server)
            # Replay the payload that originally triggered the challenge.
            self.handlejob(self.data, server)

    def handlejob(self, data, server):
        # data is the decrypted message split on ":"; data[0] is the verb.
        if data[0] == "auth":
            self.authenticate(data[1], server)
        if server in self.trusted:
            if data[0] == "job":
                self.tasks.append((data[1], data[2]))
                print("Job accepted {} {}".format(data[1], data[2]))
        else:
            # Untrusted sender: stash the payload and challenge it first.
            print("Job rejected {}".format(str(data)))
            self.challenge(server)
            self.data = data

    def send_res(self, res):
        print("Sending result [{}]".format(res))
        self.crsend("res:" + res, self.addr)

    def send_r(self):
        # Keep at most a small backlog of jobs queued locally.
        if len(self.tasks) < 3:
            print("Sending job request")
            self.crsend('job', self.addr)

    def conn(self):
        # Receive loop: request work, decrypt replies, dispatch them.
        while True:
            try:
                self.send_r()
                data, server = self.slave_socket.recvfrom(1024)
                data = decrypt(data.decode())
                data = data.split(":")
                self.handlejob(data, server)
                time.sleep(2)
            except socket.timeout:
                # Nothing arrived within the timeout window; just retry.
                pass

    def work(self):
        # Worker loop: crack queued digests one at a time.
        while True:
            if len(self.tasks):
                d = self.tasks.pop()
                self.to_break = d[0]
                self.task = d[1]
                res = self.break_code()
                res = res.split(":")
                if res[0] == "pass":
                    self.send_res(res[2])
                    # NOTE(review): remaining queued jobs are discarded once a
                    # result is sent -- assumes they target the same digest;
                    # confirm against the master's protocol.
                    self.tasks = []
                #print("working")

    def break_code(self, length=1):
        """
        just a tool of slave
        """
        #print("/ breaking {} with {}".format(self.to_break,self.task))
        print("/ ")
        # Artificial throttle per attempt round.
        time.sleep(self.hard)
        # NOTE(review): each recursive call re-enumerates all shorter
        # candidates (brute_force yields lengths 1..length) and sleeps again;
        # the recursion depth is bounded by self.limit.
        for guess in brute_force(self.task, length):
            if get_sha1(guess) == self.to_break:
                return "pass:" + self.to_break + ':' + guess
        if length > self.limit:
            # print("LENGTH LIMIT REACHED {}".format(length))
            return "fail"
        else:
            return self.break_code(length + 1)
if __name__ == '__main__':
    # usage: slave_sup_sec.py <master-host> <master-port>
    Slave(sys.argv[1], int(sys.argv[2]))
|
test_shell_interactive.py | #!/usr/bin/env impala-python
# encoding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import httplib
import logging
import os
import pexpect
import pytest
import re
import signal
import socket
import sys
import threading
from time import sleep
from contextlib import closing
# This import is the actual ImpalaShell class from impala_shell.py.
# We rename it to ImpalaShellClass here because we later import another
# class called ImpalaShell from tests/shell/util.py, and we don't want
# to mask it.
from shell.impala_shell import ImpalaShell as ImpalaShellClass
from tempfile import NamedTemporaryFile
from tests.common.impala_service import ImpaladService
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfLocal
from tests.common.test_dimensions import create_client_protocol_dimension
from util import (assert_var_substitution, ImpalaShell, get_impalad_port, get_shell_cmd,
get_open_sessions_metric, IMPALA_SHELL_EXECUTABLE)
import SimpleHTTPServer
import SocketServer
# Directory holding the query files used by these shell tests.
QUERY_FILE_PATH = os.path.join(os.environ['IMPALA_HOME'], 'tests', 'shell')
# Regex to match the interactive shell prompt that is expected after each command.
# Examples: hostname:21000, hostname:21050, hostname:28000
PROMPT_REGEX = r'\[[^:]+:2(1|8)0[0-9][0-9]\]'
LOG = logging.getLogger('test_shell_interactive')
@pytest.fixture
def tmp_history_file(request):
  """
  Test fixture which uses a temporary file as the path for the shell
  history.
  """
  tmp = NamedTemporaryFile()
  old_path = os.environ.get('IMPALA_HISTFILE')
  os.environ['IMPALA_HISTFILE'] = tmp.name
  # NOTE(review): `tmp` is not kept alive past this function's return, so the
  # temporary file itself may already be deleted when the shell opens the
  # path; presumably the shell recreates its history file -- confirm.

  def cleanup():
    # Restore (or remove) the pre-test IMPALA_HISTFILE setting.
    if old_path is not None:
      os.environ['IMPALA_HISTFILE'] = old_path
    else:
      del os.environ['IMPALA_HISTFILE']
  request.addfinalizer(cleanup)
  return tmp.name
class UnavailableRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """An HTTP server that always returns 503"""

  def do_POST(self):
    # Simulate an overloaded/unavailable endpoint for shell retry tests.
    self.send_response(code=httplib.SERVICE_UNAVAILABLE, message="Service Unavailable")
def get_unused_port():
  """Find an unused port http://stackoverflow.com/questions/1365265

  Binds an ephemeral port (port 0), reads back the kernel-assigned number
  and returns it; the socket is closed on exit from the with-block.
  """
  with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
    # Fix: SO_REUSEADDR must be set before bind() -- setting it afterwards
    # (as the original did) has no effect on the already-bound socket.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', 0))
    return s.getsockname()[1]
class TestImpalaShellInteractive(ImpalaTestSuite):
"""Test the impala shell interactively"""
@classmethod
def get_workload(self):
  # Workload name used by the test framework to select test vectors.
  # (Receiver is conventionally `cls`; kept as-is to avoid churn.)
  return 'functional-query'
@classmethod
def add_test_dimensions(cls):
  # Run with both beeswax and HS2 to ensure that behaviour is the same.
  cls.ImpalaTestMatrix.add_dimension(create_client_protocol_dimension())
def _expect_with_cmd(self, proc, cmd, vector, expectations=(), db="default"):
  """Send *cmd* to the pexpect-driven shell, wait for the next prompt, and
  verify that every string in *expectations* occurs in the output."""
  proc.sendline(cmd + ";")
  # Waiting for the prompt guarantees proc.before holds the full output.
  proc.expect(":{0}] {1}>".format(get_impalad_port(vector), db))
  if not expectations:
    return
  for expected in expectations:
    assert expected in proc.before
def _wait_for_num_open_sessions(self, vector, impala_service, expected, err):
  """Helper method to wait for the number of open sessions to reach 'expected'."""
  metric_name = get_open_sessions_metric(vector)
  try:
    actual = impala_service.wait_for_metric_value(metric_name, expected)
  except AssertionError:
    # Fix: the original used 'LOG.exception("Error: " % err)'; %-formatting a
    # string with no placeholder raises TypeError and masks the real failure.
    LOG.exception("Error: %s", err)
    raise
  assert actual == expected, err
def test_local_shell_options(self, vector):
  """Test that setting the local shell options works"""
  shell_cmd = get_shell_cmd(vector)
  proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
  # Wait for the initial prompt before driving the shell.
  proc.expect(":{0}] default>".format(get_impalad_port(vector)))
  # Defaults: both live options are off.
  self._expect_with_cmd(proc, "set", vector,
                        ("LIVE_PROGRESS: False", "LIVE_SUMMARY: False"))
  self._expect_with_cmd(proc, "set live_progress=true", vector)
  self._expect_with_cmd(proc, "set", vector,
                        ("LIVE_PROGRESS: True", "LIVE_SUMMARY: False"))
  self._expect_with_cmd(proc, "set live_summary=1", vector)
  self._expect_with_cmd(proc, "set", vector,
                        ("LIVE_PROGRESS: True", "LIVE_SUMMARY: True"))
  # Output-formatting options: flip each and confirm `set` reflects it.
  self._expect_with_cmd(proc, "set", vector,
                        ("WRITE_DELIMITED: False", "VERBOSE: True"))
  self._expect_with_cmd(proc, "set", vector,
                        ("DELIMITER: \\t", "OUTPUT_FILE: None"))
  self._expect_with_cmd(proc, "set write_delimited=true", vector)
  self._expect_with_cmd(proc, "set", vector, ("WRITE_DELIMITED: True", "VERBOSE: True"))
  self._expect_with_cmd(proc, "set DELIMITER=,", vector)
  self._expect_with_cmd(proc, "set", vector, ("DELIMITER: ,", "OUTPUT_FILE: None"))
  self._expect_with_cmd(proc, "set output_file=/tmp/clmn.txt", vector)
  self._expect_with_cmd(proc, "set", vector,
                        ("DELIMITER: ,", "OUTPUT_FILE: /tmp/clmn.txt"))
  # EOF exits the shell cleanly.
  proc.sendeof()
  proc.wait()
@pytest.mark.execute_serially
def test_write_delimited(self, vector):
  """Test output rows in delimited mode"""
  shell = ImpalaShell(vector)
  for cmd in ("use tpch", "set write_delimited=true", "select * from nation"):
    shell.send_cmd(cmd)
  result = shell.get_result()
  # Pretty-printed table borders must be gone; rows are tab-separated.
  assert "+----------------+" not in result.stdout
  assert "21\tVIETNAM\t2" in result.stdout
@pytest.mark.execute_serially
def test_change_delimiter(self, vector):
  """Test change output delimiter if delimited mode is enabled"""
  shell = ImpalaShell(vector)
  for cmd in ("use tpch", "set write_delimited=true", "set delimiter=,",
              "select * from nation"):
    shell.send_cmd(cmd)
  result = shell.get_result()
  # Rows must come back comma-separated instead of tab-separated.
  assert "21,VIETNAM,2" in result.stdout
@pytest.mark.execute_serially
def test_print_to_file(self, vector):
  """Test print to output file and unset"""
  # test print to file
  p1 = ImpalaShell(vector)
  p1.send_cmd("use tpch")
  local_file = NamedTemporaryFile(delete=True)
  p1.send_cmd("set output_file=%s" % local_file.name)
  p1.send_cmd("select * from nation")
  result = p1.get_result()
  # With output_file set, rows must NOT appear on stdout...
  assert "VIETNAM" not in result.stdout
  with open(local_file.name, "r") as fi:
    # ...but must have been written to the file.
    result = fi.read()
  assert "VIETNAM" in result
  # test unset to print back to stdout
  p2 = ImpalaShell(vector)
  p2.send_cmd("use tpch")
  p2.send_cmd("set output_file=%s" % local_file.name)
  p2.send_cmd("unset output_file")
  p2.send_cmd("select * from nation")
  result = p2.get_result()
  assert "VIETNAM" in result.stdout
def test_compute_stats_with_live_progress_options(self, vector, unique_database):
  """Test that setting LIVE_PROGRESS options won't cause COMPUTE STATS query fail"""
  p = ImpalaShell(vector)
  p.send_cmd("set live_progress=True")
  p.send_cmd("set live_summary=True")
  table = "{0}.live_progress_option".format(unique_database)
  p.send_cmd('create table {0}(col int);'.format(table))
  try:
    p.send_cmd('compute stats {0};'.format(table))
  finally:
    # Always clean the table up, even if compute stats failed.
    p.send_cmd('drop table if exists {0};'.format(table))
  result = p.get_result()
  assert "Updated 1 partition(s) and 1 column(s)" in result.stdout
def test_escaped_quotes(self, vector):
  """Test escaping quotes"""
  # test escaped quotes outside of quotes: must be rejected by the lexer
  result = run_impala_shell_interactive(vector, "select \\'bc';")
  assert "Unexpected character" in result.stderr
  result = run_impala_shell_interactive(vector, "select \\\"bc\";")
  assert "Unexpected character" in result.stderr
  # test escaped quotes within quotes: valid string literals
  result = run_impala_shell_interactive(vector, "select 'ab\\'c';")
  assert "Fetched 1 row(s)" in result.stderr
  result = run_impala_shell_interactive(vector, "select \"ab\\\"c\";")
  assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_cancellation(self, vector):
  # SIGINT during a running query should cancel it and leave no queries
  # in flight; SIGINT at an idle prompt should just echo ^C.
  impalad = ImpaladService(socket.getfqdn())
  assert impalad.wait_for_num_in_flight_queries(0)
  command = "select sleep(10000);"
  p = ImpalaShell(vector)
  p.send_cmd(command)
  sleep(3)
  os.kill(p.pid(), signal.SIGINT)
  result = p.get_result()
  # NOTE(review): asserts "Cancelled" is NOT in stderr -- presumably the
  # shell suppresses the message in this path; confirm intended.
  assert "Cancelled" not in result.stderr
  assert impalad.wait_for_num_in_flight_queries(0)
  # SIGINT with no query running: shell prints ^C and stays alive.
  p = ImpalaShell(vector)
  sleep(3)
  os.kill(p.pid(), signal.SIGINT)
  result = p.get_result()
  assert "^C" in result.stderr
@pytest.mark.execute_serially
def test_cancellation_mid_command(self, vector):
  """The test starts with sending in a multi-line input without a command delimiter.
  When the impala-shell is waiting for more input, the test sends a SIGINT signal (to
  simulate pressing Ctrl-C) followed by a final query terminated with semicolon.
  The expected behavior for the impala shell is to discard everything before the
  SIGINT signal was sent and execute the final query only."""
  shell_cmd = get_shell_cmd(vector)
  queries = [
      "line 1\n", "line 2\n", "line 3\n\n", "line 4 and", " 5\n",
      "line 6\n", "line 7\n", "line 8\n", "line 9\n", "line 10"]
  # Check when the last line before Ctrl-C doesn't end with newline.
  child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
  for query in queries:
    child_proc.send(query)
  child_proc.sendintr()
  child_proc.send('select "test without newline";\n')
  child_proc.expect("test without newline")
  child_proc.sendline('quit;')
  child_proc.wait()
  # Check when the last line before Ctrl-C ends with newline.
  child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
  for query in queries:
    child_proc.send(query)
  # Sending in a newline so it will end with one
  child_proc.send("\n")
  # checking that it really is on a continuation line
  child_proc.expect(" > ")
  child_proc.sendintr()
  child_proc.send('select "test with newline";\n')
  child_proc.expect("test with newline")
  child_proc.sendline('quit;')
  child_proc.wait()
def test_unicode_input(self, vector):
  "Test queries containing non-ascii input"
  # test a unicode query spanning multiple lines
  # (U+FFFD is the Unicode replacement character)
  unicode_text = u'\ufffd'
  args = "select '%s'\n;" % unicode_text.encode('utf-8')
  result = run_impala_shell_interactive(vector, args)
  assert "Fetched 1 row(s)" in result.stderr
def test_welcome_string(self, vector):
    """Test that the shell's welcome message is only printed once
    when the shell is started. Ensure it is not reprinted on errors.
    Regression test for IMPALA-1153
    """
    # Both a syntax error and a missing-table error must leave the banner at one copy.
    for cmd in ('asdf;', 'select * from non_existent_table;'):
        result = run_impala_shell_interactive(vector, cmd)
        assert result.stdout.count("Welcome to the Impala shell") == 1
def test_disconnected_shell(self, vector):
    """Test that the shell presents a disconnected prompt if it can't connect
    """
    # '-ifoo' points the shell at a bogus host, so no connection is possible.
    result = run_impala_shell_interactive(
        vector, 'asdf;', shell_args=['-ifoo'], wait_until_connected=False)
    assert ImpalaShellClass.DISCONNECTED_PROMPT in result.stdout, result.stderr
def test_quit_no_reconnect(self, vector):
    """Test that a disconnected shell does not try to reconnect if quitting"""
    # Both quit forms must exit without attempting to reconnect.
    for quit_cmd in ('quit;', 'exit;'):
        result = run_impala_shell_interactive(
            vector, quit_cmd, shell_args=['-ifoo'], wait_until_connected=False)
        assert "reconnect" not in result.stderr
    # Null case: This is not quitting, so it will result in an attempt to reconnect.
    result = run_impala_shell_interactive(
        vector, 'show tables;', shell_args=['-ifoo'], wait_until_connected=False)
    assert "reconnect" in result.stderr
def test_bash_cmd_timing(self, vector):
    """Test existence of time output in bash commands run from shell"""
    # '!' runs a shell-out command; its timing line goes to stderr.
    result = run_impala_shell_interactive(vector, ["! ls;"])
    assert "Executed in" in result.stderr
@SkipIfLocal.multiple_impalad
@pytest.mark.execute_serially
def test_reconnect(self, vector):
    """Regression Test for IMPALA-1235
    Verifies that a connect command by the user is honoured.
    """
    try:
        # Disconnect existing clients so there are no open sessions.
        self.close_impala_clients()
        hostname = socket.getfqdn()
        initial_impala_service = ImpaladService(hostname)
        # Second impalad of the minicluster, identified by its distinct ports.
        target_impala_service = ImpaladService(hostname, webserver_port=25001,
            beeswax_port=21001, be_port=22001, hs2_port=21051, hs2_http_port=28001)
        # Pick the target port that matches the protocol under test.
        protocol = vector.get_value("protocol").lower()
        if protocol == "hs2":
            target_port = 21051
        elif protocol == "hs2-http":
            target_port = 28001
        else:
            assert protocol == "beeswax"
            target_port = 21001
        # This test is running serially, so there shouldn't be any open sessions, but wait
        # here in case a session from a previous test hasn't been fully closed yet.
        self._wait_for_num_open_sessions(vector, initial_impala_service, 0,
            "first impalad should not have any remaining open sessions.")
        self._wait_for_num_open_sessions(vector, target_impala_service, 0,
            "second impalad should not have any remaining open sessions.")
        # Connect to the first impalad
        p = ImpalaShell(vector)
        # Make sure we're connected <hostname>:<port>
        self._wait_for_num_open_sessions(vector, initial_impala_service, 1,
            "Not connected to %s:%d" % (hostname, get_impalad_port(vector)))
        p.send_cmd("connect %s:%d" % (hostname, target_port))
        # The number of sessions on the target impalad should have been incremented.
        self._wait_for_num_open_sessions(vector,
            target_impala_service, 1, "Not connected to %s:%d" % (hostname, target_port))
        # The prompt must now show the new endpoint.
        assert "[%s:%d] default>" % (hostname, target_port) in p.get_result().stdout
        # The number of sessions on the initial impalad should have been decremented.
        self._wait_for_num_open_sessions(vector, initial_impala_service, 0,
            "Connection to %s:%d should have been closed" % (
                hostname, get_impalad_port(vector)))
    finally:
        # Restore clients for subsequent tests regardless of outcome.
        self.create_impala_clients()
@pytest.mark.execute_serially
def test_ddl_queries_are_closed(self, vector):
    """Regression test for IMPALA-1317
    The shell does not call close() for alter, use and drop queries, leaving them in
    flight. This test issues those queries in interactive mode, and checks the debug
    webpage to confirm that they've been closed.
    TODO: Add every statement type.
    """
    # Disconnect existing clients so there are no open sessions.
    self.close_impala_clients()
    TMP_DB = 'inflight_test_db'
    TMP_TBL = 'tmp_tbl'
    MSG = '%s query should be closed'
    NUM_QUERIES = 'impala-server.num-queries'
    impalad = ImpaladService(socket.getfqdn())
    self._wait_for_num_open_sessions(vector, impalad, 0,
        "Open sessions found after closing all clients.")
    p = ImpalaShell(vector)
    try:
        # Track the query-count metric so we can wait for each statement to register
        # before asserting the in-flight count has dropped back to zero.
        start_num_queries = impalad.get_metric_value(NUM_QUERIES)
        p.send_cmd('create database if not exists %s' % TMP_DB)
        p.send_cmd('use %s' % TMP_DB)
        impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 2)
        assert impalad.wait_for_num_in_flight_queries(0), MSG % 'use'
        p.send_cmd('create table %s(i int)' % TMP_TBL)
        p.send_cmd('alter table %s add columns (j int)' % TMP_TBL)
        impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 4)
        assert impalad.wait_for_num_in_flight_queries(0), MSG % 'alter'
        p.send_cmd('drop table %s' % TMP_TBL)
        impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 5)
        assert impalad.wait_for_num_in_flight_queries(0), MSG % 'drop'
    finally:
        # get_result() must be called to exit the shell.
        p.get_result()
        self._wait_for_num_open_sessions(vector, impalad, 0,
            "shell should close sessions.")
        # Best-effort cleanup of the temp table.
        run_impala_shell_interactive(vector, "drop table if exists %s.%s;" % (
            TMP_DB, TMP_TBL))
        # NOTE(review): this drops database 'foo', not TMP_DB — looks like leftover
        # cleanup from another test; confirm whether it should be TMP_DB.
        run_impala_shell_interactive(vector, "drop database if exists foo;")
        self.create_impala_clients()
def test_multiline_queries_in_history(self, vector, tmp_history_file):
    """Test to ensure that multiline queries with comments are preserved in history
    Ensure that multiline queries are preserved when they're read back from history.
    Additionally, also test that comments are preserved.
    """
    # readline gets its input from tty, so using stdin does not work.
    shell_cmd = get_shell_cmd(vector)
    child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
    # List of (input query, expected text in output).
    # The expected output is usually the same as the input with a number prefix, except
    # where the shell strips newlines before a semicolon.
    queries = [
        ("select\n1;--comment", "[1]: select\n1;--comment"),
        ("select 1 --comment\n;", "[2]: select 1 --comment;"),
        ("select 1 --comment\n\n\n;", "[3]: select 1 --comment;"),
        ("select /*comment*/\n1;", "[4]: select /*comment*/\n1;"),
        ("select\n/*comm\nent*/\n1;", "[5]: select\n/*comm\nent*/\n1;")]
    for query, _ in queries:
        child_proc.expect(PROMPT_REGEX)
        child_proc.sendline(query)
        # Raw string: this is a regex passed to pexpect. The previous plain literal
        # relied on Python leaving the invalid escapes '\(' and '\.' intact, which
        # raises DeprecationWarning/SyntaxWarning on modern Pythons.
        child_proc.expect(r"Fetched 1 row\(s\) in [0-9]+\.?[0-9]*s")
        child_proc.expect(PROMPT_REGEX)
    child_proc.sendline('quit;')
    child_proc.wait()
    # Replay the history from a fresh shell and verify every entry survived.
    p = ImpalaShell(vector)
    p.send_cmd('history')
    result = p.get_result()
    for _, history_entry in queries:
        assert history_entry in result.stderr, "'%s' not in '%s'" % (history_entry,
                                                                     result.stderr)
def test_history_file_option(self, vector, tmp_history_file):
    """
    Setting the 'tmp_history_file' fixture above means that the IMPALA_HISTFILE
    environment will be overridden. Here we override that environment by passing
    the --history_file command line option, ensuring that the history ends up
    in the appropriate spot.
    """
    with NamedTemporaryFile() as new_hist:
        shell_cmd = get_shell_cmd(vector) + ["--history_file=%s" % new_hist.name]
        child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
        child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
        # NOTE(review): ('hi') is a plain string, not a 1-tuple — confirm
        # _expect_with_cmd accepts a bare string here.
        self._expect_with_cmd(child_proc, "select 'hi'", vector, ('hi'))
        child_proc.sendline('exit;')
        child_proc.expect(pexpect.EOF)
        # 'open' instead of the Python-2-only 'file' builtin; the context manager
        # also closes the handle instead of leaking it.
        with open(new_hist.name) as hist_file:
            history_contents = hist_file.read()
        assert "select 'hi'" in history_contents
def test_rerun(self, vector, tmp_history_file):
    """Smoke test for the 'rerun' command"""
    shell_cmd = get_shell_cmd(vector)
    child_proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
    child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
    # Nothing in history yet: any index is out of range.
    self._expect_with_cmd(child_proc, "@1", vector, ("Command index out of range"))
    self._expect_with_cmd(child_proc, "rerun -1", vector,
        ("Command index out of range"))
    self._expect_with_cmd(child_proc, "select 'first_command'", vector,
        ("first_command"))
    # '@N' / 'rerun N': positive indices count from the start of history,
    # negative from the end.
    self._expect_with_cmd(child_proc, "rerun 1", vector, ("first_command"))
    self._expect_with_cmd(child_proc, "@ -1", vector, ("first_command"))
    self._expect_with_cmd(child_proc, "select 'second_command'", vector,
        ("second_command"))
    child_proc.sendline('history;')
    child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
    assert '[1]: select \'first_command\';' in child_proc.before
    assert '[2]: select \'second_command\';' in child_proc.before
    assert '[3]: history;' in child_proc.before
    # Rerunning command should not add an entry into history.
    assert '[4]' not in child_proc.before
    # Index 0 and anything beyond the three history entries is rejected.
    self._expect_with_cmd(child_proc, "@0", vector, ("Command index out of range"))
    self._expect_with_cmd(child_proc, "rerun 4", vector, ("Command index out of range"))
    self._expect_with_cmd(child_proc, "@-4", vector, ("Command index out of range"))
    # Surrounding whitespace is tolerated.
    self._expect_with_cmd(child_proc, " @ 3 ", vector, ("second_command"))
    self._expect_with_cmd(child_proc, "@-3", vector, ("first_command"))
    # Non-integer arguments get a dedicated error message.
    self._expect_with_cmd(child_proc, "@", vector,
        ("Command index to be rerun must be an integer."))
    self._expect_with_cmd(child_proc, "@1foo", vector,
        ("Command index to be rerun must be an integer."))
    self._expect_with_cmd(child_proc, "@1 2", vector,
        ("Command index to be rerun must be an integer."))
    # 'rerun1' (no space) is not recognized as the rerun command at all.
    self._expect_with_cmd(child_proc, "rerun1", vector, ("Syntax error"))
    child_proc.sendline('quit;')
    child_proc.wait()
def test_tip(self, vector):
    """Smoke test for the TIP command"""
    # Temporarily add impala_shell module to path to get at TIPS list for verification
    sys.path.append("%s/shell/" % os.environ['IMPALA_HOME'])
    try:
        import impala_shell
    finally:
        sys.path = sys.path[:-1]
    result = run_impala_shell_interactive(vector, "tip;")
    # The output must contain at least one of the known tips.
    assert any(tip in result.stderr for tip in impala_shell.TIPS), \
        "No tip found in output %s" % result.stderr
def test_var_substitution(self, vector):
    """Check --var substitution for queries read from a SQL file."""
    # Context manager: the original open(...).read() leaked the file handle.
    with open(os.path.join(QUERY_FILE_PATH, 'test_var_substitution.sql')) as f:
        cmds = f.read()
    args = ["--var=foo=123", "--var=BAR=456", "--delimited", "--output_delimiter= "]
    result = run_impala_shell_interactive(vector, cmds, shell_args=args)
    assert_var_substitution(result)
def test_query_option_configuration(self, vector):
    """Query options can be set via -Q, --query_option and the rc/config file;
    'set all' output is used to verify which value won."""
    rcfile_path = os.path.join(QUERY_FILE_PATH, 'impalarc_with_query_options')
    args = ['-Q', 'MT_dop=1', '--query_option=MAX_ERRORS=200',
            '--config_file=%s' % rcfile_path]
    cmds = "set all;"
    result = run_impala_shell_interactive(vector, cmds, shell_args=args)
    # Options from the command line.
    assert "\tMT_DOP: 1" in result.stdout
    assert "\tMAX_ERRORS: 200" in result.stdout
    # Option coming from the config file.
    assert "\tEXPLAIN_LEVEL: 2" in result.stdout
    # An unknown option in the config file is warned about, not fatal.
    assert "INVALID_QUERY_OPTION is not supported for the impalad being connected to, "\
        "ignoring." in result.stdout
    # Verify that query options under [impala] override those under [impala.query_options]
    assert "\tDEFAULT_FILE_FORMAT: avro" in result.stdout
def test_live_option_configuration(self, vector):
    """Test the optional configuration file with live_progress and live_summary."""
    # Positive tests
    # set live_summary and live_progress as True with config file
    rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc3')
    args = ['--config_file=%s' % rcfile_path]
    cmds = "set all;"
    result = run_impala_shell_interactive(vector, cmds, shell_args=args)
    assert 'WARNING:' not in result.stderr, \
        "A valid config file should not trigger any warning: {0}".format(result.stderr)
    assert "\tLIVE_SUMMARY: True" in result.stdout
    assert "\tLIVE_PROGRESS: True" in result.stdout
    # set live_summary and live_progress as False with config file
    rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc4')
    args = ['--config_file=%s' % rcfile_path]
    result = run_impala_shell_interactive(vector, cmds, shell_args=args)
    assert 'WARNING:' not in result.stderr, \
        "A valid config file should not trigger any warning: {0}".format(result.stderr)
    assert "\tLIVE_SUMMARY: False" in result.stdout
    assert "\tLIVE_PROGRESS: False" in result.stdout
    # override options in config file through command line arguments
    args = ['--live_progress', '--live_summary', '--config_file=%s' % rcfile_path]
    result = run_impala_shell_interactive(vector, cmds, shell_args=args)
    # Command-line flags win over the config file's False values.
    assert "\tLIVE_SUMMARY: True" in result.stdout
    assert "\tLIVE_PROGRESS: True" in result.stdout
def test_source_file(self, vector):
    """SOURCE command: runs commands from a file, including a nested SOURCE."""
    cwd = os.getcwd()
    try:
        # Change working dir so that SOURCE command in shell.cmds can find shell2.cmds.
        os.chdir("%s/tests/shell/" % os.environ['IMPALA_HOME'])
        # IMPALA-5416: Test that a command following 'source' won't be run twice.
        result = run_impala_shell_interactive(vector, "source shell.cmds;select \"second "
                                              "command\";")
        assert "Query: USE FUNCTIONAL" in result.stderr
        assert "Query: SHOW TABLES" in result.stderr
        assert "alltypes" in result.stdout
        # This is from shell2.cmds, the result of sourcing a file from a sourced file.
        assert "SELECT VERSION()" in result.stderr
        assert "version()" in result.stdout
        assert len(re.findall("'second command'", result.stdout)) == 1
        # IMPALA-5416: Test that two source commands on a line won't crash the shell.
        result = run_impala_shell_interactive(
            vector, "source shell.cmds;source shell.cmds;")
        # Raw string: '\(' / '\)' are regex escapes; in a plain literal they are
        # invalid string escapes that warn on modern Pythons.
        assert len(re.findall(r"version\(\)", result.stdout)) == 2
    finally:
        os.chdir(cwd)
def test_source_file_with_errors(self, vector):
    """SOURCE stops at the first failing command unless -c (continue) is passed."""
    error_cmds = "%s/tests/shell/shell_error.cmds" % os.environ['IMPALA_HOME']
    # Without -c: the failing USE aborts the file before later commands run.
    result = run_impala_shell_interactive(vector, "source %s;" % error_cmds)
    assert "Could not execute command: USE UNKNOWN_DATABASE" in result.stderr
    assert "Query: USE FUNCTIONAL" not in result.stderr
    # With -c: execution continues past the error.
    result = run_impala_shell_interactive(vector, "source %s;" % error_cmds, ['-c'])
    assert "Could not execute command: USE UNKNOWN_DATABASE" in result.stderr,\
        result.stderr
    assert "Query: USE FUNCTIONAL" in result.stderr, result.stderr
    assert "Query: SHOW TABLES" in result.stderr, result.stderr
    assert "alltypes" in result.stdout, result.stdout
def test_source_missing_file(self, vector):
    """Sourcing a nonexistent file reports the OS error instead of crashing."""
    missing_path = "%s/tests/shell/doesntexist.cmds" % os.environ['IMPALA_HOME']
    result = run_impala_shell_interactive(vector, "source %s;" % missing_path)
    assert "No such file or directory" in result.stderr
def test_zero_row_fetch(self, vector):
    # IMPALA-4418: DROP and USE are generally exceptional statements where
    # the client does not fetch. For statements returning 0 rows we do not
    # want an empty line in stdout.
    result = run_impala_shell_interactive(vector, "-- foo \n use default;")
    # Raw strings: '\[' is a regex escape, but was an invalid string escape in the
    # previous plain literal (warning on modern Pythons). The pattern checks that the
    # prompt is immediately followed by the next bracketed prompt — no blank line.
    assert re.search(r'> \[', result.stdout)
    result = run_impala_shell_interactive(vector,
                                          "select * from functional.alltypes limit 0;")
    assert "Fetched 0 row(s)" in result.stderr
    assert re.search(r'> \[', result.stdout)
def test_set_and_set_all(self, vector):
    """IMPALA-2181. Tests the outputs of SET and SET ALL commands. SET should contain the
    REGULAR and ADVANCED options only. SET ALL should contain all the options grouped by
    display level."""
    shell1 = ImpalaShell(vector)
    shell1.send_cmd("set")
    result = shell1.get_result()
    assert "Query options (defaults shown in []):" in result.stdout
    assert "ABORT_ON_ERROR" in result.stdout
    assert "Advanced Query Options:" in result.stdout
    assert "APPX_COUNT_DISTINCT" in result.stdout
    # SUPPORT_START_OVER is only reported over the beeswax protocol.
    assert vector.get_value("protocol") in ("hs2", "hs2-http")\
        or "SUPPORT_START_OVER" in result.stdout
    # Development, deprecated and removed options should not be shown.
    # Note: there are currently no deprecated options
    assert "Development Query Options:" not in result.stdout
    assert "DEBUG_ACTION" not in result.stdout  # Development option.
    assert "MAX_IO_BUFFERS" not in result.stdout  # Removed option.
    shell2 = ImpalaShell(vector)
    shell2.send_cmd("set all")
    result = shell2.get_result()
    assert "Query options (defaults shown in []):" in result.stdout
    assert "Advanced Query Options:" in result.stdout
    assert "Development Query Options:" in result.stdout
    assert "Deprecated Query Options:" not in result.stdout
    # Slice the output into per-level sections using the group headers.
    advanced_part_start_idx = result.stdout.find("Advanced Query Options")
    development_part_start_idx = result.stdout.find("Development Query Options")
    deprecated_part_start_idx = result.stdout.find("Deprecated Query Options")
    advanced_part = result.stdout[advanced_part_start_idx:development_part_start_idx]
    development_part = result.stdout[development_part_start_idx:deprecated_part_start_idx]
    # Each option must appear in its own section.
    assert "ABORT_ON_ERROR" in result.stdout[:advanced_part_start_idx]
    assert "APPX_COUNT_DISTINCT" in advanced_part
    assert vector.get_value("protocol") in ("hs2", "hs2-http")\
        or "SUPPORT_START_OVER" in advanced_part
    assert "DEBUG_ACTION" in development_part
    # Removed options should not be shown.
    assert "MAX_IO_BUFFERS" not in result.stdout
def check_command_case_sensitivity(self, vector, command, expected):
    """Helper: run 'command' in a fresh shell and require 'expected' in its stderr."""
    shell = ImpalaShell(vector)
    shell.send_cmd(command)
    result = shell.get_result()
    assert expected in result.stderr
def test_unexpected_conversion_for_literal_string_to_lowercase(self, vector):
    # IMPALA-4664: Impala shell can accidentally convert certain literal
    # strings to lowercase. Impala shell splits each command into tokens
    # and then converts the first token to lowercase to figure out how it
    # should execute the command. The splitting is done by spaces only.
    # Thus, if the user types a TAB after the SELECT, the first token after
    # the split becomes the SELECT plus whatever comes after it.
    # Same query with no separator, a tab, and a newline after 'select'.
    for separator in ("", "\t", "\n"):
        query = "select%s'MUST_HAVE_UPPER_STRING'" % separator
        result = run_impala_shell_interactive(vector, query)
        assert re.search('MUST_HAVE_UPPER_STRING', result.stdout)
def test_case_sensitive_command(self, vector):
    # IMPALA-2640: Make a given command case-sensitive
    cwd = os.getcwd()
    try:
        # Mixed-case commands must be echoed back exactly as typed.
        self.check_command_case_sensitivity(vector, "sElEcT VERSION()", "Query: sElEcT")
        self.check_command_case_sensitivity(vector, "sEt VaR:FoO=bOo", "Variable FOO")
        self.check_command_case_sensitivity(vector, "sHoW tables", "Query: sHoW")
        # Change working dir so that SOURCE command in shell_case_sensitive.cmds can
        # find shell_case_sensitive2.cmds.
        os.chdir("%s/tests/shell/" % os.environ['IMPALA_HOME'])
        result = run_impala_shell_interactive(vector,
            "sOuRcE shell_case_sensitive.cmds; SeLeCt 'second command'")
        # print() call instead of the Python-2-only 'print' statement, so the
        # module also parses under Python 3.
        print(result.stderr)
        assert "Query: uSe FUNCTIONAL" in result.stderr
        assert "Query: ShOw TABLES" in result.stderr
        assert "alltypes" in result.stdout
        # This is from shell_case_sensitive2.cmds, the result of sourcing a file
        # from a sourced file.
        print(result.stderr)
        assert "SeLeCt 'second command'" in result.stderr
    finally:
        os.chdir(cwd)
def test_line_with_leading_comment(self, vector, unique_database):
    # IMPALA-2195: A line with a comment produces incorrect command.
    # Each case prefixes a statement with a line/block comment (including
    # non-ascii text) and checks the statement still executes.
    table = "{0}.leading_comment".format(unique_database)
    run_impala_shell_interactive(vector, 'create table {0} (i int);'.format(table))
    result = run_impala_shell_interactive(vector, '-- comment\n'
                                          'insert into {0} values(1);'.format(table))
    assert 'Modified 1 row(s)' in result.stderr
    result = run_impala_shell_interactive(vector, '-- comment\n'
                                          'select * from {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    # Non-ascii (Korean) comment text.
    result = run_impala_shell_interactive(vector, '--한글\n'
                                          'select * from {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    result = run_impala_shell_interactive(vector, '/* 한글 */\n'
                                          'select * from {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    result = run_impala_shell_interactive(vector, '/* comment */\n'
                                          'select * from {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    # Stacked block + line comments.
    result = run_impala_shell_interactive(vector, '/* comment1 */\n'
                                          '-- comment2\n'
                                          'select * from {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    # Block comment spanning multiple lines.
    result = run_impala_shell_interactive(vector, '/* comment1\n'
                                          'comment2 */ select * from {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    # Comment containing SQL text must not be executed.
    result = run_impala_shell_interactive(vector, '/* select * from {0} */ '
                                          'select * from {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    # Leading comments before shell commands (help) as well as queries.
    result = run_impala_shell_interactive(vector, '/* comment */ help use')
    assert 'Executes a USE... query' in result.stdout
    result = run_impala_shell_interactive(vector, '-- comment\n'
                                          ' help use;')
    assert 'Executes a USE... query' in result.stdout
    result = run_impala_shell_interactive(vector, '/* comment1 */\n'
                                          '-- comment2\n'
                                          'desc {0};'.format(table))
    assert 'Fetched 1 row(s)' in result.stderr
    result = run_impala_shell_interactive(vector, '/* comment1 */\n'
                                          '-- comment2\n'
                                          'help use;')
    assert 'Executes a USE... query' in result.stdout
def test_line_ends_with_comment(self, vector):
    # IMPALA-5269: Test lines that end with a comment.
    # Trailing -- comments, both before and after the terminating semicolon.
    queries = ['select 1 + 1; --comment',
               'select 1 + 1 --comment\n;']
    for query in queries:
        result = run_impala_shell_interactive(vector, query)
        assert '| 1 + 1 |' in result.stdout
        assert '| 2 |' in result.stdout
    queries = ['select \'some string\'; --comment',
               'select \'some string\' --comment\n;']
    for query in queries:
        result = run_impala_shell_interactive(vector, query)
        assert '| \'some string\' |' in result.stdout
        assert '| some string |' in result.stdout
    # '--' appearing both as a string literal and inside a comment.
    queries = ['select "--"; -- "--"',
               'select \'--\'; -- "--"',
               'select "--" -- "--"\n;',
               'select \'--\' -- "--"\n;']
    for query in queries:
        result = run_impala_shell_interactive(vector, query)
        assert '| \'--\' |' in result.stdout
        assert '| -- |' in result.stdout
    # Trailing comment on a line that is not a complete statement by itself.
    query = ('select * from (\n' +
             'select count(*) from functional.alltypes\n' +
             ') v; -- Incomplete SQL statement in this line')
    result = run_impala_shell_interactive(vector, query)
    assert '| count(*) |' in result.stdout
    # Multi-line block comment after the semicolon.
    query = ('select id from functional.alltypes\n' +
             'order by id; /*\n' +
             '* Multi-line comment\n' +
             '*/')
    result = run_impala_shell_interactive(vector, query)
    assert '| id |' in result.stdout
def test_fix_infinite_loop(self, vector):
    # IMPALA-6337: Fix infinite loop.
    # Each input mixes a complete statement with stray quote/semicolon fragments
    # that previously confused the shell's statement splitter; the first
    # statement must still execute and return its result.
    result = run_impala_shell_interactive(vector, "select 1 + 1; \"\n;\";")
    assert '| 2 |' in result.stdout
    result = run_impala_shell_interactive(vector, "select '1234'\";\n;\n\";")
    assert '| 1234 |' in result.stdout
    result = run_impala_shell_interactive(vector, "select 1 + 1; \"\n;\"\n;")
    assert '| 2 |' in result.stdout
    # Escaped single quotes and embedded double quotes inside the literal.
    result = run_impala_shell_interactive(vector, "select '1\\'23\\'4'\";\n;\n\";")
    assert '| 1\'23\'4 |' in result.stdout
    result = run_impala_shell_interactive(vector, "select '1\"23\"4'\";\n;\n\";")
    assert '| 1"23"4 |' in result.stdout
def test_comment_with_quotes(self, vector):
    # IMPALA-2751: Comment does not need to have matching quotes
    # Each query embeds an unbalanced quote inside a '--' or '/* */' comment;
    # the shell must still find the statement boundary and return the row.
    queries = [
        "select -- '\n1;",
        'select -- "\n1;',
        "select -- \"'\n 1;",
        "select /*'\n*/ 1;",
        'select /*"\n*/ 1;',
        "select /*\"'\n*/ 1;",
        "with a as (\nselect 1\n-- '\n) select * from a",
        'with a as (\nselect 1\n-- "\n) select * from a',
        "with a as (\nselect 1\n-- '\"\n) select * from a",
    ]
    for query in queries:
        result = run_impala_shell_interactive(vector, query)
        assert '| 1 |' in result.stdout
def test_shell_prompt(self, vector):
    """The prompt tracks the current database: 'use' of an existing db updates it,
    'use' of a missing db leaves it unchanged, and backticks/whitespace in the
    database name (or -d argument) are stripped."""
    shell_cmd = get_shell_cmd(vector)
    proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:])
    proc.expect(":{0}] default>".format(get_impalad_port(vector)))
    # 'use' of a nonexistent db must not change the prompt.
    self._expect_with_cmd(proc, "use foo", vector, (), 'default')
    self._expect_with_cmd(proc, "use functional", vector, (), 'functional')
    self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
    # Backticks and embedded whitespace are stripped from the db name.
    self._expect_with_cmd(proc, 'use `tpch`', vector, (), 'tpch')
    self._expect_with_cmd(proc, 'use ` tpch `', vector, (), 'tpch')
    # '-d' selects the initial database.
    proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', 'functional'])
    proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
    self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
    self._expect_with_cmd(proc, "use tpch", vector, (), 'tpch')
    self._expect_with_cmd(proc, "use foo", vector, (), 'tpch')
    # '-d' values are trimmed of whitespace and backticks too.
    proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', ' functional '])
    proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
    proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', '` functional `'])
    proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
    # Start an Impala shell with an invalid DB.
    proc = pexpect.spawn(shell_cmd[0], shell_cmd[1:] + ['-d', 'foo'])
    proc.expect(":{0}] default>".format(get_impalad_port(vector)))
    self._expect_with_cmd(proc, "use foo", vector, (), 'default')
    self._expect_with_cmd(proc, "use functional", vector, (), 'functional')
    self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
    proc.sendeof()
    proc.wait()
def test_strip_leading_comment(self, vector):
    """Test stripping leading comments from SQL statements

    strip_leading_comment returns a (comment, statement) pair; comment is None
    when there is no leading comment. Comments appearing after the first SQL
    token are left inside the statement untouched."""
    # Line comments.
    assert ('--delete\n', 'select 1') == \
        ImpalaShellClass.strip_leading_comment('--delete\nselect 1')
    assert ('--delete\n', 'select --do not delete\n1') == \
        ImpalaShellClass.strip_leading_comment('--delete\nselect --do not delete\n1')
    assert (None, 'select --do not delete\n1') == \
        ImpalaShellClass.strip_leading_comment('select --do not delete\n1')
    # Block comments, including multi-line ones.
    assert ('/*delete*/\n', 'select 1') == \
        ImpalaShellClass.strip_leading_comment('/*delete*/\nselect 1')
    assert ('/*delete\nme*/\n', 'select 1') == \
        ImpalaShellClass.strip_leading_comment('/*delete\nme*/\nselect 1')
    assert ('/*delete\nme*/\n', 'select 1') == \
        ImpalaShellClass.strip_leading_comment('/*delete\nme*/\nselect 1')
    # Block comment directly adjacent to the statement (no newline/space).
    assert ('/*delete*/', 'select 1') == \
        ImpalaShellClass.strip_leading_comment('/*delete*/select 1')
    assert ('/*delete*/ ', 'select /*do not delete*/ 1') == \
        ImpalaShellClass.strip_leading_comment('/*delete*/ select /*do not delete*/ 1')
    # Several stacked leading comments are all stripped together.
    assert ('/*delete1*/ \n/*delete2*/ \n--delete3 \n', 'select /*do not delete*/ 1') == \
        ImpalaShellClass.strip_leading_comment('/*delete1*/ \n'
                                               '/*delete2*/ \n'
                                               '--delete3 \n'
                                               'select /*do not delete*/ 1')
    assert (None, 'select /*do not delete*/ 1') == \
        ImpalaShellClass.strip_leading_comment('select /*do not delete*/ 1')
    # Hint-style comments inside the statement body are preserved.
    assert ('/*delete*/\n', 'select c1 from\n'
            'a\n'
            'join -- +SHUFFLE\n'
            'b') == \
        ImpalaShellClass.strip_leading_comment('/*delete*/\n'
                                               'select c1 from\n'
                                               'a\n'
                                               'join -- +SHUFFLE\n'
                                               'b')
    assert ('/*delete*/\n', 'select c1 from\n'
            'a\n'
            'join /* +SHUFFLE */\n'
            'b') == \
        ImpalaShellClass.strip_leading_comment('/*delete*/\n'
                                               'select c1 from\n'
                                               'a\n'
                                               'join /* +SHUFFLE */\n'
                                               'b')
    # No comment at all.
    assert (None, 'select 1') == \
        ImpalaShellClass.strip_leading_comment('select 1')
def test_malformed_query(self, vector):
    """Test the handling of malformed query without closing quotation"""
    # The trailing "('bar" leaves an unterminated string literal.
    bad_query = "with v as (select 1) \nselect foo('\\\\'), ('bar \n;"
    shell = ImpalaShell(vector)
    shell.send_cmd(bad_query)
    result = shell.get_result()
    assert "ERROR: ParseException: Unmatched string literal" in result.stderr,\
        result.stderr
def test_timezone_validation(self, vector):
    """Test that query option TIMEZONE is validated when executing a query.
    Query options are not sent to the coordinator immediately, so the error checking
    will only happen when running a query.
    """
    p = ImpalaShell(vector)
    # Setting an invalid timezone succeeds locally...
    p.send_cmd('set timezone=BLA;')
    # ...the error only surfaces once a query actually runs.
    p.send_cmd('select 1;')
    results = p.get_result()
    assert "Fetched 1 row" not in results.stderr
    # NOTE(review): disabled assert kept as-is — presumably the message varies
    # between versions; confirm before re-enabling.
    # assert "ERROR: Errors parsing query options" in results.stderr, results.stderr
    assert "Invalid timezone name 'BLA'" in results.stderr, results.stderr
def test_with_clause(self, vector):
    # IMPALA-7939: Fix issue where CTE that contains "insert", "upsert", "update", or
    # "delete" is categorized as a DML statement.
    dml_keywords = ("insert", "upsert", "update", "delete", "\\'insert\\'",
                    "\\'upsert\\'", "\\'update\\'", "\\'delete\\'")
    query_template = ("with foo as "
                      "(select * from functional.alltypestiny where string_col='%s') "
                      "select * from foo limit 1")
    for keyword in dml_keywords:
        shell = ImpalaShell(vector)
        shell.send_cmd(query_template % keyword)
        # Categorized as a query (fetch attempted), not as DML.
        assert "Fetched 0 row" in shell.get_result().stderr
def test_http_codes(self, vector):
    """Check that the shell prints a good message when using hs2-http protocol
    and the http server returns a 503 error."""
    protocol = vector.get_value("protocol")
    if protocol != 'hs2-http':
        pytest.skip()
    # Start an http server that always returns 503.
    HOST = "localhost"
    PORT = get_unused_port()
    httpd = None
    http_server_thread = None
    try:
        httpd = SocketServer.TCPServer((HOST, PORT), UnavailableRequestHandler)
        http_server_thread = threading.Thread(target=httpd.serve_forever)
        http_server_thread.start()
        # Check that we get a message about the 503 error when we try to connect.
        shell_args = ["--protocol={0}".format(protocol), "-i{0}:{1}".format(HOST, PORT)]
        shell_proc = pexpect.spawn(IMPALA_SHELL_EXECUTABLE, shell_args)
        shell_proc.expect("HTTP code 503", timeout=10)
    finally:
        # Clean up.
        if httpd is not None:
            httpd.shutdown()
            # shutdown() only stops serve_forever(); server_close() releases the
            # listening socket so the port isn't leaked for the rest of the run.
            httpd.server_close()
        if http_server_thread is not None:
            http_server_thread.join()
def run_impala_shell_interactive(vector, input_lines, shell_args=None,
                                 wait_until_connected=True):
    """Runs a command in the Impala shell interactively.

    input_lines may be a single command string or a list of command strings;
    each is sent to the shell in order and the combined result is returned.
    """
    # if argument "input_lines" is a string, makes it into a list
    if isinstance(input_lines, str):
        input_lines = [input_lines]
    # workaround to make Popen environment 'utf-8' compatible
    # since piping defaults to ascii.
    # Copy os.environ instead of aliasing it: the original assignment mutated the
    # test process's own environment, leaking PYTHONIOENCODING into later tests.
    my_env = dict(os.environ)
    my_env['PYTHONIOENCODING'] = 'utf-8'
    p = ImpalaShell(vector, args=shell_args, env=my_env,
                    wait_until_connected=wait_until_connected)
    for line in input_lines:
        p.send_cmd(line)
    return p.get_result()
|
test_comms.py | import asyncio
import types
from functools import partial
import os
import sys
import threading
import warnings
import pkg_resources
import pytest
from tornado import ioloop
from tornado.concurrent import Future
import distributed
from distributed.metrics import time
from distributed.utils import get_ip, get_ipv6
from distributed.utils_test import (
requires_ipv6,
has_ipv6,
get_cert,
get_server_ssl_context,
get_client_ssl_context,
)
from distributed.utils_test import loop # noqa: F401
from distributed.protocol import to_serialize, Serialized, serialize, deserialize
from distributed.comm.registry import backends, get_backend
from distributed.comm import (
tcp,
inproc,
connect,
listen,
CommClosedError,
parse_address,
parse_host_port,
unparse_host_port,
resolve_address,
get_address_host,
get_local_address_for,
)
# Addresses used throughout the comm tests. IPv6 detection can emit warnings,
# which are captured (and discarded) here.
EXTERNAL_IP4 = get_ip()
if has_ipv6():
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        EXTERNAL_IP6 = get_ipv6()

ca_file = get_cert("tls-ca-cert.pem")

# The Subject field of our test certs
cert_subject = (
    (("countryName", "XY"),),
    (("localityName", "Dask-distributed"),),
    (("organizationName", "Dask"),),
    (("commonName", "localhost"),),
)
def check_tls_extra(info):
    """Validate the extra_info dict reported for a TLS comm: the peer
    certificate subject must match the test cert, and a reasonably strong
    cipher must be negotiated."""
    assert isinstance(info, dict)
    assert info["peercert"]["subject"] == cert_subject
    assert "cipher" in info
    cipher, proto, bits = info["cipher"]
    # Most likely
    assert "AES" in cipher
    assert "TLS" in proto
    assert bits >= 128
# Default listen/connect kwargs that wire the test SSL contexts into tls:// comms.
tls_kwargs = dict(
    listen_args={"ssl_context": get_server_ssl_context()},
    connect_args={"ssl_context": get_client_ssl_context()},
)
@pytest.mark.asyncio  # NOTE(review): mark on a non-test helper has no effect — confirm intent
async def get_comm_pair(listen_addr, listen_args={}, connect_args={}, **kwargs):
    """Open a listener on listen_addr, connect to it, and return the
    (client_comm, server_comm) pair once the server side has accepted.

    Note: the mutable {} defaults are only unpacked, never mutated, so they
    are safe here despite the usual mutable-default caveat.
    """
    q = asyncio.Queue()

    async def handle_comm(comm):
        # Hand each accepted comm over to the waiting caller.
        await q.put(comm)

    listener = await listen(listen_addr, handle_comm, **listen_args, **kwargs)
    comm = await connect(listener.contact_address, **connect_args, **kwargs)
    serv_comm = await q.get()
    return (comm, serv_comm)
def get_tcp_comm_pair(**kwargs):
    # Plain TCP pair.
    return get_comm_pair("tcp://", **kwargs)


def get_tls_comm_pair(**kwargs):
    # TLS pair: inject the test SSL contexts, then delegate.
    kwargs.update(tls_kwargs)
    return get_comm_pair("tls://", **kwargs)


def get_inproc_comm_pair(**kwargs):
    # In-process (same event loop) pair.
    return get_comm_pair("inproc://", **kwargs)
async def debug_loop():
    """
    Debug helper
    """
    # Print the current IOLoop and its registered handlers twice a second, forever.
    while True:
        current = ioloop.IOLoop.current()
        print(".", current, current._handlers)
        await asyncio.sleep(0.50)
#
# Test utility functions
#
def test_parse_host_port():
    """parse_host_port splits 'host:port' (with bracketed IPv6), applying the
    optional default port only when none is given."""
    f = parse_host_port
    # ((args...), expected (host, port)) — the optional second arg is the default port.
    good_cases = [
        (("localhost:123",), ("localhost", 123)),
        (("127.0.0.1:456",), ("127.0.0.1", 456)),
        (("localhost:123", 80), ("localhost", 123)),
        (("localhost", 80), ("localhost", 80)),
        (("[::1]:123",), ("::1", 123)),
        (("[fe80::1]:123", 80), ("fe80::1", 123)),
        (("[::1]", 80), ("::1", 80)),
    ]
    for args, expected in good_cases:
        assert f(*args) == expected
    # Missing port with no default, and unbracketed IPv6, are rejected.
    for bad in ("localhost", "[::1]", "::1:123", "::1"):
        with pytest.raises(ValueError):
            f(bad)
def test_unparse_host_port():
    """unparse_host_port(): IPv6 hosts get bracketed; None port is omitted."""
    f = unparse_host_port
    cases = [
        (("localhost", 123), "localhost:123"),
        (("127.0.0.1", 123), "127.0.0.1:123"),
        (("::1", 123), "[::1]:123"),
        (("[::1]", 123), "[::1]:123"),
        (("127.0.0.1",), "127.0.0.1"),
        (("127.0.0.1", None), "127.0.0.1"),
        (("127.0.0.1", "*"), "127.0.0.1:*"),
        (("::1",), "[::1]"),
        (("[::1]",), "[::1]"),
        (("::1", "*"), "[::1]:*"),
    ]
    for args, expected in cases:
        assert f(*args) == expected
def test_get_address_host():
    # get_address_host() extracts the host / identity part of an address.
    f = get_address_host

    assert f("tcp://127.0.0.1:123") == "127.0.0.1"
    assert f("inproc://%s/%d/123" % (get_ip(), os.getpid())) == get_ip()
def test_resolve_address():
    """resolve_address() canonicalizes addresses: it adds the default tcp
    scheme where missing and resolves hostnames to IPs, leaving wildcard
    and numeric addresses untouched.

    Fix: a duplicated ``tcp://0.0.0.0:456`` assertion was removed.
    """
    f = resolve_address

    assert f("tcp://127.0.0.1:123") == "tcp://127.0.0.1:123"
    assert f("127.0.0.2:789") == "tcp://127.0.0.2:789"
    # Wildcard addresses pass through unchanged
    assert f("tcp://0.0.0.0:456") == "tcp://0.0.0.0:456"

    if has_ipv6():
        assert f("tcp://[::1]:123") == "tcp://[::1]:123"
        assert f("tls://[::1]:123") == "tls://[::1]:123"
        # OS X returns '::0.0.0.2' as canonical representation
        assert f("[::2]:789") in ("tcp://[::2]:789", "tcp://[::0.0.0.2]:789")
        assert f("tcp://[::]:123") == "tcp://[::]:123"

    # Hostnames are resolved to IPs
    assert f("localhost:123") == "tcp://127.0.0.1:123"
    assert f("tcp://localhost:456") == "tcp://127.0.0.1:456"
    assert f("tls://localhost:456") == "tls://127.0.0.1:456"
def test_get_local_address_for():
    # get_local_address_for() returns a local contact address suitable for
    # reaching the given peer (host only for tcp; a fresh address for inproc).
    f = get_local_address_for

    assert f("tcp://127.0.0.1:80") == "tcp://127.0.0.1"
    assert f("tcp://8.8.8.8:4444") == "tcp://" + get_ip()
    if has_ipv6():
        assert f("tcp://[::1]:123") == "tcp://[::1]"

    inproc_arg = "inproc://%s/%d/444" % (get_ip(), os.getpid())
    inproc_res = f(inproc_arg)
    assert inproc_res.startswith("inproc://")
    # A distinct inproc address is generated for the local endpoint
    assert inproc_res != inproc_arg
#
# Test concrete transport APIs
#
@pytest.mark.asyncio
async def test_tcp_specific():
    """
    Test concrete TCP API: listener/connector classes, peer addresses,
    empty extra_info, and a ping/pong exchange with 100 concurrent clients.
    """

    async def handle_comm(comm):
        # Server side: peer must be a tcp:// address on our host; echo a pong.
        assert comm.peer_address.startswith("tcp://" + host)
        assert comm.extra_info == {}
        msg = await comm.read()
        msg["op"] = "pong"
        await comm.write(msg)
        await comm.close()

    listener = await tcp.TCPListener("localhost", handle_comm)
    host, port = listener.get_host_port()
    assert host in ("localhost", "127.0.0.1", "::1")
    assert port > 0

    connector = tcp.TCPConnector()
    l = []  # keys of completed round-trips (shared across client coroutines)

    async def client_communicate(key, delay=0):
        addr = "%s:%d" % (host, port)
        comm = await connector.connect(addr)
        assert comm.peer_address == "tcp://" + addr
        assert comm.extra_info == {}
        await comm.write({"op": "ping", "data": key})
        if delay:
            await asyncio.sleep(delay)
        msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        await comm.close()

    await client_communicate(key=1234)

    # Many clients at once
    N = 100
    futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(N))
@pytest.mark.asyncio
async def test_tls_specific():
    """
    Test concrete TLS API: like test_tcp_specific, but over TLS, also
    validating the TLS-specific extra_info on both sides.
    """

    async def handle_comm(comm):
        assert comm.peer_address.startswith("tls://" + host)
        check_tls_extra(comm.extra_info)
        msg = await comm.read()
        msg["op"] = "pong"
        await comm.write(msg)
        await comm.close()

    server_ctx = get_server_ssl_context()
    client_ctx = get_client_ssl_context()

    listener = await tcp.TLSListener("localhost", handle_comm, ssl_context=server_ctx)
    host, port = listener.get_host_port()
    assert host in ("localhost", "127.0.0.1", "::1")
    assert port > 0

    connector = tcp.TLSConnector()
    l = []  # keys of completed round-trips

    async def client_communicate(key, delay=0):
        addr = "%s:%d" % (host, port)
        comm = await connector.connect(addr, ssl_context=client_ctx)
        assert comm.peer_address == "tls://" + addr
        check_tls_extra(comm.extra_info)
        await comm.write({"op": "ping", "data": key})
        if delay:
            await asyncio.sleep(delay)
        msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        await comm.close()

    await client_communicate(key=1234)

    # Many clients at once
    N = 100
    futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(N))
@pytest.mark.asyncio
async def test_comm_failure_threading():
    """
    When we fail to connect, make sure we don't make a lot
    of threads.

    We only assert for PY3, because the thread limit only is
    set for python 3. See github PR #2403 discussion for info.
    """

    async def sleep_for_60ms():
        # Sample the active thread count while the failing connect runs,
        # returning the peak observed.
        max_thread_count = 0
        for x in range(60):
            await asyncio.sleep(0.001)
            thread_count = threading.active_count()
            if thread_count > max_thread_count:
                max_thread_count = thread_count
        return max_thread_count

    original_thread_count = threading.active_count()

    # tcp.TCPConnector()
    sleep_future = sleep_for_60ms()
    with pytest.raises(IOError):
        # Connect to a port that nothing listens on, with a short timeout
        await connect("tcp://localhost:28400", 0.052)
    max_thread_count = await sleep_future
    # 2 is the number set by BaseTCPConnector.executor (ThreadPoolExecutor)
    assert max_thread_count <= 2 + original_thread_count

    # tcp.TLSConnector()
    sleep_future = sleep_for_60ms()
    with pytest.raises(IOError):
        await connect(
            "tls://localhost:28400", 0.052, ssl_context=get_client_ssl_context(),
        )
    max_thread_count = await sleep_future
    assert max_thread_count <= 2 + original_thread_count
async def check_inproc_specific(run_client):
    """
    Test concrete InProc API.

    *run_client* wraps each client coroutine, allowing the same scenario to
    run on the listener's thread or on separate threads (see run_coro /
    run_coro_in_thread below).
    """
    listener_addr = inproc.global_manager.new_address()
    addr_head = listener_addr.rpartition("/")[0]

    client_addresses = set()

    N_MSGS = 3

    async def handle_comm(comm):
        # Server side: each client connection gets a distinct peer address.
        assert comm.peer_address.startswith("inproc://" + addr_head)
        client_addresses.add(comm.peer_address)
        for i in range(N_MSGS):
            msg = await comm.read()
            msg["op"] = "pong"
            await comm.write(msg)
        await comm.close()

    listener = await inproc.InProcListener(listener_addr, handle_comm)
    assert (
        listener.listen_address
        == listener.contact_address
        == "inproc://" + listener_addr
    )

    connector = inproc.InProcConnector(inproc.global_manager)
    l = []  # keys of completed round-trips

    async def client_communicate(key, delay=0):
        comm = await connector.connect(listener_addr)
        assert comm.peer_address == "inproc://" + listener_addr
        for i in range(N_MSGS):
            await comm.write({"op": "ping", "data": key})
            if delay:
                await asyncio.sleep(delay)
            msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        # Server closed the comm after N_MSGS; further reads must fail.
        with pytest.raises(CommClosedError):
            await comm.read()
        await comm.close()

    client_communicate = partial(run_client, client_communicate)

    await client_communicate(key=1234)

    # Many clients at once
    N = 20
    futures = [client_communicate(key=i, delay=0.001) for i in range(N)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(N))
    assert len(client_addresses) == N + 1
    assert listener.contact_address not in client_addresses
def run_coro(func, *args, **kwargs):
    # Run the coroutine function directly on the caller's event loop.
    return func(*args, **kwargs)


def run_coro_in_thread(func, *args, **kwargs):
    # Run the coroutine function on a fresh IOLoop in a separate thread,
    # reporting the result (or exception) back to the caller's loop via a
    # Future.  NOTE(review): `fut.set_exc_info` suggests a Tornado Future
    # rather than an asyncio one — confirm against this file's imports.
    fut = Future()
    main_loop = ioloop.IOLoop.current()

    def run():
        thread_loop = ioloop.IOLoop()  # need fresh IO loop for run_sync()
        try:
            res = thread_loop.run_sync(partial(func, *args, **kwargs), timeout=10)
        except Exception:
            main_loop.add_callback(fut.set_exc_info, sys.exc_info())
        else:
            main_loop.add_callback(fut.set_result, res)
        finally:
            thread_loop.close()

    t = threading.Thread(target=run)
    t.start()
    return fut
@pytest.mark.asyncio
async def test_inproc_specific_same_thread():
    # InProc scenario with clients on the listener's own thread.
    await check_inproc_specific(run_coro)


@pytest.mark.asyncio
async def test_inproc_specific_different_threads():
    # InProc scenario with each client on its own thread/IOLoop.
    await check_inproc_specific(run_coro_in_thread)
#
# Test communications through the abstract API
#
async def check_client_server(
    addr,
    check_listen_addr=None,
    check_contact_addr=None,
    listen_args=None,
    connect_args=None,
):
    """
    Abstract client / server test.

    Listens on *addr*, validates the listener's bound/contact addresses via
    the optional checker callbacks, then runs a ping/pong exchange with one
    client followed by 20 concurrent clients.

    Fix: the mutable default dict arguments were replaced with ``None``
    sentinels; the existing ``or`` fallbacks already treat None and {} alike,
    so callers are unaffected.
    """

    async def handle_comm(comm):
        # Server side: expect a ping (answered with pong) then a foobar.
        scheme, loc = parse_address(comm.peer_address)
        assert scheme == bound_scheme

        msg = await comm.read()
        assert msg["op"] == "ping"
        msg["op"] = "pong"
        await comm.write(msg)

        msg = await comm.read()
        assert msg["op"] == "foobar"

        await comm.close()

    # Arbitrary connection args should be ignored
    listen_args = listen_args or {"xxx": "bar"}
    connect_args = connect_args or {"xxx": "foo"}

    listener = await listen(addr, handle_comm, **listen_args)

    # Check listener properties
    bound_addr = listener.listen_address
    bound_scheme, bound_loc = parse_address(bound_addr)
    assert bound_scheme in backends
    assert bound_scheme == parse_address(addr)[0]

    if check_listen_addr is not None:
        check_listen_addr(bound_loc)

    contact_addr = listener.contact_address
    contact_scheme, contact_loc = parse_address(contact_addr)
    assert contact_scheme == bound_scheme

    if check_contact_addr is not None:
        check_contact_addr(contact_loc)
    else:
        assert contact_addr == bound_addr

    # Check client <-> server comms
    l = []  # keys of completed round-trips

    async def client_communicate(key, delay=0):
        comm = await connect(listener.contact_address, **connect_args)
        assert comm.peer_address == listener.contact_address

        await comm.write({"op": "ping", "data": key})
        await comm.write({"op": "foobar"})
        if delay:
            await asyncio.sleep(delay)
        msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        l.append(key)
        await comm.close()

    await client_communicate(key=1234)

    # Many clients at once
    futures = [client_communicate(key=i, delay=0.05) for i in range(20)]
    await asyncio.gather(*futures)
    assert set(l) == {1234} | set(range(20))

    listener.stop()
@pytest.mark.asyncio
async def test_ucx_client_server():
    # Skip unless the optional UCX transport and the ucp package are present.
    pytest.importorskip("distributed.comm.ucx")
    ucp = pytest.importorskip("ucp")

    addr = ucp.get_address()
    await check_client_server("ucx://" + addr)
def tcp_eq(expected_host, expected_port=None):
    """Build a checker asserting a TCP location has the given host, and the
    given port (or any valid ephemeral port when *expected_port* is None)."""

    def checker(loc):
        host, port = parse_host_port(loc)
        assert host == expected_host
        if expected_port is None:
            # Any non-privileged, in-range port is acceptable
            assert 1023 < port < 65536
        else:
            assert port == expected_port

    return checker


# TLS locations have the same host:port shape as TCP ones.
tls_eq = tcp_eq
def inproc_check():
    """Build a checker validating the ip/pid components of an inproc
    location (``<ip>/<pid>/<suffix>``) against the current process."""
    expected = (get_ip(), os.getpid())

    def checker(loc):
        ip, pid, _suffix = loc.split("/")
        assert (ip, int(pid)) == expected

    return checker
@pytest.mark.asyncio
async def test_default_client_server_ipv4():
    # Default scheme is (currently) TCP
    await check_client_server("127.0.0.1", tcp_eq("127.0.0.1"))
    await check_client_server("127.0.0.1:3201", tcp_eq("127.0.0.1", 3201))
    await check_client_server("0.0.0.0", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
    await check_client_server(
        "0.0.0.0:3202", tcp_eq("0.0.0.0", 3202), tcp_eq(EXTERNAL_IP4, 3202)
    )
    # IPv4 is preferred for the bound address
    await check_client_server("", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
    await check_client_server(
        ":3203", tcp_eq("0.0.0.0", 3203), tcp_eq(EXTERNAL_IP4, 3203)
    )


@requires_ipv6
@pytest.mark.asyncio
async def test_default_client_server_ipv6():
    # Scheme-less IPv6 addresses also default to TCP
    await check_client_server("[::1]", tcp_eq("::1"))
    await check_client_server("[::1]:3211", tcp_eq("::1", 3211))
    await check_client_server("[::]", tcp_eq("::"), tcp_eq(EXTERNAL_IP6))
    await check_client_server(
        "[::]:3212", tcp_eq("::", 3212), tcp_eq(EXTERNAL_IP6, 3212)
    )


@pytest.mark.asyncio
async def test_tcp_client_server_ipv4():
    # Explicit tcp:// scheme, IPv4
    await check_client_server("tcp://127.0.0.1", tcp_eq("127.0.0.1"))
    await check_client_server("tcp://127.0.0.1:3221", tcp_eq("127.0.0.1", 3221))
    await check_client_server("tcp://0.0.0.0", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
    await check_client_server(
        "tcp://0.0.0.0:3222", tcp_eq("0.0.0.0", 3222), tcp_eq(EXTERNAL_IP4, 3222)
    )
    await check_client_server("tcp://", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
    await check_client_server(
        "tcp://:3223", tcp_eq("0.0.0.0", 3223), tcp_eq(EXTERNAL_IP4, 3223)
    )


@requires_ipv6
@pytest.mark.asyncio
async def test_tcp_client_server_ipv6():
    # Explicit tcp:// scheme, IPv6
    await check_client_server("tcp://[::1]", tcp_eq("::1"))
    await check_client_server("tcp://[::1]:3231", tcp_eq("::1", 3231))
    await check_client_server("tcp://[::]", tcp_eq("::"), tcp_eq(EXTERNAL_IP6))
    await check_client_server(
        "tcp://[::]:3232", tcp_eq("::", 3232), tcp_eq(EXTERNAL_IP6, 3232)
    )


@pytest.mark.asyncio
async def test_tls_client_server_ipv4():
    # tls:// scheme with the standard test SSL contexts
    await check_client_server("tls://127.0.0.1", tls_eq("127.0.0.1"), **tls_kwargs)
    await check_client_server(
        "tls://127.0.0.1:3221", tls_eq("127.0.0.1", 3221), **tls_kwargs
    )
    await check_client_server(
        "tls://", tls_eq("0.0.0.0"), tls_eq(EXTERNAL_IP4), **tls_kwargs
    )


@requires_ipv6
@pytest.mark.asyncio
async def test_tls_client_server_ipv6():
    await check_client_server("tls://[::1]", tls_eq("::1"), **tls_kwargs)


@pytest.mark.asyncio
async def test_inproc_client_server():
    # inproc:// addresses embed ip/pid of the current process
    await check_client_server("inproc://", inproc_check())
    await check_client_server(inproc.new_address(), inproc_check())
#
# TLS certificate handling
#
@pytest.mark.asyncio
async def test_tls_reject_certificate():
    """Both endpoints must reject peers whose certs aren't signed by our CA.

    Fix: the previous ``try/except AssertionError`` fallback checked
    ``os.name == "nt"`` while already inside an ``os.name != "nt"`` guard,
    so the fallback branch was unreachable; it is collapsed to the plain
    assertion.
    """
    cli_ctx = get_client_ssl_context()
    serv_ctx = get_server_ssl_context()

    # These certs are not signed by our test CA
    bad_cert_key = ("tls-self-signed-cert.pem", "tls-self-signed-key.pem")
    bad_cli_ctx = get_client_ssl_context(*bad_cert_key)
    bad_serv_ctx = get_server_ssl_context(*bad_cert_key)

    async def handle_comm(comm):
        scheme, loc = parse_address(comm.peer_address)
        assert scheme == "tls"
        await comm.close()

    # Listener refuses a connector not signed by the CA
    listener = await listen("tls://", handle_comm, ssl_context=serv_ctx)

    with pytest.raises(EnvironmentError) as excinfo:
        comm = await connect(
            listener.contact_address, timeout=0.5, ssl_context=bad_cli_ctx,
        )
        await comm.write({"x": "foo"})  # TODO: why is this necessary in Tornado 6 ?

    if os.name != "nt":
        # See https://serverfault.com/questions/793260/what-does-tlsv1-alert-unknown-ca-mean
        assert "unknown ca" in str(excinfo.value)

    # Sanity check
    comm = await connect(listener.contact_address, timeout=2, ssl_context=cli_ctx)
    await comm.close()

    # Connector refuses a listener not signed by the CA
    listener = await listen("tls://", handle_comm, ssl_context=bad_serv_ctx)

    with pytest.raises(EnvironmentError) as excinfo:
        await connect(
            listener.contact_address, timeout=2, ssl_context=cli_ctx,
        )
    assert "certificate verify failed" in str(excinfo.value)
#
# Test communication closing
#
async def check_comm_closed_implicit(addr, delay=None, listen_args=None, connect_args=None):
    """After the server closes a comm, client writes and reads must raise
    CommClosedError.

    Fix: mutable default dict arguments replaced with ``None`` sentinels.
    (*delay* is accepted for interface compatibility but unused here.)
    """
    listen_args = {} if listen_args is None else listen_args
    connect_args = {} if connect_args is None else connect_args

    async def handle_comm(comm):
        # Server closes immediately
        await comm.close()

    listener = await listen(addr, handle_comm, **listen_args)
    contact_addr = listener.contact_address

    comm = await connect(contact_addr, **connect_args)
    with pytest.raises(CommClosedError):
        await comm.write({})

    comm = await connect(contact_addr, **connect_args)
    with pytest.raises(CommClosedError):
        await comm.read()
@pytest.mark.asyncio
async def test_tcp_comm_closed_implicit():
    # Server-initiated close over TCP
    await check_comm_closed_implicit("tcp://127.0.0.1")


@pytest.mark.asyncio
async def test_tls_comm_closed_implicit():
    # Server-initiated close over TLS
    await check_comm_closed_implicit("tls://127.0.0.1", **tls_kwargs)


@pytest.mark.asyncio
async def test_inproc_comm_closed_implicit():
    # Server-initiated close over the in-process transport
    await check_comm_closed_implicit(inproc.new_address())
async def check_comm_closed_explicit(addr, listen_args={}, connect_args={}):
    # After one side explicitly closes, both in-flight and new reads, and
    # writes, on either end must raise CommClosedError.
    a, b = await get_comm_pair(addr, listen_args=listen_args, connect_args=connect_args)
    a_read = a.read()
    b_read = b.read()
    await a.close()
    # In-flight reads should abort with CommClosedError
    with pytest.raises(CommClosedError):
        await a_read
    with pytest.raises(CommClosedError):
        await b_read
    # New reads as well
    with pytest.raises(CommClosedError):
        await a.read()
    with pytest.raises(CommClosedError):
        await b.read()
    # And writes
    with pytest.raises(CommClosedError):
        await a.write({})
    with pytest.raises(CommClosedError):
        await b.write({})
    await b.close()
@pytest.mark.asyncio
async def test_tcp_comm_closed_explicit():
    # Explicit close over TCP
    await check_comm_closed_explicit("tcp://127.0.0.1")


@pytest.mark.asyncio
async def test_tls_comm_closed_explicit():
    # Explicit close over TLS
    await check_comm_closed_explicit("tls://127.0.0.1", **tls_kwargs)


@pytest.mark.asyncio
async def test_inproc_comm_closed_explicit():
    # Explicit close over the in-process transport
    await check_comm_closed_explicit(inproc.new_address())
@pytest.mark.asyncio
async def test_inproc_comm_closed_explicit_2():
    # InProc-specific close semantics: the server-side handler observes the
    # client's close as a CommClosedError on read, and a write to a comm
    # whose peer never reads eventually marks the comm closed.
    listener_errors = []

    async def handle_comm(comm):
        # Wait for the client to close; the read then raises.
        try:
            await comm.read()
        except CommClosedError:
            assert comm.closed()
            listener_errors.append(True)
        else:
            await comm.close()

    listener = await listen("inproc://", handle_comm)
    contact_addr = listener.contact_address

    comm = await connect(contact_addr)
    await comm.close()
    assert comm.closed()
    # Poll (with a 1 s deadline) until the handler has seen the close
    start = time()
    while len(listener_errors) < 1:
        assert time() < start + 1
        await asyncio.sleep(0.01)
    assert len(listener_errors) == 1

    with pytest.raises(CommClosedError):
        await comm.read()
    with pytest.raises(CommClosedError):
        await comm.write("foo")

    comm = await connect(contact_addr)
    await comm.write("foo")
    # Handler takes the non-error path and closes; our end then fails
    with pytest.raises(CommClosedError):
        await comm.read()
    with pytest.raises(CommClosedError):
        await comm.write("foo")
    assert comm.closed()

    comm = await connect(contact_addr)
    await comm.write("foo")
    # Wait (2 s deadline) for the close to propagate to our side
    start = time()
    while not comm.closed():
        await asyncio.sleep(0.01)
        assert time() < start + 2
    # Closing an already-closed comm is a no-op
    await comm.close()
    await comm.close()
#
# Various stress tests
#
async def check_connect_timeout(addr):
    # connect() to a non-responsive address must fail within roughly the
    # requested timeout (generous upper bound to absorb CI jitter).
    t1 = time()
    with pytest.raises(IOError):
        await connect(addr, timeout=0.15)
    dt = time() - t1
    assert 1 >= dt >= 0.1
@pytest.mark.asyncio
async def test_tcp_connect_timeout():
    # Port assumed unused on the test host
    await check_connect_timeout("tcp://127.0.0.1:44444")


@pytest.mark.asyncio
async def test_inproc_connect_timeout():
    # Fresh inproc address with no listener behind it
    await check_connect_timeout(inproc.new_address())
async def check_many_listeners(addr):
    """Open many listeners on *addr* and verify each one is bound to a
    distinct listen and contact address, then stop them all."""

    async def handle_comm(comm):
        pass

    N = 100
    listeners = [await listen(addr, handle_comm) for _ in range(N)]
    assert len({listener.listen_address for listener in listeners}) == N
    assert len({listener.contact_address for listener in listeners}) == N

    for listener in listeners:
        listener.stop()
@pytest.mark.asyncio
async def test_tcp_many_listeners():
    # Ephemeral-port allocation must yield distinct addresses
    await check_many_listeners("tcp://127.0.0.1")
    await check_many_listeners("tcp://0.0.0.0")
    await check_many_listeners("tcp://")


@pytest.mark.asyncio
async def test_inproc_many_listeners():
    await check_many_listeners("inproc://")
#
# Test deserialization
#
async def check_listener_deserialize(addr, deserialize, in_value, check_out):
    # Write *in_value* from a client to a listener created with the given
    # *deserialize* flag, and validate the received message via *check_out*.
    q = asyncio.Queue()

    async def handle_comm(comm):
        msg = await comm.read()
        q.put_nowait(msg)
        await comm.close()

    async with listen(addr, handle_comm, deserialize=deserialize) as listener:
        comm = await connect(listener.contact_address)

        await comm.write(in_value)
        out_value = await q.get()
        check_out(out_value)
        await comm.close()
async def check_connector_deserialize(addr, deserialize, in_value, check_out):
    # Mirror of check_listener_deserialize: the *deserialize* flag is set on
    # the connecting side, and the server writes *in_value* to the client.
    done = asyncio.Event()

    async def handle_comm(comm):
        await comm.write(in_value)
        await done.wait()
        await comm.close()

    async with listen(addr, handle_comm) as listener:
        comm = await connect(listener.contact_address, deserialize=deserialize)

        out_value = await comm.read()
        done.set()
        await comm.close()
    check_out(out_value)
async def check_deserialize(addr):
    """
    Check the "deserialize" flag on connect() and listen().
    """
    # Test with Serialize and Serialized objects
    msg = {
        "op": "update",
        "x": b"abc",
        "to_ser": [to_serialize(123)],
        "ser": Serialized(*serialize(456)),
    }
    msg_orig = msg.copy()

    def check_out_false(out_value):
        # Check output with deserialize=False
        out_value = out_value.copy()  # in case transport passed the object as-is
        to_ser = out_value.pop("to_ser")
        ser = out_value.pop("ser")
        expected_msg = msg_orig.copy()
        del expected_msg["ser"]
        del expected_msg["to_ser"]
        assert out_value == expected_msg

        assert isinstance(ser, Serialized)
        assert deserialize(ser.header, ser.frames) == 456

        assert isinstance(to_ser, list)
        (to_ser,) = to_ser
        # The to_serialize() value could have been actually serialized
        # or not (it's a transport-specific optimization)
        if isinstance(to_ser, Serialized):
            assert deserialize(to_ser.header, to_ser.frames) == 123
        else:
            assert to_ser == to_serialize(123)

    def check_out_true(out_value):
        # Check output with deserialize=True
        expected_msg = msg.copy()
        expected_msg["ser"] = 456
        expected_msg["to_ser"] = [123]
        assert out_value == expected_msg

    await check_listener_deserialize(addr, False, msg, check_out_false)
    await check_connector_deserialize(addr, False, msg, check_out_false)

    await check_listener_deserialize(addr, True, msg, check_out_true)
    await check_connector_deserialize(addr, True, msg, check_out_true)

    # Test with long bytestrings, large enough to be transferred
    # as a separate payload
    _uncompressible = os.urandom(1024 ** 2) * 4  # end size: 4 MB
    msg = {
        "op": "update",
        "x": _uncompressible,
        "to_ser": [to_serialize(_uncompressible)],
        "ser": Serialized(*serialize(_uncompressible)),
    }
    msg_orig = msg.copy()

    def check_out(deserialize_flag, out_value):
        # Check output for either value of the deserialize flag
        assert sorted(out_value) == sorted(msg_orig)
        out_value = out_value.copy()  # in case transport passed the object as-is
        to_ser = out_value.pop("to_ser")
        ser = out_value.pop("ser")
        expected_msg = msg_orig.copy()
        del expected_msg["ser"]
        del expected_msg["to_ser"]
        assert out_value == expected_msg

        if deserialize_flag:
            assert isinstance(ser, (bytes, bytearray))
            assert bytes(ser) == _uncompressible
        else:
            assert isinstance(ser, Serialized)
            assert deserialize(ser.header, ser.frames) == _uncompressible
        assert isinstance(to_ser, list)
        (to_ser,) = to_ser
        # The to_serialize() value could have been actually serialized
        # or not (it's a transport-specific optimization)
        if isinstance(to_ser, Serialized):
            assert deserialize(to_ser.header, to_ser.frames) == _uncompressible
        else:
            assert to_ser == to_serialize(_uncompressible)

    await check_listener_deserialize(addr, False, msg, partial(check_out, False))
    await check_connector_deserialize(addr, False, msg, partial(check_out, False))
    await check_listener_deserialize(addr, True, msg, partial(check_out, True))
    await check_connector_deserialize(addr, True, msg, partial(check_out, True))
@pytest.mark.xfail(reason="intermittent failure on windows")
@pytest.mark.asyncio
async def test_tcp_deserialize():
    await check_deserialize("tcp://")


@pytest.mark.asyncio
async def test_inproc_deserialize():
    await check_deserialize("inproc://")
async def check_deserialize_roundtrip(addr):
    """
    Sanity check round-tripping with "deserialize" on and off.
    """
    # Test with long bytestrings, large enough to be transferred
    # as a separate payload
    _uncompressible = os.urandom(1024 ** 2) * 4  # end size: 4 MB

    msg = {
        "op": "update",
        "x": _uncompressible,
        "to_ser": [to_serialize(_uncompressible)],
        "ser": Serialized(*serialize(_uncompressible)),
    }

    for should_deserialize in (True, False):
        a, b = await get_comm_pair(addr, deserialize=should_deserialize)
        # Round-trip: a -> b -> a
        await a.write(msg)
        got = await b.read()
        await b.write(got)
        got = await a.read()

        assert sorted(got) == sorted(msg)
        for k in ("op", "x"):
            assert got[k] == msg[k]
        if should_deserialize:
            assert isinstance(got["to_ser"][0], (bytes, bytearray))
            assert isinstance(got["ser"], (bytes, bytearray))
        else:
            assert isinstance(got["to_ser"][0], (to_serialize, Serialized))
            assert isinstance(got["ser"], Serialized)
@pytest.mark.asyncio
async def test_inproc_deserialize_roundtrip():
    await check_deserialize_roundtrip("inproc://")


@pytest.mark.asyncio
async def test_tcp_deserialize_roundtrip():
    await check_deserialize_roundtrip("tcp://")
def _raise_eoferror():
    """Helper used as a reduce target: always raises EOFError."""
    raise EOFError


class _EOFRaising:
    """Object whose unpickling raises EOFError (via __reduce__)."""

    def __reduce__(self):
        return _raise_eoferror, ()
async def check_deserialize_eoferror(addr):
    """
    EOFError when deserializing should close the comm.
    """

    async def handle_comm(comm):
        await comm.write({"data": to_serialize(_EOFRaising())})
        with pytest.raises(CommClosedError):
            await comm.read()

    async with listen(addr, handle_comm) as listener:
        # NOTE(review): `deserialize` here is the module-level function used
        # as a truthy flag value — confirm this is intentional vs. `True`.
        comm = await connect(listener.contact_address, deserialize=deserialize)
        with pytest.raises(CommClosedError):
            await comm.read()
@pytest.mark.asyncio
async def test_tcp_deserialize_eoferror():
    await check_deserialize_eoferror("tcp://")
#
# Test various properties
#
async def check_repr(a, b):
    """Closing each end of a comm pair must be reflected in its repr()."""
    for comm in (a, b):
        assert "closed" not in repr(comm)
    await a.close()
    assert "closed" in repr(a)
    await b.close()
    assert "closed" in repr(b)
@pytest.mark.asyncio
async def test_tcp_repr():
    # Each end's repr() should mention the other end's local address
    a, b = await get_tcp_comm_pair()
    assert a.local_address in repr(b)
    assert b.local_address in repr(a)
    await check_repr(a, b)


@pytest.mark.asyncio
async def test_tls_repr():
    a, b = await get_tls_comm_pair()
    assert a.local_address in repr(b)
    assert b.local_address in repr(a)
    await check_repr(a, b)


@pytest.mark.asyncio
async def test_inproc_repr():
    a, b = await get_inproc_comm_pair()
    assert a.local_address in repr(b)
    assert b.local_address in repr(a)
    await check_repr(a, b)
async def check_addresses(a, b):
    """The two ends of a comm pair must mirror each other's addresses."""
    assert (a.peer_address, a.local_address) == (b.local_address, b.peer_address)
    a.abort()
    b.abort()
# NOTE: "adresses" is a long-standing typo in these test names; kept as-is
# so external test selections referencing the names keep working.
@pytest.mark.asyncio
async def test_tcp_adresses():
    a, b = await get_tcp_comm_pair()
    await check_addresses(a, b)


@pytest.mark.asyncio
async def test_tls_adresses():
    a, b = await get_tls_comm_pair()
    await check_addresses(a, b)


@pytest.mark.asyncio
async def test_inproc_adresses():
    a, b = await get_inproc_comm_pair()
    await check_addresses(a, b)
def test_register_backend_entrypoint():
    # Verify that a comm backend registered via the "distributed.comm.backends"
    # entry point is discovered by get_backend().
    # Code adapted from pandas backend entry point testing
    # https://github.com/pandas-dev/pandas/blob/2470690b9f0826a8feb426927694fa3500c3e8d2/pandas/tests/plotting/test_backend.py#L50-L76
    dist = pkg_resources.get_distribution("distributed")
    if dist.module_path not in distributed.__file__:
        # We are running from a non-installed distributed, and this test is invalid
        pytest.skip("Testing a non-installed distributed")

    # Fabricate a dummy backend module and inject its entry point in-process
    mod = types.ModuleType("dask_udp")
    mod.UDPBackend = lambda: 1
    sys.modules[mod.__name__] = mod

    entry_point_name = "distributed.comm.backends"
    backends_entry_map = pkg_resources.get_entry_map("distributed")
    if entry_point_name not in backends_entry_map:
        backends_entry_map[entry_point_name] = dict()
    backends_entry_map[entry_point_name]["udp"] = pkg_resources.EntryPoint(
        "udp", mod.__name__, attrs=["UDPBackend"], dist=dist
    )
    result = get_backend("udp")
    assert result == 1
|
middleman.py | import workman
import configparser
import time
import threading
from database import init_engine, db_session
from models import Scan
class Middleman:
    """Polls the scan queue in the database and dispatches up to
    ``MaxConcurrentScans`` worker threads, each running one Workman scan.

    Fixes: removed stray semicolons, stopped shadowing the builtin ``list``
    in the finished-scan loop, switched string concatenation to f-strings,
    and skipped building/limiting the queue query when no slots are free
    (``limit()`` with a non-positive value was previously possible).
    """

    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config.read('config.ini')
        self.max_scans = int(self.config["WEBAUDIT"]["MaxConcurrentScans"])
        #init_engine('mysql+pymysql://' + self.config['DATABASE']['Username'] + ':' + self.config['DATABASE']['Password'] + '@' + self.config['DATABASE']['Server'] + '/' + self.config['DATABASE']['Database'], pool_recycle=3600)
        # scan_id -> {'thread': Thread} for scans currently running
        self.scans_in_progress = dict()

    def run(self):
        """Main loop: start queued scans, reap finished ones, then sleep."""
        while True:
            self.process_queue()
            self.process_running_scans()
            time.sleep(int(self.config["WEBAUDIT"]["ProcessingRestPeriod"]))

    def process_running_scans(self):
        """Drop entries whose worker thread has finished."""
        current_scans = len(self.scans_in_progress)
        if current_scans == 0:
            return
        print(f"Processing running scans [{current_scans}/{self.max_scans}]", flush=True)
        finished_scans = [
            scan_id
            for scan_id, info in self.scans_in_progress.items()
            if not info['thread'].is_alive()
        ]
        for finished_scan in finished_scans:
            self.scans_in_progress.pop(finished_scan, None)

    def process_queue(self):
        """Start worker threads for queued scans, up to the free slot count."""
        print("Processing queue", flush=True)
        slots_available = self.max_scans - len(self.scans_in_progress)
        if slots_available > 0:
            # status == 0 means "queued"; exclude scans already running here
            query = db_session.query(Scan).filter(Scan.status == 0)
            if self.scans_in_progress:
                query = query.filter(~Scan.id.in_(self.scans_in_progress.keys()))
            query = query.order_by(Scan.created_date).limit(slots_available)
            for scan in query:
                print(f"Scanning scan ID #{scan.id}", flush=True)
                scan_thread = threading.Thread(target=self.init_scan, args=(scan.id,))
                scan_thread.start()
                self.scans_in_progress[scan.id] = {'thread': scan_thread}
        db_session.close()

    @staticmethod
    def init_scan(scan_id):
        """Thread target: run one full scan via Workman (blocks until done)."""
        worker = workman.Workman(scan_id)
        worker.start_scan()
|
telemetry.py | '''fonty.lib.telemetry'''
import sys
import json
import platform
import threading
from enum import Enum
from datetime import datetime
from typing import Tuple
import requests
from fonty.version import __version__
from fonty.lib.json_encoder import FontyJSONEncoder
from fonty.lib.constants import TELEMETRY_ENDPOINT, JSON_DUMP_OPTS
from fonty.lib.config import CommonConfiguration
class TelemetryEventTypes(Enum):
    '''An enum of possible telemetry event types.'''
    # Initial fonty setup
    FONTY_SETUP = 'FONTY_SETUP'
    # Font lifecycle events
    FONT_INSTALL = 'FONT_INSTALL'
    FONT_UNINSTALL = 'FONT_UNINSTALL'
    FONT_LIST = 'FONT_LIST'
    FONT_LIST_REBUILD = 'FONT_LIST_REBUILD'
    FONT_CONVERT = 'FONT_CONVERT'
    # Source (repository) lifecycle events
    SOURCE_LIST = 'SOURCE_LIST'
    SOURCE_ADD = 'SOURCE_ADD'
    SOURCE_REMOVE = 'SOURCE_REMOVE'
    SOURCE_UPDATE = 'SOURCE_UPDATE'
class TelemetryEvent:
    '''The TelemetryEvent class describes a fonty telemetry event.

    By default, fonty sends some light usage data to better understand (1) how
    users use fonty and (2) also to identify interesting font usage statistics.

    It can however be disabled by turning off the `telemetry` setting in the
    fonty configuration file.
    '''

    #: The event type of this telemetry event.
    event_type: TelemetryEventTypes

    #: The timestamp of this telemetry event.
    timestamp: datetime

    #: The fonty version currently being used.
    fonty_version: str

    #: The operating system family of the current environment.
    os_family: str

    #: The operating system version of the current environment.
    os_version: str

    #: The current Python version.
    python_version: str

    #: The time it took (in seconds) to execute this event.
    execution_time: float

    #: The status code of the current command. 0 means success, >1 means error.
    status_code: int

    #: The additional data that is relevant to this telemetry event.
    data: dict

    def __init__(
        self,
        status_code: int,
        event_type: TelemetryEventTypes,
        execution_time: float = None,
        data: dict = None
    ) -> None:
        self.event_type = event_type
        self.timestamp = datetime.now()
        self.fonty_version = __version__
        self.python_version = '{major}.{minor}.{micro}'.format(
            major=sys.version_info.major,
            minor=sys.version_info.minor,
            micro=sys.version_info.micro
        )
        self.os_family, self.os_version = TelemetryEvent._get_os_info()
        self.execution_time = execution_time
        self.status_code = status_code
        self.data = data

    def send(self, force=False, asynchronous=True) -> None:
        '''Sends the telemetry data to the central logging server.

        Does nothing when telemetry is disabled in the user configuration,
        unless *force* is True. With *asynchronous* the request is sent from
        a background thread so the CLI is not blocked.
        '''
        if not CommonConfiguration.telemetry and not force:
            return

        # Create payload
        d = {
            'timestamp': self.timestamp,
            'status_code': self.status_code,
            'event_type': self.event_type.value,
            'execution_time': self.execution_time,
            'fonty_version': self.fonty_version,
            'os_family': self.os_family,
            'os_version': self.os_version,
            'python_version': self.python_version,
            'data': self.data
        }

        # Send request
        if asynchronous:
            threading.Thread(target=self._send_request, args=(d,)).start()
        else:
            self._send_request(d)

    def _send_request(self, d):
        '''POST the payload to the telemetry endpoint, best-effort.'''
        try:
            requests.post(
                url=TELEMETRY_ENDPOINT,
                data=json.dumps(d, cls=FontyJSONEncoder, **JSON_DUMP_OPTS),
                headers={'Content-Type': 'application/json'},
                # Telemetry must never hang the process indefinitely
                timeout=10
            )
        except Exception:  # telemetry is best-effort; swallow network errors
            # NOTE: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass

    @staticmethod
    def _get_os_info() -> Tuple[str, str]:
        '''Gets the current operating system information and returns it as a
        tuple of (family, version).
        '''
        family = platform.system()
        version = ''
        if family.lower() == 'darwin':
            version = platform.mac_ver()[0]
        elif family.lower() == 'windows':
            win = sys.getwindowsversion()  # pylint: disable=E1101
            version = '{}.{}-{}-{}'.format(win.major, win.minor, win.build, win.platform)
        elif family.lower() == 'linux':
            # NOTE(review): `distro` is a third-party package; assumed to be a
            # declared dependency of fonty — confirm against setup.py.
            import distro
            dist = distro.linux_distribution(full_distribution_name=False)
            version = '{}-{}'.format(dist[0], dist[1])
        else:
            version = platform.platform()
        return family, version
|
scheduler.py | import sys
import traceback
if sys.version_info[0] < 3:
import Queue as queue
else:
import queue
import threading
from yaku.task_manager \
import \
run_task, order_tasks, TaskManager
from yaku.utils \
import \
get_exception
import yaku.errors
def run_tasks(ctx, tasks=None):
    """Execute *tasks* (defaulting to ``ctx.tasks``) serially."""
    if tasks is None:
        tasks = ctx.tasks
    runner = SerialRunner(ctx, TaskManager(tasks))
    runner.start()
    runner.run()
def run_tasks_parallel(ctx, tasks=None, maxjobs=1):
    """Execute *tasks* (defaulting to ``ctx.tasks``) on *maxjobs* threads."""
    if tasks is None:
        tasks = ctx.tasks
    runner = ParallelRunner(ctx, TaskManager(tasks), maxjobs)
    runner.start()
    runner.run()
class SerialRunner(object):
    """Run task groups one task at a time on the calling thread."""

    def __init__(self, ctx, task_manager):
        self.ctx = ctx
        self.task_manager = task_manager

    def start(self):
        """No-op; present so SerialRunner matches ParallelRunner's interface."""
        pass

    def run(self):
        """Consume task groups until exhausted, running each task in order."""
        group = self.task_manager.next_set()
        while group:
            for task in group:
                run_task(self.ctx, task)
            group = self.task_manager.next_set()
class ParallelRunner(object):
    """Run task groups from a TaskManager on a pool of worker threads.

    The first task failure sets ``self.stop`` so workers wind down, and the
    failing task is handed back to :meth:`run` through ``error_out``, where
    it is re-raised as a ``TaskRunFailure``.
    """

    def __init__(self, ctx, task_manager, maxjobs=1):
        self.njobs = maxjobs                 # number of worker threads
        self.task_manager = task_manager
        self.ctx = ctx
        self.worker_queue = queue.Queue()    # tasks waiting to be executed
        self.error_out = queue.Queue()       # failed tasks reported to run()
        self.failure_lock = threading.Lock()
        self.stop = False                    # set on first failure

    def start(self):
        """Spawn the daemon worker threads; must be called before run()."""
        def _worker():
            # XXX: this whole thing is an hack - find a better way to
            # notify task execution failure to all worker threads
            while not self.stop:
                task = self.worker_queue.get()
                try:
                    run_task(self.ctx, task)
                except yaku.errors.TaskRunFailure:
                    e = get_exception()
                    self.failure_lock.acquire()
                    self.stop = True
                    self.failure_lock.release()
                    task.error_msg = e.explain
                    task.error_cmd = e.cmd
                    self.error_out.put(task)
                except Exception:
                    # Unexpected errors: capture the full traceback so run()
                    # can surface it to the user.
                    e = get_exception()
                    exc_type, exc_value, tb = sys.exc_info()
                    lines = traceback.format_exception(exc_type, exc_value, tb)
                    self.failure_lock.acquire()
                    self.stop = True
                    self.failure_lock.release()
                    task.error_msg = "".join(lines)
                    task.error_cmd = []
                    self.error_out.put(task)
                self.worker_queue.task_done()
        for i in range(self.njobs):
            t = threading.Thread(target=_worker)
            # Thread.setDaemon() is deprecated (removed-in-3.12 path); assign
            # the attribute instead. Daemon workers never block process exit.
            t.daemon = True
            t.start()

    def run(self):
        """Feed task groups to the workers, re-raising the first failure."""
        grp = self.task_manager.next_set()
        while grp:
            for task in grp:
                self.worker_queue.put(task)
            # XXX: we only join once we detect the worker queue to be empty, to
            # avoid blocking for a long time. This is naive, and will break if
            # the worker_queue is filled after this point
            while not self.stop:
                if self.worker_queue.empty():
                    self.worker_queue.join()
                    break
            if not self.error_out.empty():
                task = self.error_out.get()
                msg = task.error_msg
                cmd = task.error_cmd
                raise yaku.errors.TaskRunFailure(cmd, msg)
            grp = self.task_manager.next_set()
|
server.py | # Adafruit BNO055 WebGL Example
#
# Requires the flask web framework to be installed. See http://flask.pocoo.org/
# for installation instructions, however on a Linux machine like the Raspberry
# Pi or BeagleBone black you can likely install it by running:
# sudo apt-get update
# sudo apt-get install python-pip
# sudo pip install flask
#
# Copyright (c) 2015 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import logging
import threading
import time
from flask import *
from Adafruit_BNO055 import BNO055
# Create and configure the BNO sensor connection. Make sure only ONE of the
# below 'bno = ...' lines is uncommented:
# Raspberry Pi configuration with serial UART and RST connected to GPIO 18:
# NOTE(review): constructing BNO055 here presumably opens the serial device at
# import time — confirm against the Adafruit_BNO055 library docs.
bno = BNO055.BNO055(serial_port='/dev/ttyAMA0', rst=18)
# BeagleBone Black configuration with default I2C connection (SCL=P9_19, SDA=P9_20),
# and RST connected to pin P9_12:
#bno = BNO055.BNO055(rst='P9_12')

# Application configuration below. You probably don't need to change these values.

# How often to update the BNO sensor data (in hertz).
BNO_UPDATE_FREQUENCY_HZ = 10

# Name of the file to store calibration data when the save/load calibration
# button is pressed. Calibration data is stored in JSON format.
CALIBRATION_FILE = 'calibration.json'

# BNO sensor axes remap values. These are the parameters to the BNO.set_axis_remap
# function. Don't change these without consulting section 3.4 of the datasheet.
# The default axes mapping below assumes the Adafruit BNO055 breakout is flat on
# a table with the row of SDA, SCL, GND, VIN, etc pins facing away from you.
BNO_AXIS_REMAP = { 'x': BNO055.AXIS_REMAP_X,
                   'y': BNO055.AXIS_REMAP_Z,
                   'z': BNO055.AXIS_REMAP_Y,
                   'x_sign': BNO055.AXIS_REMAP_POSITIVE,
                   'y_sign': BNO055.AXIS_REMAP_POSITIVE,
                   'z_sign': BNO055.AXIS_REMAP_NEGATIVE }

# Create flask application.
app = Flask(__name__)

# Global state to keep track of the latest readings from the BNO055 sensor.
# This will be accessed from multiple threads so care needs to be taken to
# protect access with a lock (or else inconsistent/partial results might be read).
# A condition object is used both as a lock for safe access across threads, and
# to notify threads that the BNO state has changed.
bno_data = {}
bno_changed = threading.Condition()

# Background thread to read BNO sensor data. Will be created right before
# the first request is served (see start_bno_thread below).
bno_thread = None
def read_bno():
    """Function to read the BNO sensor and update the bno_data object with the
    latest BNO orientation, etc. state. Must be run in its own thread because
    it will never return!
    """
    while True:
        # Grab new BNO sensor readings.
        temp = bno.read_temp()
        heading, roll, pitch = bno.read_euler()
        x, y, z, w = bno.read_quaternion()
        sys, gyro, accel, mag = bno.get_calibration_status()
        status, self_test, error = bno.get_system_status(run_self_test=False)
        if error != 0:
            # Parenthesized form is valid in both Python 2 and 3; the old
            # bare `print '...'` statement was Python-2-only.
            print('Error! Value: {0}'.format(error))
        # Capture the lock on the bno_changed condition so the bno_data shared
        # state can be updated.
        with bno_changed:
            bno_data['euler'] = (heading, roll, pitch)
            bno_data['temp'] = temp
            bno_data['quaternion'] = (x, y, z, w)
            bno_data['calibration'] = (sys, gyro, accel, mag)
            # Notify any waiting threads that the BNO state has been updated.
            bno_changed.notifyAll()
        # Sleep until the next reading.
        time.sleep(1.0/BNO_UPDATE_FREQUENCY_HZ)
def bno_sse():
    """Generator producing HTML5 server-sent events carrying the latest
    BNO055 readings. Flask runs this in a thread and pushes each yielded
    event to the connected client web page.
    """
    # Wait for read_bno() to publish a new reading, copy it out under the
    # lock, then serialize and yield it with the lock released.
    while True:
        with bno_changed:
            bno_changed.wait()
            heading, roll, pitch = bno_data['euler']
            temp = bno_data['temp']
            x, y, z, w = bno_data['quaternion']
            sys, gyro, accel, mag = bno_data['calibration']
        reading = {'heading': heading, 'roll': roll, 'pitch': pitch, 'temp': temp,
                   'quatX': x, 'quatY': y, 'quatZ': z, 'quatW': w,
                   'calSys': sys, 'calGyro': gyro, 'calAccel': accel, 'calMag': mag }
        yield 'data: {0}\n\n'.format(json.dumps(reading))
@app.before_first_request
def start_bno_thread():
    """Initialize the sensor and launch the background reader thread.

    Runs right before the first request is served: in debug mode flask
    starts multiple main threads, so this is the only safe place for
    run-once startup code. See:
    http://stackoverflow.com/questions/24617795/starting-thread-while-running-flask-with-debug
    """
    global bno_thread
    if not bno.begin():
        raise RuntimeError('Failed to initialize BNO055!')
    bno.set_axis_remap(**BNO_AXIS_REMAP)
    # The reader loops forever, so it must be a daemon thread or it would
    # block process exit.
    bno_thread = threading.Thread(target=read_bno)
    bno_thread.daemon = True
    bno_thread.start()
@app.route('/bno')
def bno_path():
    """Stream sensor readings to the page as server-sent events."""
    stream = bno_sse()
    return Response(stream, mimetype='text/event-stream')
@app.route('/save_calibration', methods=['POST'])
def save_calibration():
    """Read the sensor's calibration and persist it to CALIBRATION_FILE."""
    # Hold the sensor lock so nothing else talks to the BNO mid-read.
    with bno_changed:
        calibration = bno.get_calibration()
    # Persist the calibration as JSON.
    with open(CALIBRATION_FILE, 'w') as cal_file:
        json.dump(calibration, cal_file)
    return 'OK'
@app.route('/load_calibration', methods=['POST'])
def load_calibration():
    """Load calibration values from disk and push them to the sensor."""
    with open(CALIBRATION_FILE, 'r') as cal_file:
        calibration = json.load(cal_file)
    # Serialize access to the sensor while writing the calibration.
    with bno_changed:
        bno.set_calibration(calibration)
    return 'OK'
@app.route('/')
def root():
    """Serve the WebGL visualization page."""
    page = render_template('index.html')
    return page
if __name__ == '__main__':
    # Create a server listening for external connections on the default
    # port 5000. Enable debug mode for better error messages and live
    # reloading of the server on changes. Also make the server threaded
    # so multiple connections can be processed at once (very important
    # for using server sent events).
    # NOTE(review): debug=True also enables the Werkzeug debugger; avoid
    # exposing 0.0.0.0 with debug enabled outside a trusted network.
    app.run(host='0.0.0.0', debug=True, threaded=True)
|
index.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
from threading import Thread
from distlib import DistlibException
from distlib.compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener)
from distlib.util import cached_property, zip_dir
logger = logging.getLogger(__name__)

# Default package index endpoint.
# NOTE(review): served over plain HTTP here; PyPI has since moved to HTTPS —
# confirm before relying on this default for credential-bearing requests.
DEFAULT_INDEX = 'http://pypi.python.org/pypi'
# Default authentication realm for the index's basic-auth challenge.
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
    """
    This class represents a package index compatible with PyPI, the Python
    Package Index.
    """
    # MIME multipart boundary used by encode_request(); an arbitrary token
    # unlikely to appear in any payload.
    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'

    def __init__(self, url=None):
        """
        Initialise an instance.

        :param url: The URL of the index. If not specified, the URL for PyPI is
                    used.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        # Only bare http(s) repository URLs are acceptable.
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        # Probe for an available GnuPG executable (gpg2 preferred),
        # discarding its output; self.gpg stays None if neither runs.
        # NOTE(review): check_call raises CalledProcessError (not OSError) on
        # a non-zero exit status, which would propagate out of the
        # constructor — confirm whether that is intended.
        with open(os.devnull, 'w') as sink:
            for s in ('gpg2', 'gpg'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                except OSError:
                    pass

    def _get_pypirc_command(self):
        """
        Get the distutils command for interacting with PyPI configurations.
        :return: the command.
        """
        # Imported lazily so distutils is only pulled in when configuration
        # access is actually needed.
        from distutils.core import Distribution
        from distutils.config import PyPIRCCommand
        d = Distribution()
        return PyPIRCCommand(d)

    def read_configuration(self):
        """
        Read the PyPI access configuration as supported by distutils, getting
        PyPI to do the actual work. This populates ``username``, ``password``,
        ``realm`` and ``url`` attributes from the configuration.
        """
        # get distutils to do the work
        c = self._get_pypirc_command()
        c.repository = self.url
        cfg = c._read_pypirc()
        self.username = cfg.get('username')
        self.password = cfg.get('password')
        self.realm = cfg.get('realm', 'pypi')
        self.url = cfg.get('repository', self.url)

    def save_configuration(self):
        """
        Save the PyPI access configuration. You must have set ``username`` and
        ``password`` attributes before calling this method.

        Again, distutils is used to do the actual work.
        """
        self.check_credentials()
        # get distutils to do the work
        c = self._get_pypirc_command()
        c._store_pypirc(self.username, self.password)

    def check_credentials(self):
        """
        Check that ``username`` and ``password`` have been set, and raise an
        exception if not.
        """
        if self.username is None or self.password is None:
            raise DistlibException('username and password must be set')
        # (Re)build the basic-auth handler used by send_request().
        pm = HTTPPasswordMgr()
        _, netloc, _, _, _, _ = urlparse(self.url)
        pm.add_password(self.realm, netloc, self.username, self.password)
        self.password_handler = HTTPBasicAuthHandler(pm)

    def register(self, metadata):
        """
        Register a distribution on PyPI, using the provided metadata.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the distribution to be
                         registered.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        metadata.validate()
        d = metadata.todict()
        # First ask the index to verify the metadata, then submit it.
        d[':action'] = 'verify'
        request = self.encode_request(d.items(), [])
        response = self.send_request(request)
        d[':action'] = 'submit'
        request = self.encode_request(d.items(), [])
        return self.send_request(request)

    def _reader(self, name, stream, outbuf):
        """
        Thread runner for reading lines of from a subprocess into a buffer.

        :param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically a pipe
                       connected to the output stream of a subprocess.
        :param outbuf: The list to append the read lines to.
        """
        while True:
            s = stream.readline()
            if not s:
                break
            s = s.decode('utf-8').rstrip()
            outbuf.append(s)
            logger.debug('%s: %s' % (name, s))
        stream.close()

    def get_sign_command(self, filename, signer, sign_password):
        """
        Return a suitable command for signing a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :return: The signing command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if self.gpg_home:
            cmd.extend(['--homedir', self.gpg_home])
        if sign_password is not None:
            # The passphrase is fed on stdin (fd 0) by run_command().
            cmd.extend(['--batch', '--passphrase-fd', '0'])
        # The detached signature is written to a fresh temporary directory;
        # upload_file() is responsible for removing it after reading.
        td = tempfile.mkdtemp()
        sf = os.path.join(td, os.path.basename(filename) + '.asc')
        cmd.extend(['--detach-sign', '--armor', '--local-user',
                    signer, '--output', sf, filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd, sf

    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process , passing it any input data specified.

        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
        t2.start()
        if input_data is not None:
            p.stdin.write(input_data)
            p.stdin.close()
        p.wait()
        # Ensure both reader threads have drained their pipes before
        # returning the collected output.
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr

    def sign_file(self, filename, signer, sign_password):
        """
        Sign a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :return: The absolute pathname of the file where the signature is
                 stored.
        """
        cmd, sig_file = self.get_sign_command(filename, signer, sign_password)
        rc, stdout, stderr = self.run_command(cmd,
                                              sign_password.encode('utf-8'))
        if rc != 0:
            raise DistlibException('sign command failed with error '
                                   'code %s' % rc)
        return sig_file

    def upload_file(self, metadata, filename, signer=None, sign_password=None,
                    filetype='sdist', pyversion='source'):
        """
        Upload a release file to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the file to be uploaded.
        :param filename: The pathname of the file to be uploaded.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param filetype: The type of the file being uploaded. This is the
                         distutils command which produced that file, e.g.
                         ``sdist`` or ``bdist_wheel``.
        :param pyversion: The version of Python which the release relates
                          to. For code compatible with any Python, this would
                          be ``source``, otherwise it would be e.g. ``3.2``.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.exists(filename):
            raise DistlibException('not found: %s' % filename)
        metadata.validate()
        d = metadata.todict()
        sig_file = None
        if signer:
            if not self.gpg:
                logger.warning('no signing program available - not signed')
            else:
                sig_file = self.sign_file(filename, signer, sign_password)
        with open(filename, 'rb') as f:
            file_data = f.read()
        md5_digest = hashlib.md5(file_data).hexdigest()
        sha256_digest = hashlib.sha256(file_data).hexdigest()
        d.update({
            ':action': 'file_upload',
            # NOTE(review): 'protcol_version' (sic) presumably matches the
            # legacy server-side upload field name — verify before fixing
            # the spelling.
            'protcol_version': '1',
            'filetype': filetype,
            'pyversion': pyversion,
            'md5_digest': md5_digest,
            'sha256_digest': sha256_digest,
        })
        files = [('content', os.path.basename(filename), file_data)]
        if sig_file:
            with open(sig_file, 'rb') as f:
                sig_data = f.read()
            files.append(('gpg_signature', os.path.basename(sig_file),
                          sig_data))
            # The signature lives in a temporary directory created by
            # get_sign_command(); remove it now that it's been read.
            shutil.rmtree(os.path.dirname(sig_file))
        logger.debug('files: %s', files)
        request = self.encode_request(d.items(), files)
        return self.send_request(request)

    def upload_documentation(self, metadata, doc_dir):
        """
        Upload documentation to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the documentation to be
                         uploaded.
        :param doc_dir: The pathname of the directory which contains the
                        documentation. This should be the directory that
                        contains the ``index.html`` for the documentation.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.isdir(doc_dir):
            raise DistlibException('not a directory: %r' % doc_dir)
        fn = os.path.join(doc_dir, 'index.html')
        if not os.path.exists(fn):
            raise DistlibException('not found: %r' % fn)
        metadata.validate()
        name, version = metadata.name, metadata.version
        # The whole documentation tree is zipped in memory and sent as one
        # form field.
        zip_data = zip_dir(doc_dir).getvalue()
        fields = [(':action', 'doc_upload'),
                  ('name', name), ('version', version)]
        files = [('content', name, zip_data)]
        request = self.encode_request(fields, files)
        return self.send_request(request)

    def get_verify_command(self, signature_filename, data_filename):
        """
        Return a suitable command for verifying a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :return: The verifying command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if self.gpg_home:
            cmd.extend(['--homedir', self.gpg_home])
        cmd.extend(['--verify', signature_filename, data_filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd

    def verify_signature(self, signature_filename, data_filename):
        """
        Verify a signature for a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :return: True if the signature was verified, else False.
        """
        if not self.gpg:
            raise DistlibException('verification unavailable because gpg '
                                   'unavailable')
        cmd = self.get_verify_command(signature_filename, data_filename)
        rc, stdout, stderr = self.run_command(cmd)
        # gpg exits 0 for a good signature, 1 for a bad one; anything else
        # indicates the command itself failed.
        if rc not in (0, 1):
            raise DistlibException('verify command failed with error '
                                   'code %s' % rc)
        return rc == 0

    def download_file(self, url, destfile, digest=None, reporthook=None):
        """
        This is a convenience method for downloading a file from an URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).

        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matched any expected value.

        :param url: The URL of the file to be downloaded (assumed to be
                    available via an HTTP GET request).
        :param destfile: The pathname where the downloaded file is to be
                         saved.
        :param digest: If specified, this must be a (hasher, value)
                       tuple, where hasher is the algorithm used (e.g.
                       ``'md5'``) and ``value`` is the expected value.
        :param reporthook: The same as for :func:`urlretrieve` in the
                           standard library.
        """
        if digest is None:
            digester = None
            logger.debug('No digest specified')
        else:
            if isinstance(digest, (list, tuple)):
                hasher, digest = digest
            else:
                # A bare digest string defaults to md5.
                hasher = 'md5'
            digester = getattr(hashlib, hasher)()
            logger.debug('Digest specified: %s' % digest)
        # The following code is equivalent to urlretrieve.
        # We need to do it this way so that we can compute the
        # digest of the file as we go.
        with open(destfile, 'wb') as dfp:
            # addinfourl is not a context manager on 2.x
            # so we have to use try/finally
            sfp = self.send_request(Request(url))
            try:
                headers = sfp.info()
                blocksize = 8192
                size = -1
                read = 0
                blocknum = 0
                if "content-length" in headers:
                    size = int(headers["Content-Length"])
                if reporthook:
                    reporthook(blocknum, blocksize, size)
                while True:
                    block = sfp.read(blocksize)
                    if not block:
                        break
                    read += len(block)
                    dfp.write(block)
                    if digester:
                        digester.update(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, blocksize, size)
            finally:
                sfp.close()

        # check that we got the whole file, if we can
        if size >= 0 and read < size:
            raise DistlibException(
                'retrieval incomplete: got only %d out of %d bytes'
                % (read, size))
        # if we have a digest, it must match.
        if digester:
            actual = digester.hexdigest()
            if digest != actual:
                raise DistlibException('%s digest mismatch for %s: expected '
                                       '%s, got %s' % (hasher, destfile,
                                                       digest, actual))
            logger.debug('Digest verified: %s', digest)

    def send_request(self, req):
        """
        Send a standard library :class:`Request` to PyPI and return its
        response.

        :param req: The request to send.
        :return: The HTTP response from PyPI (a standard library HTTPResponse).
        """
        handlers = []
        if self.password_handler:
            handlers.append(self.password_handler)
        if self.ssl_verifier:
            handlers.append(self.ssl_verifier)
        opener = build_opener(*handlers)
        return opener.open(req)

    def encode_request(self, fields, files):
        """
        Encode fields and files for posting to an HTTP server.

        :param fields: The fields to send as a list of (fieldname, value)
                       tuples.
        :param files: The files to send as a list of (fieldname, filename,
                      file_bytes) tuple.
        """
        # Adapted from packaging, which in turn was adapted from
        # http://code.activestate.com/recipes/146306
        parts = []
        boundary = self.boundary
        for k, values in fields:
            if not isinstance(values, (list, tuple)):
                values = [values]
            for v in values:
                parts.extend((
                    b'--' + boundary,
                    ('Content-Disposition: form-data; name="%s"' %
                     k).encode('utf-8'),
                    b'',
                    v.encode('utf-8')))
        for key, filename, value in files:
            parts.extend((
                b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (key, filename)).encode('utf-8'),
                b'',
                value))
        # Closing boundary plus trailing CRLF.
        parts.extend((b'--' + boundary + b'--', b''))
        body = b'\r\n'.join(parts)
        ct = b'multipart/form-data; boundary=' + boundary
        headers = {
            'Content-type': ct,
            'Content-length': str(len(body))
        }
        return Request(self.url, body, headers)
|
postproc.py | #!/usr/bin/python -OO
# Copyright 2007-2019 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.postproc - threaded post-processing of jobs
"""
import os
import Queue
import logging
import sabnzbd
import xml.sax.saxutils
import time
import re
from sabnzbd.newsunpack import unpack_magic, par2_repair, external_processing, \
sfv_check, build_filelists, rar_sort
from threading import Thread
from sabnzbd.misc import real_path, get_unique_path, create_dirs, move_to_path, \
make_script_path, long_path, clip_path, recursive_listdir, \
on_cleanup_list, renamer, remove_dir, remove_all, globber, globber_full, \
set_permissions, cleanup_empty_directories, fix_unix_encoding, \
sanitize_and_trim_path, sanitize_files_in_folder, remove_file
from sabnzbd.sorting import Sorter
from sabnzbd.constants import REPAIR_PRIORITY, TOP_PRIORITY, POSTPROC_QUEUE_FILE_NAME, \
POSTPROC_QUEUE_VERSION, sample_match, JOB_ADMIN, Status, VERIFIED_FILE
from sabnzbd.encoding import TRANS, unicoder
from sabnzbd.rating import Rating
import sabnzbd.emailer as emailer
import sabnzbd.dirscanner as dirscanner
import sabnzbd.downloader
import sabnzbd.config as config
import sabnzbd.cfg as cfg
import sabnzbd.nzbqueue
import sabnzbd.database as database
import sabnzbd.notifier as notifier
import sabnzbd.utils.rarfile as rarfile
import sabnzbd.utils.checkdir
# Maximum number of consecutive fast-queue jobs processed before a slow-queue
# job is given a turn (see PostProcessor.run).
MAX_FAST_JOB_COUNT = 3

# Match samples
RE_SAMPLE = re.compile(sample_match, re.I)
class PostProcessor(Thread):
    """ PostProcessor thread, designed as Singleton """
    do = None  # Link to instance of the thread

    def __init__(self):
        """ Initialize PostProcessor thread """
        Thread.__init__(self)

        # This history queue is simply used to log what active items to display in the web_ui
        self.load()
        if self.history_queue is None:
            self.history_queue = []

        # Fast-queue for jobs already finished by DirectUnpack
        self.fast_queue = Queue.Queue()

        # Regular queue for jobs that might need more attention
        self.slow_queue = Queue.Queue()

        # Load all old jobs
        for nzo in self.history_queue:
            self.process(nzo)

        # Counter to not only process fast-jobs
        self.__fast_job_count = 0

        # State variables
        self.__stop = False
        self.__busy = False
        self.paused = False
        PostProcessor.do = self

    def save(self):
        """ Save postproc queue """
        logging.info("Saving postproc queue")
        sabnzbd.save_admin((POSTPROC_QUEUE_VERSION, self.history_queue), POSTPROC_QUEUE_FILE_NAME)

    def load(self):
        """ Load postproc queue """
        self.history_queue = []
        logging.info("Loading postproc queue")
        data = sabnzbd.load_admin(POSTPROC_QUEUE_FILE_NAME)
        if data is None:
            return
        try:
            version, history_queue = data
            if POSTPROC_QUEUE_VERSION != version:
                logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
            elif isinstance(history_queue, list):
                # Drop jobs whose download folder no longer exists on disk.
                self.history_queue = [nzo for nzo in history_queue if os.path.exists(nzo.downpath)]
        except:
            # Deliberate catch-all: a corrupt admin file must never prevent
            # the post-processor from starting.
            logging.info('Corrupt %s file, discarding', POSTPROC_QUEUE_FILE_NAME)
            logging.info("Traceback: ", exc_info=True)

    def delete(self, nzo_id, del_files=False):
        """ Remove a job from the post processor queue """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                if nzo.status in (Status.FAILED, Status.COMPLETED):
                    # Already done: flag for removal after processing finishes.
                    nzo.to_be_removed = True
                elif nzo.status in (Status.DOWNLOADING, Status.QUEUED):
                    self.remove(nzo)
                    nzo.purge_data(keep_basic=False, del_files=del_files)
                    logging.info('Removed job %s from postproc queue', nzo.work_name)
                    nzo.work_name = ''  # Mark as deleted job
                break

    def process(self, nzo):
        """ Push on finished job in the queue """
        if nzo not in self.history_queue:
            self.history_queue.append(nzo)

        # Fast-track if it has DirectUnpacked jobs or if it's still going
        if nzo.direct_unpacker and (nzo.direct_unpacker.success_sets or not nzo.direct_unpacker.killed):
            self.fast_queue.put(nzo)
        else:
            self.slow_queue.put(nzo)
        self.save()
        sabnzbd.history_updated()

    def remove(self, nzo):
        """ Remove given nzo from the queue """
        try:
            self.history_queue.remove(nzo)
        except:
            # Best-effort: the job may already have been removed elsewhere.
            pass
        self.save()
        sabnzbd.history_updated()

    def stop(self):
        """ Stop thread after finishing running job """
        self.__stop = True
        # Sentinel None entries wake up the blocking queue reads in run().
        self.slow_queue.put(None)
        self.fast_queue.put(None)

    def cancel_pp(self, nzo_id):
        """ Change the status, so that the PP is canceled """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                nzo.abort_direct_unpacker()
                if nzo.pp_active:
                    nzo.pp_active = False
                return True
        return None

    def empty(self):
        """ Return True if pp queue is empty """
        return self.slow_queue.empty() and self.fast_queue.empty() and not self.__busy

    def get_queue(self):
        """ Return list of NZOs that still need to be processed """
        # Jobs with an empty work_name are marked deleted (see delete()).
        return [nzo for nzo in self.history_queue if nzo.work_name]

    def get_path(self, nzo_id):
        """ Return download path for given nzo_id or None when not found """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                return nzo.downpath
        return None

    def run(self):
        """ Postprocessor loop """
        # First we do a dircheck
        complete_dir = sabnzbd.cfg.complete_dir.get_path()
        if sabnzbd.utils.checkdir.isFAT(complete_dir):
            logging.warning(T('Completed Download Folder %s is on FAT file system, limiting maximum file size to 4GB') % complete_dir)
        else:
            logging.info("Completed Download Folder %s is not on FAT", complete_dir)

        # Check on Windows if we have unicode-subprocess
        if sabnzbd.WIN32:
            try:
                import subprocessww
            except ImportError:
                logging.warning(T('Module subprocessww missing. Expect problems with Unicoded file and directory names in downloads.'))

        # Do a purge of the history-items if it was set, just to be sure
        history_db = database.HistoryDB()
        history_db.auto_history_purge()
        history_db.close()

        # Start looping
        check_eoq = False
        while not self.__stop:
            self.__busy = False

            if self.paused:
                time.sleep(5)
                continue

            # Something in the fast queue?
            try:
                # Every few fast-jobs we should also allow a
                # slow job so that they don't wait forever
                if self.__fast_job_count >= MAX_FAST_JOB_COUNT and self.slow_queue.qsize():
                    raise Queue.Empty
                nzo = self.fast_queue.get(timeout=2)
                self.__fast_job_count += 1
            except Queue.Empty:
                # Try the slow queue
                try:
                    nzo = self.slow_queue.get(timeout=2)
                    # Reset fast-counter
                    self.__fast_job_count = 0
                except Queue.Empty:
                    # Check for empty queue
                    if check_eoq:
                        check_eoq = False
                        handle_empty_queue()
                    # No fast or slow jobs, better luck next loop!
                    continue

            # Stop job (None sentinel pushed by stop())
            if not nzo:
                continue

            # Job was already deleted.
            if not nzo.work_name:
                check_eoq = True
                continue

            # Flag NZO as being processed
            nzo.pp_active = True

            # Pause downloader, if users wants that
            if cfg.pause_on_post_processing():
                sabnzbd.downloader.Downloader.do.wait_for_postproc()

            self.__busy = True
            process_job(nzo)

            if nzo.to_be_removed:
                history_db = database.HistoryDB()
                history_db.remove_history(nzo.nzo_id)
                history_db.close()
                nzo.purge_data(keep_basic=False, del_files=True)

            # Processing done
            nzo.pp_active = False
            self.remove(nzo)
            check_eoq = True

            # Allow download to proceed
            sabnzbd.downloader.Downloader.do.resume_from_postproc()
def process_job(nzo):
    """ Process one job: run the full post-processing chain for a finished
        download (NZO): optional par2 verify/repair, unpack, move to the
        completed folder, sorting, user script, email and history updates.
        Returns False when the job was re-added to the queue to fetch extra
        par2 files (processing will restart later), True otherwise.
    """
    start = time.time()

    # keep track of whether we can continue
    all_ok = True
    # keep track of par problems
    par_error = False
    # keep track of any unpacking errors
    unpack_error = False
    # Signal empty download, for when 'empty_postproc' is enabled
    empty = False
    nzb_list = []
    # These need to be initialized in case of a crash
    workdir_complete = ''
    script_log = ''
    script_line = ''

    # Get the job flags
    nzo.save_attribs()
    flag_repair, flag_unpack, flag_delete = nzo.repair_opts

    # Normalize PP: delete implies unpack, unpack implies repair
    if flag_delete:
        flag_unpack = True
    if flag_unpack:
        flag_repair = True

    # Get the NZB name
    filename = nzo.final_name

    if nzo.fail_msg:  # Special case: aborted due to too many missing data
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False
        par_error = True
        unpack_error = 1

    try:
        # Get the folder containing the download result
        workdir = nzo.downpath
        tmp_workdir_complete = None

        # if no files are present (except __admin__), fail the job
        if all_ok and len(globber(workdir)) < 2:
            if nzo.precheck:
                _enough, ratio = nzo.check_availability_ratio()
                req_ratio = float(cfg.req_completion_rate()) / 100.0
                # Make sure that rounded ratio doesn't equal required ratio
                # when it is actually below required
                if (ratio < req_ratio) and (req_ratio - ratio) < 0.001:
                    ratio = req_ratio - 0.001
                emsg = '%.1f%%' % (ratio * 100.0)
                emsg2 = '%.1f%%' % float(cfg.req_completion_rate())
                emsg = T('Download might fail, only %s of required %s available') % (emsg, emsg2)
            else:
                emsg = T('Download failed - Not on your server(s)')
                empty = True
            emsg += ' - https://sabnzbd.org/not-complete'
            nzo.fail_msg = emsg
            nzo.set_unpack_info('Fail', emsg)
            nzo.status = Status.FAILED
            # do not run unpacking or parity verification
            flag_repair = flag_unpack = False
            all_ok = cfg.empty_postproc() and empty
            if not all_ok:
                par_error = True
                unpack_error = 1

        script = nzo.script
        logging.info('Starting Post-Processing on %s' +
                     ' => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s',
                     filename, flag_repair, flag_unpack, flag_delete, script, nzo.cat)

        # Set complete dir to workdir in case we need to abort
        workdir_complete = workdir

        # Par processing, if enabled
        if all_ok and flag_repair:
            par_error, re_add = parring(nzo, workdir)
            if re_add:
                # Try to get more par files; job has been re-queued by parring()
                return False

        # If we don't need extra par2, we can disconnect
        if sabnzbd.nzbqueue.NzbQueue.do.actives(grabs=False) == 0 and cfg.autodisconnect():
            # This was the last job, close server connections
            sabnzbd.downloader.Downloader.do.disconnect()

        # Sanitize the resulting files
        if sabnzbd.WIN32:
            sanitize_files_in_folder(workdir)

        # Check if user allows unsafe post-processing
        if flag_repair and cfg.safe_postproc():
            all_ok = all_ok and not par_error

        if all_ok:
            # Fix encodings
            fix_unix_encoding(workdir)

            # Use dirs generated by direct-unpacker
            if nzo.direct_unpacker and nzo.direct_unpacker.unpack_dir_info:
                tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = nzo.direct_unpacker.unpack_dir_info
            else:
                # Generate extraction path
                tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(nzo)

        newfiles = []
        # Run Stage 2: Unpack
        if flag_unpack:
            # set the current nzo status to "Extracting...". Used in History
            nzo.status = Status.EXTRACTING
            logging.info("Running unpack_magic on %s", filename)
            unpack_error, newfiles = unpack_magic(nzo, workdir, tmp_workdir_complete, flag_delete, one_folder, (), (), (), (), ())
            logging.info("Unpacked files %s", newfiles)

            if sabnzbd.WIN32:
                # Sanitize the resulting files
                newfiles = sanitize_files_in_folder(tmp_workdir_complete)
            logging.info("Finished unpack_magic on %s", filename)

        if cfg.safe_postproc():
            all_ok = all_ok and not unpack_error

        if all_ok:
            # Move any (left-over) files to destination
            nzo.status = Status.MOVING
            nzo.set_action_line(T('Moving'), '...')
            for root, _dirs, files in os.walk(workdir):
                if not root.endswith(JOB_ADMIN):
                    for file_ in files:
                        path = os.path.join(root, file_)
                        new_path = path.replace(workdir, tmp_workdir_complete)
                        ok, new_path = move_to_path(path, new_path)
                        if new_path:
                            newfiles.append(new_path)
                        if not ok:
                            nzo.set_unpack_info('Unpack', T('Failed moving %s to %s') % (unicoder(path), unicoder(new_path)))
                            all_ok = False
                            break

        # Set permissions right
        set_permissions(tmp_workdir_complete)

        if all_ok and marker_file:
            del_marker(os.path.join(tmp_workdir_complete, marker_file))
            remove_from_list(marker_file, newfiles)

        if all_ok:
            # Remove files matching the cleanup list
            cleanup_list(tmp_workdir_complete, True)

            # Check if this is an NZB-only download, if so redirect to queue
            # except when PP was Download-only
            if flag_repair:
                nzb_list = nzb_redirect(tmp_workdir_complete, nzo.final_name, nzo.pp, script, nzo.cat, priority=nzo.priority)
            else:
                nzb_list = None
            if nzb_list:
                nzo.set_unpack_info('Download', T('Sent %s to queue') % unicoder(nzb_list))
                cleanup_empty_directories(tmp_workdir_complete)
            else:
                cleanup_list(tmp_workdir_complete, False)

        script_output = ''
        script_ret = 0
        if not nzb_list:
            # Give destination its final name
            if cfg.folder_rename() and tmp_workdir_complete and not one_folder:
                if all_ok:
                    try:
                        newfiles = rename_and_collapse_folder(tmp_workdir_complete, workdir_complete, newfiles)
                    except:
                        logging.error(T('Error renaming "%s" to "%s"'), clip_path(tmp_workdir_complete), clip_path(workdir_complete))
                        logging.info('Traceback: ', exc_info=True)
                        # Better disable sorting because filenames are all off now
                        file_sorter.sort_file = None
                else:
                    # Failed job keeps a _FAILED_ folder so the user can inspect it
                    workdir_complete = tmp_workdir_complete.replace('_UNPACK_', '_FAILED_')
                    workdir_complete = get_unique_path(workdir_complete, n=0, create_dir=False)

            # Encode job result for the user script: -1 empty, bit 0 par, bit 1 unpack
            if empty:
                job_result = -1
            else:
                job_result = int(par_error) + int(bool(unpack_error)) * 2

            if cfg.ignore_samples():
                remove_samples(workdir_complete)

            # TV/Movie/Date Renaming code part 2 - rename and move files to parent folder
            if all_ok and file_sorter.sort_file:
                if newfiles:
                    file_sorter.rename(newfiles, workdir_complete)
                    workdir_complete, ok = file_sorter.move(workdir_complete)
                else:
                    workdir_complete, ok = file_sorter.rename_with_ext(workdir_complete)
                if not ok:
                    nzo.set_unpack_info('Unpack', T('Failed to move files'))
                    all_ok = False

            # Run the user script
            script_path = make_script_path(script)
            if (all_ok or not cfg.safe_postproc()) and (not nzb_list) and script_path:
                # Set the current nzo status to "Ext Script...". Used in History
                nzo.status = Status.RUNNING
                nzo.set_action_line(T('Running script'), unicoder(script))
                nzo.set_unpack_info('Script', T('Running user script %s') % unicoder(script), unique=True)
                script_log, script_ret = external_processing(script_path, nzo, clip_path(workdir_complete),
                                                             nzo.final_name, job_result)
                script_line = get_last_line(script_log)
                if script_log:
                    # nzo_id doubles as the key for the script-log viewer page
                    script_output = nzo.nzo_id
                if script_line:
                    nzo.set_unpack_info('Script', unicoder(script_line), unique=True)
                else:
                    nzo.set_unpack_info('Script', T('Ran %s') % unicoder(script), unique=True)
        else:
            script = ""
            script_line = ""
            script_ret = 0

        # Maybe bad script result should fail job
        if script_ret and cfg.script_can_fail():
            script_error = True
            all_ok = False
            nzo.fail_msg = T('Script exit code is %s') % script_ret
        else:
            script_error = False

        # Email the results
        if (not nzb_list) and cfg.email_endjob():
            if (cfg.email_endjob() == 1) or (cfg.email_endjob() == 2 and (unpack_error or par_error or script_error)):
                emailer.endjob(nzo.final_name, nzo.cat, all_ok, workdir_complete, nzo.bytes_downloaded,
                               nzo.fail_msg, nzo.unpack_info, script, TRANS(script_log), script_ret)

        if script_output:
            # Can do this only now, otherwise it would show up in the email
            if script_ret:
                script_ret = 'Exit(%s) ' % script_ret
            else:
                script_ret = ''
            if len(script_log.rstrip().split('\n')) > 1:
                nzo.set_unpack_info('Script',
                                    u'%s%s <a href="./scriptlog?name=%s">(%s)</a>' % (script_ret, script_line,
                                    xml.sax.saxutils.escape(script_output), T('More')), unique=True)
            else:
                # No '(more)' button needed
                nzo.set_unpack_info('Script', u'%s%s ' % (script_ret, script_line), unique=True)

        # Cleanup again, including NZB files
        if all_ok:
            cleanup_list(workdir_complete, False)

        # Force error for empty result
        all_ok = all_ok and not empty

        # Update indexer with results
        if cfg.rating_enable():
            if nzo.encrypted > 0:
                Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_ENCRYPTED)
            if empty:
                hosts = map(lambda s: s.host, sabnzbd.downloader.Downloader.do.nzo_servers(nzo))
                if not hosts:
                    hosts = [None]
                for host in hosts:
                    Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_EXPIRED, host)

    except:
        # Catch-all: any unexpected failure marks the job failed but still
        # runs the cleanup/history code below
        logging.error(T('Post Processing Failed for %s (%s)'), filename, T('see logfile'))
        logging.info("Traceback: ", exc_info=True)
        nzo.fail_msg = T('PostProcessing was aborted (%s)') % T('see logfile')
        notifier.send_notification(T('Download Failed'), filename, 'failed', nzo.cat)
        nzo.status = Status.FAILED
        par_error = True
        all_ok = False

        if cfg.email_endjob():
            emailer.endjob(nzo.final_name, nzo.cat, all_ok, clip_path(workdir_complete), nzo.bytes_downloaded,
                           nzo.fail_msg, nzo.unpack_info, '', '', 0)

    if all_ok:
        # If the folder only contains one file OR folder, have that as the path
        # Be aware that series/generic/date sorting may move a single file into a folder containing other files
        workdir_complete = one_file_or_folder(workdir_complete)
        workdir_complete = os.path.normpath(workdir_complete)

    # Clean up the NZO
    try:
        logging.info('Cleaning up %s (keep_basic=%s)', filename, str(not all_ok))
        sabnzbd.nzbqueue.NzbQueue.do.cleanup_nzo(nzo, keep_basic=not all_ok)
    except:
        logging.error(T('Cleanup of %s failed.'), nzo.final_name)
        logging.info("Traceback: ", exc_info=True)

    # Remove download folder
    if all_ok:
        try:
            if os.path.exists(workdir):
                logging.debug('Removing workdir %s', workdir)
                remove_all(workdir, recursive=True)
        except:
            logging.error(T('Error removing workdir (%s)'), clip_path(workdir))
            logging.info("Traceback: ", exc_info=True)

    # Use automatic retry link on par2 errors and encrypted/bad RARs
    if par_error or unpack_error in (2, 3):
        try_alt_nzb(nzo)

    # Show final status in history
    if all_ok:
        notifier.send_notification(T('Download Completed'), filename, 'complete', nzo.cat)
        nzo.status = Status.COMPLETED
    else:
        notifier.send_notification(T('Download Failed'), filename, 'failed', nzo.cat)
        nzo.status = Status.FAILED

    # Log the overall time taken for postprocessing
    postproc_time = int(time.time() - start)

    # Create the history DB instance
    history_db = database.HistoryDB()
    # Add the nzo to the database. Only the path, script and time taken is passed
    # Other information is obtained from the nzo
    history_db.add_history_db(nzo, clip_path(workdir_complete), nzo.downpath, postproc_time, script_log, script_line)
    # Purge items
    history_db.auto_history_purge()
    # The connection is only used once, so close it here
    history_db.close()
    sabnzbd.history_updated()
    return True
def prepare_extraction_path(nzo):
    """ Based on the information that we have, generate
        the extraction path and create the directory.
        Separated so it can be called from DirectUnpacker.
        Returns (tmp_workdir_complete, workdir_complete, file_sorter,
        one_folder, marker_file). Raises IOError when the final folder
        cannot be created.
    """
    one_folder = False
    marker_file = None
    # Determine class directory
    catdir = config.get_categories(nzo.cat).dir()
    if catdir.endswith('*'):
        # Trailing '*' on a category dir means: dump straight into it
        catdir = catdir.strip('*')
        one_folder = True
    complete_dir = real_path(cfg.complete_dir.get_path(), catdir)
    complete_dir = long_path(complete_dir)

    # TV/Movie/Date Renaming code part 1 - detect and construct paths
    if cfg.enable_meta():
        file_sorter = Sorter(nzo, nzo.cat)
    else:
        file_sorter = Sorter(None, nzo.cat)
    complete_dir = file_sorter.detect(nzo.final_name, complete_dir)
    if file_sorter.sort_file:
        # Sorting needs its own job folder, overrides the '*' setting
        one_folder = False

    complete_dir = sanitize_and_trim_path(complete_dir)

    if one_folder:
        workdir_complete = create_dirs(complete_dir)
    else:
        workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
        marker_file = set_marker(workdir_complete)

    if not workdir_complete or not os.path.exists(workdir_complete):
        logging.error(T('Cannot create final folder %s') % unicoder(os.path.join(complete_dir, nzo.final_name)))
        raise IOError

    if cfg.folder_rename() and not one_folder:
        # Unpack into a temporary '_UNPACK_' folder, renamed at the end of the job
        prefixed_path = prefix(workdir_complete, '_UNPACK_')
        tmp_workdir_complete = get_unique_path(prefix(workdir_complete, '_UNPACK_'), create_dir=False)
        try:
            renamer(workdir_complete, tmp_workdir_complete)
        except:
            pass  # On failure, just use the original name

        # Is the unique path different? Then we also need to modify the final path
        if prefixed_path != tmp_workdir_complete:
            workdir_complete = workdir_complete + os.path.splitext(tmp_workdir_complete)[1]
    else:
        tmp_workdir_complete = workdir_complete

    return tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)
        par_error -- True when verification/repair failed
        re_add    -- True when the job was re-queued to fetch extra par2 files
        Falls back to SFV and RAR based verification when no par2 sets exist.
    """
    filename = nzo.final_name
    notifier.send_notification(T('Post-processing'), filename, 'pp', nzo.cat)
    logging.info('Starting verification and repair of %s', filename)

    # Get verification status of sets (persisted between attempts)
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath, remove=False) or {}
    repair_sets = nzo.extrapars.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
                continue
            if not verified.get(setname, False):
                logging.info("Running verification and repair on set %s", setname)
                parfile_nzf = nzo.partable[setname]

                # Check if file maybe wasn't deleted and if we maybe have more files in the parset
                if os.path.exists(os.path.join(nzo.downpath, parfile_nzf.filename)) or nzo.extrapars[setname]:
                    need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)

                    # Was it aborted?
                    if not nzo.pp_active:
                        re_add = False
                        par_error = True
                        break

                    re_add = re_add or need_re_add
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res
    else:
        # We must not have found any par2..
        logging.info("No par2 sets for %s", filename)
        nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % unicoder(filename))
        if cfg.sfv_check() and not verified.get('', False):
            par_error = not try_sfv_check(nzo, workdir, '')
            verified[''] = not par_error
        # If still no success, do RAR-check
        if not par_error and cfg.enable_unrar():
            par_error = not try_rar_check(nzo, workdir, '')
            verified[''] = not par_error

    if re_add:
        # Put the job back in the queue at repair priority to fetch more blocks
        logging.info('Re-added %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        nzo.status = Status.FETCHING
        sabnzbd.nzbqueue.NzbQueue.do.add(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    # Persist verification results for a possible next attempt
    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Verification and repair finished for %s', filename)
    return par_error, re_add
def try_sfv_check(nzo, workdir, setname):
    """ Attempt to verify set using SFV file
        Return True if verified, False when failed
        When setname is '', all SFV files will be used, otherwise only the matching one
        When setname is '' and no SFV files are found, True is returned
    """
    # Get list of SFV names; shortest name first, minimizes the chance on a mismatch
    sfvs = globber_full(workdir, '*.sfv')
    sfvs.sort(lambda x, y: len(x) - len(y))  # Python 2 cmp-style sort
    par_error = False
    found = False
    for sfv in sfvs:
        if setname.lower() in os.path.basename(sfv).lower():
            found = True
            nzo.status = Status.VERIFYING
            nzo.set_unpack_info('Repair', T('Trying SFV verification'))
            nzo.set_action_line(T('Trying SFV verification'), '...')
            # sfv_check returns the list of files that failed CRC verification
            failed = sfv_check(sfv)
            if failed:
                fail_msg = T('Some files failed to verify against "%s"') % unicoder(os.path.basename(sfv))
                msg = fail_msg + '; '
                msg += '; '.join(failed)
                nzo.set_unpack_info('Repair', msg)
                par_error = True
            else:
                nzo.set_unpack_info('Repair', T('Verified successfully using SFV files'))
            if setname:
                # Only the matching SFV was requested, stop after the first hit
                break
    # Show error in GUI
    # NOTE: fail_msg is always bound here, because par_error implies the failed-branch ran
    if found and par_error:
        nzo.status = Status.FAILED
        nzo.fail_msg = fail_msg
    return (found or not setname) and not par_error
def try_rar_check(nzo, workdir, setname):
    """ Attempt to verify set using the RARs
        Return True if verified, False when failed
        When setname is '', all RAR files will be used, otherwise only the matching one
        If no RAR's are found, returns True
    """
    _, _, rars, _, _ = build_filelists(workdir)

    if setname:
        # Filter based on set
        rars = [rar for rar in rars if os.path.basename(rar).startswith(setname)]

    # Sort in natural RAR volume order so the first volume is tested
    rars.sort(rar_sort)

    # Test
    if rars:
        nzo.status = Status.VERIFYING
        nzo.set_unpack_info('Repair', T('Trying RAR-based verification'))
        nzo.set_action_line(T('Trying RAR-based verification'), '...')
        try:
            # Set path to unrar and open the file
            # Requires de-unicode for RarFile to work!
            rarfile.UNRAR_TOOL = sabnzbd.newsunpack.RAR_COMMAND
            zf = rarfile.RarFile(rars[0])

            # Skip if it's encrypted: counts as verified, unpack will handle it
            if zf.needs_password():
                msg = T('[%s] RAR-based verification failed: %s') % (unicoder(os.path.basename(rars[0])), T('Passworded'))
                nzo.set_unpack_info('Repair', msg)
                return True

            # Will throw exception if something is wrong
            zf.testrar()
            # Success!
            msg = T('RAR files verified successfully')
            nzo.set_unpack_info('Repair', msg)
            logging.info(msg)
            return True
        except rarfile.Error as e:
            # e.message is Python 2 only
            nzo.fail_msg = T('RAR files failed to verify')
            msg = T('[%s] RAR-based verification failed: %s') % (unicoder(os.path.basename(rars[0])), unicoder(e.message.replace('\r\n', ' ')))
            nzo.set_unpack_info('Repair', msg)
            logging.info(msg)
            return False
    else:
        # No rar-files, so just continue
        return True
def handle_empty_queue():
    """ Check if empty queue calls for action.
        Saves program state and launches the configured end-of-queue action
        (either a callable with an argument, or a callable run in a thread).
    """
    if sabnzbd.nzbqueue.NzbQueue.do.actives() == 0:
        sabnzbd.save_state()
        # Perform end-of-queue action when one is set
        if sabnzbd.QUEUECOMPLETEACTION:
            logging.info("Queue has finished, launching: %s (%s)",
                         sabnzbd.QUEUECOMPLETEACTION, sabnzbd.QUEUECOMPLETEARG)
            if sabnzbd.QUEUECOMPLETEARG:
                sabnzbd.QUEUECOMPLETEACTION(sabnzbd.QUEUECOMPLETEARG)
            else:
                # Run in a thread so a long action does not block post-processing
                Thread(target=sabnzbd.QUEUECOMPLETEACTION).start()
            # Re-arm (or reset) the action according to the configured setting
            sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)
def cleanup_list(wdir, skip_nzb):
    """ Remove all files whose extension matches the cleanup list,
        optionally ignoring the nzb extension.
        Recurses into subdirectories; afterwards tries to remove wdir itself
        (succeeds only when it ended up empty).
    """
    if cfg.cleanup_list():
        try:
            files = os.listdir(wdir)
        except:
            files = ()
        for filename in files:
            path = os.path.join(wdir, filename)
            if os.path.isdir(path):
                cleanup_list(path, skip_nzb)
            else:
                if on_cleanup_list(filename, skip_nzb):
                    try:
                        logging.info("Removing unwanted file %s", path)
                        remove_file(path)
                    except:
                        logging.error(T('Removing %s failed'), clip_path(path))
                        logging.info("Traceback: ", exc_info=True)
        if files:
            # Best-effort: only succeeds when the folder is now empty
            try:
                remove_dir(wdir)
            except:
                pass
def prefix(path, pre):
    """ Apply prefix to last part of path
        '/my/path' and 'hi_' will give '/my/hi_path'
    """
    parent = os.path.dirname(path)
    leaf = os.path.basename(path)
    return os.path.join(parent, pre + leaf)
def nzb_redirect(wdir, nzbname, pp, script, cat, priority):
    """ Check if this job contains only NZB files,
        if so send to queue and remove if on CleanList
        Returns list of processed NZB's, or None when any non-NZB file exists
    """
    files = recursive_listdir(wdir)
    for file_ in files:
        if os.path.splitext(file_)[1].lower() != '.nzb':
            # Any other file type means this is a regular download
            return None

    # For multiple NZBs, cannot use the current job name
    if len(files) != 1:
        nzbname = None

    # Process all NZB files
    for file_ in files:
        dirscanner.ProcessSingleFile(os.path.split(file_)[1], file_, pp, script, cat,
                                     priority=priority, keep=False, dup_check=False, nzbname=nzbname)
    return files
def one_file_or_folder(folder):
    """ If the dir only contains one file or folder, join that file/folder onto the path.
        Recurses until the folder holds anything other than a single entry.
        Non-existing paths or non-directories are returned unchanged.
    """
    if os.path.exists(folder) and os.path.isdir(folder):
        try:
            cont = os.listdir(folder)
            if len(cont) == 1:
                folder = os.path.join(folder, cont[0])
                folder = one_file_or_folder(folder)
        except OSError:
            # Can occur on paths it doesn't like, for example "C:"
            # Catch OSError (parent of WindowsError): the original
            # `except WindowsError` raised NameError on non-Windows platforms
            # when os.listdir failed, since WindowsError only exists on Windows.
            pass
    return folder
# Crude matcher for anything that looks like an HTML/XML tag
TAG_RE = re.compile(r'<[^>]+>')


def get_last_line(txt):
    """ Return last non-empty line of a text, trim to 150 max """
    # First we remove HTML code in a basic way
    plain = TAG_RE.sub(' ', txt)
    # Walk backwards over the lines until a non-blank one is found
    all_lines = plain.split('\n')
    idx = len(all_lines) - 1
    while idx >= 0 and not all_lines[idx].strip('\r\t '):
        idx -= 1
    last = all_lines[idx].strip('\r\t ')
    # Trim overly long lines to at most 150 characters
    if len(last) >= 150:
        last = last[:147] + '...'
    return last
def remove_samples(path):
    """ Remove all files that match the sample pattern """
    for root, _dirs, files in os.walk(path):
        for file_ in files:
            if RE_SAMPLE.search(file_):
                # NOTE: rebinding 'path' shadows the parameter; safe here
                # because os.walk() already captured the original value
                path = os.path.join(root, file_)
                try:
                    logging.info("Removing unwanted sample file %s", path)
                    remove_file(path)
                except:
                    logging.error(T('Removing %s failed'), clip_path(path))
                    logging.info("Traceback: ", exc_info=True)
def rename_and_collapse_folder(oldpath, newpath, files):
    """ Rename folder, collapsing when there's just a single subfolder
        oldpath --> newpath OR oldpath/subfolder --> newpath
        Modify list of filenames accordingly; returns the updated list.
    """
    orgpath = oldpath
    items = globber(oldpath)
    if len(items) == 1:
        folder = items[0]
        folder_path = os.path.join(oldpath, folder)
        # Never collapse DVD structure folders
        if os.path.isdir(folder_path) and folder not in ('VIDEO_TS', 'AUDIO_TS'):
            logging.info('Collapsing %s', os.path.join(newpath, folder))
            oldpath = folder_path

    oldpath = os.path.normpath(oldpath)
    newpath = os.path.normpath(newpath)
    # Rewrite the recorded file locations to their post-rename paths
    files = [os.path.normpath(f).replace(oldpath, newpath) for f in files]

    renamer(oldpath, newpath)
    try:
        # Remove the now-empty original folder (only relevant after collapsing)
        remove_dir(orgpath)
    except:
        pass
    return files
def set_marker(folder):
    """ Set marker file and return name.
        Returns None when no marker is configured or creation failed.
    """
    name = cfg.marker_file()
    if name:
        path = os.path.join(folder, name)
        logging.debug('Create marker file %s', path)
        try:
            # Create an empty file as the marker
            fp = open(path, 'w')
            fp.close()
        except:
            logging.info('Cannot create marker file %s', path)
            logging.info("Traceback: ", exc_info=True)
            name = None
    return name
def del_marker(path):
    """ Remove marker file; silently logs (not raises) on failure """
    if path and os.path.exists(path):
        logging.debug('Removing marker file %s', path)
        try:
            remove_file(path)
        except:
            logging.info('Cannot remove marker file %s', path)
            logging.info("Traceback: ", exc_info=True)
def remove_from_list(name, lst):
    """ Remove (in place) the first entry of lst whose value ends with name.
        Does nothing when name is empty/None or no entry matches.
    """
    if name:
        # enumerate() instead of the original xrange() index loop:
        # works on both Python 2 and 3 and avoids manual index arithmetic
        for n, item in enumerate(lst):
            if item.endswith(name):
                logging.debug('Popping %s', item)
                lst.pop(n)
                return
def try_alt_nzb(nzo):
    """ Try to get a new NZB if available.
        Uses the 'failure' retry URL supplied by the indexer, when the
        new-NZB-on-failure option is enabled.
    """
    url = nzo.nzo_info.get('failure')
    if url and cfg.new_nzb_on_failure():
        sabnzbd.add_url(url, nzo.pp, nzo.script, nzo.cat, nzo.priority)
|
adb-d8.py | #!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Runs an android build of d8 over adb, with any given arguments. Files
# requested by d8 are transferred on-demand from the caller, by reverse port
# forwarding a simple TCP file server from the computer to the android device.
#
# Usage:
# adb-d8.py [-v|--verbose] <build_dir> [<d8_args>...]
#
# Options:
# -v|--verbose Print verbose information.
# <build_dir> The directory containing the android build of d8.
# <d8_args>... The arguments passed through to d8.
from __future__ import print_function
import os
import sys
import struct
import threading
import subprocess
import SocketServer # TODO(leszeks): python 3 compatibility
def CreateFileHandlerClass(root_path, verbose):
    """Return a request-handler class serving files below root_path.

    Protocol: the client sends a NUL-terminated filename; the server replies
    with a big-endian int32 length followed by the file contents, or -1 on
    any error. Closure over root_path/verbose avoids per-instance state.
    """
    class FileHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            # Read until the NUL terminator arrives (Python 2 str semantics)
            data = self.request.recv(1024);
            while data[-1] != "\0":
                data += self.request.recv(1024);
            filename = data[0:-1]
            try:
                # Refuse anything that escapes the served root
                filename = os.path.abspath(filename)
                if not filename.startswith(root_path):
                    raise Exception("{} not in root {}".format(filename, root_path))
                if not os.path.isfile(filename):
                    raise Exception("{} is not a file".format(filename))
                if verbose:
                    sys.stdout.write("Serving {}\r\n".format(os.path.relpath(filename)))
                with open(filename) as f:
                    contents = f.read();
                # Length prefix (network byte order), then the payload
                self.request.sendall(struct.pack("!i", len(contents)))
                self.request.sendall(contents)
            except Exception as e:
                if verbose:
                    sys.stderr.write(
                        "Request failed ({})\n".format(e).replace('\n','\r\n'))
                # Negative length signals failure to the device-side reader
                self.request.sendall(struct.pack("!i", -1))
    return FileHandler
def TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose):
    """Copy d8 and its blobs to the device, skipping files already up to date.

    Up-to-date detection: local md5sum output is piped into `md5sum -c` on
    the device; only files reported FAILED are pushed.
    """
    files_to_copy = ["d8", "natives_blob.bin", "snapshot_blob.bin"]

    # Pipe the output of md5sum from the local computer to the device, checking
    # the md5 hashes on the device.
    local_md5_sum_proc = subprocess.Popen(
        ["md5sum"] + files_to_copy,
        cwd=build_dir,
        stdout=subprocess.PIPE
    )
    device_md5_check_proc = subprocess.Popen(
        [
            adb, "shell",
            "mkdir -p '{0}' ; cd '{0}' ; md5sum -c -".format(device_d8_dir)
        ],
        stdin=local_md5_sum_proc.stdout,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )

    # Push any files which failed the md5 check.
    (stdoutdata, stderrdata) = device_md5_check_proc.communicate()
    for line in stdoutdata.split('\n'):
        if line.endswith(": FAILED"):
            filename = line[:-len(": FAILED")]
            if verbose:
                print("Updating {}...".format(filename))
            subprocess.check_call([
                adb, "push",
                os.path.join(build_dir, filename),
                device_d8_dir
            ], stdout=sys.stdout if verbose else open(os.devnull, 'wb'))
def AdbForwardDeviceToLocal(adb, device_port, server_port, verbose):
    """Reverse-forward TCP connections on the device's port to the local server port."""
    if verbose:
        print("Forwarding device:{} to localhost:{}...".format(
            device_port, server_port))
    reverse_command = [
        adb, "reverse",
        "tcp:{}".format(device_port),
        "tcp:{}".format(server_port),
    ]
    subprocess.check_call(reverse_command)
def AdbRunD8(adb, device_d8_dir, device_port, d8_args, verbose):
    """Run d8 on the device over adb, returning its exit code."""
    # Single-quote each passthrough argument and prepend the TCP-port flag.
    joined_args = " ".join("'{}'".format(arg) for arg in d8_args)
    joined_args = "--read-from-tcp-port='{}' ".format(device_port) + joined_args
    # Build the device-side command manually (device os, not host os, so no
    # os.path.join for the d8 binary path).
    shell_command = "{}/d8 {}".format(device_d8_dir, joined_args)
    if verbose:
        print("Running adb shell -t \"{}\"".format(shell_command))
    # -t allocates a tty, needed when d8 is started without a script (REPL).
    return subprocess.call([adb, "shell", "-t", shell_command])
def PrintUsage(file=sys.stdout):
    """Print the one-line usage summary to *file* (stdout by default)."""
    usage_text = "Usage: adb-d8.py [-v|--verbose] [--] <build_dir> [<d8 args>...]"
    print(usage_text, file=file)
def PrintHelp(file=sys.stdout):
    """Print the full help text to *file* (stdout by default)."""
    # NOTE(review): interior indentation of this literal was lost in
    # extraction; reconstructed here — confirm against upstream before relying
    # on exact spacing.
    print("""Usage:
   adb-d8.py [-v|--verbose] [--] <build_dir> [<d8_args>...]
   adb-d8.py -h|--help

Options:
   -h|--help      Show this help message and exit
   -v|--verbose   Print verbose output

   <build_dir>    The directory containing the android build of d8
   <d8_args>...   The arguments passed through to d8""", file=file)
def Main():
    """Entry point: parse flags, push d8 to the device, serve files, run d8.

    Returns the process exit code (d8's exit code on success, 1 on usage
    errors, 0 after --help).
    """
    if len(sys.argv) < 2:
        PrintUsage(sys.stderr)
        return 1

    script_dir = os.path.dirname(sys.argv[0])

    # Use the platform-tools version of adb so that we know it has the reverse
    # command.
    adb = os.path.join(
        script_dir,
        "../third_party/android_tools/sdk/platform-tools/adb"
    )

    # Read off any command line flags before build_dir (or --). Do this
    # manually, rather than using something like argparse, to be able to split
    # the adb-d8 options from the passthrough d8 options.
    verbose = False
    for arg_index, arg in enumerate(sys.argv[1:], start=1):
        if not arg.startswith("-"):
            break
        elif arg == "--":
            arg_index += 1
            break
        elif arg == "-h" or arg == "--help":
            PrintHelp(sys.stdout)
            return 0
        elif arg == "-v" or arg == "--verbose":
            verbose = True
        else:
            print("ERROR: Unrecognised option: {}".format(arg))
            PrintUsage(sys.stderr)
            return 1

    # Transfer d8 (and dependencies) to the device.
    build_dir = os.path.abspath(sys.argv[arg_index])
    device_d8_dir = '/data/local/tmp/v8'
    TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose)

    # Start a file server for the files d8 might need.
    script_root_dir = os.path.abspath(os.curdir)
    server = SocketServer.TCPServer(
        ("localhost", 0),  # 0 means an arbitrary unused port.
        CreateFileHandlerClass(script_root_dir, verbose)
    )
    try:
        # Start the file server in its own thread (daemon so it never blocks exit).
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        # Port-forward the given device port to the file server.
        # TODO(leszeks): Pick an unused device port.
        # TODO(leszeks): Remove the port forwarding on exit.
        server_ip, server_port = server.server_address
        device_port = 4444
        AdbForwardDeviceToLocal(adb, device_port, server_port, verbose)

        # Run d8 over adb with the remaining arguments, using the given device
        # port to forward file reads.
        return AdbRunD8(
            adb, device_d8_dir, device_port, sys.argv[arg_index+1:], verbose)
    finally:
        if verbose:
            print("Shutting down file server...")
        server.shutdown()
        server.server_close()
# Script entry point: propagate d8's exit code to the shell.
if __name__ == '__main__':
    sys.exit(Main())
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2, httplib)."""
    # Request id counter; incremented per call (shadowed per-instance on
    # first use via `self.OBJID += 1`)
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header, reused for every request
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Persistent connection, 30 second timeout
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Perform one JSON-RPC call; returns the result, the error object on
        RPC error, or None on transport/decode problems."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            # NOTE: returns the error object itself, not None — callers must
            # distinguish results from errors
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        # Convenience wrapper for the 'getblockcount' RPC
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # Convenience wrapper for the 'getwork' RPC (data submits a solution)
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate an integer to its low 32 bits (unsigned semantics)."""
    # 0xffffffff without the Python-2-only 'L' suffix: identical value on
    # Python 2 (auto-promotes to long as needed) and valid Python 3 syntax.
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word (endianness flip)."""
    byte0 = (x) << 24
    byte1 = ((x) << 8) & 0x00ff0000
    byte2 = ((x) >> 8) & 0x0000ff00
    byte3 = (x) >> 24
    return uint32(byte0 | byte1 | byte2 | byte3)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of the buffer, preserving word order."""
    swapped = []
    for offset in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[offset:offset + 4])
        swapped.append(struct.pack('@I', bytereverse(word)))
    return ''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in the buffer."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
    """Connect to the upstream bitcoind RPC endpoint and mine forever."""
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpass'])
    if rpc is None:
        # NOTE(review): a constructor cannot return None, so this guard
        # looks like dead code — confirm BitcoinRPC's behavior.
        return

    while True:
        self.iterate(rpc)
def miner_thread(id):
    # Process entry point: each worker process runs its own Miner loop.
    # NOTE: the parameter name 'id' shadows the builtin id().
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    # Usage: pyminer.py CONFIG-FILE
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the simple key=value config file into the global `settings` dict.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 19994
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L

    # Credentials are mandatory.
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Spawn one mining process per configured "thread", staggered by 1s.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Block until all workers exit (or the user hits Ctrl-C).
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
meraki_captive_portal_simulator.py | """
Cisco Meraki Captive Portal simulator
Default port: 5003
Matt DeNapoli
2018
https://developer.cisco.com/site/Meraki
"""
# Libraries
from flask import Flask, request, render_template, redirect, url_for
import random
import datetime
import time
import requests
import webview
import netifaces as nif
import datetime
import threading
app = Flask(__name__)

# Globals
# Shared between the /connecttowifi and /splash/grant handlers.
# NOTE: 'global' at module level is a no-op; kept for parity with the original.
global captive_portal_url
captive_portal_url = ""
global user_continue_url
user_continue_url = ""
@app.route("/go", methods=["GET"])
def get_go():
    """Render the simulator landing page.

    **locals() forwards this function's local variables to the template;
    there are none here, so index.html sees only the standard Flask context.
    """
    return render_template("index.html", **locals())
# Kick off simulator and create baseline dataset
@app.route("/connecttowifi", methods=["POST"])
def connect_to_wifi():
    """Simulate a Meraki splash redirect.

    Builds the captive-portal URL with the same query parameters a Meraki
    AP would append, opens it in the embedded webview, and renders a
    confirmation page.
    """
    global captive_portal_url
    global user_continue_url

    captive_portal_url = request.form["captive_portal_url"]
    user_continue_url = request.form["user_continue_url"]
    base_grant_url = request.host_url + "splash/grant"

    # Fabricate per-client identifiers, as a real AP would supply them.
    node_mac = generate_fake_mac()
    client_ip = request.remote_addr
    client_mac = generate_fake_mac()
    splashclick_time = datetime.datetime.now()  # kept for parity; currently unused

    query = (
        "?base_grant_url=" + base_grant_url
        + "&user_continue_url=" + user_continue_url
        + "&node_mac=" + node_mac
        + "&client_ip=" + client_ip
        + "&client_mac=" + client_mac
    )
    full_url = captive_portal_url + query

    webview.load_url(full_url, uid='master')
    return render_template("connected.html", full_url=full_url)
@app.route("/splash/grant", methods=["GET"])
def continue_to_url():
    """After the simulated access grant, bounce the client to its continue_url."""
    destination = request.args.get("continue_url")
    return redirect(destination, code=302)
def generate_fake_mac():
    """Return a random lowercase MAC address string, e.g. 'a1:b2:c3:d4:e5:f6'."""
    hex_digits = "0123456789abcdef"
    octets = []
    for _ in range(6):
        # Two independent hex digits per octet, same draw order as before.
        octets.append(random.choice(hex_digits) + random.choice(hex_digits))
    return ":".join(octets)
@app.route("/setupserver", methods=["GET"])
def setupserver():
    """Render the setup page, passing it the simulator's own /go URL."""
    return render_template("setupserver.html", serversetupurl=request.host_url
                           + "go")
def start_server():
    # Blocking Flask server, bound to all interfaces so wifi clients can
    # reach the portal; intended to run on a background thread.
    app.run(host="0.0.0.0", threaded=True, port=5003, debug=False)
if __name__ == "__main__":
    # Run Flask on a background thread so pywebview can own the main thread.
    t = threading.Thread(target=start_server)
    # BUG FIX: was `t.dameon = True` (typo), which merely set a meaningless
    # attribute and left the server thread non-daemonic, so the process could
    # hang after the webview window closed.  daemon must be set before start().
    t.daemon = True
    t.start()
    webview.create_window("Captive Portal", "http://localhost:5003/setupserver",
        js_api=None, width=800, height=600, resizable=True, fullscreen=False,
        min_size=(200, 100), strings={}, confirm_quit=False,
        background_color='#FFF', debug=False, text_select=True)
|
Hiwin_RT605_ArmCommand_Socket_20190627180740.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0               # TCP socket to the arm controller (set in socket_client)
data = '0'               # initial value of the transmitted command string
Arm_feedback = 1         # assume the arm is busy at startup
NAME = 'socket_server'   # ROS node name
arm_mode_flag = False    # True when a new arm command is staged for sending
##------------class pos-------
class point():
    """Mutable 6-DOF pose container (position x/y/z + pitch/roll/yaw)."""

    _FIELDS = ("x", "y", "z", "pitch", "roll", "yaw")

    def __init__(self, x, y, z, pitch, roll, yaw):
        for name, value in zip(self._FIELDS, (x, y, z, pitch, roll, yaw)):
            setattr(self, name, value)

# Module-level current target pose, updated by point_data().
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
    """Holds the pending arm-command fields staged for socket transmission."""

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        staged = dict(grip=grip, setvel=setvel, ra=ra, delay=delay,
                      setboth=setboth, action=action, Speedmode=Speedmode)
        for name, value in staged.items():
            setattr(self, name, value)

# Module-level command buffer, filled by Arm_Mode()/Speed_Mode().
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
    """Tiny C-style switch/case helper.

    Usage::

        for case in switch(value):
            if case(A): ...; break
            if case(B): ...; break

    Iterating yields the ``match`` method exactly once.  ``match`` returns
    True when ``value`` is among its arguments, unconditionally after a
    previous match (emulating C fall-through), or when called with no
    arguments (emulating ``default``).
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUG FIX: this used `raise StopIteration`.  Under PEP 479
        # (Python 3.7+) raising StopIteration inside a generator is
        # converted to RuntimeError, crashing any loop that exhausts the
        # iterator instead of break-ing out.  A plain return ends the
        # generator correctly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Latest state reported back by the arm controller."""

    def __init__(self, ArmState, SentFlag):
        # ArmState: 0 = ready, 1 = busy, 6 = strategy finished
        # SentFlag: 0/1 acknowledgement of the last transmitted command
        self.ArmState, self.SentFlag = ArmState, SentFlag

# Module-level feedback buffer, updated by socket_client().
state_feedback = StateFeedback(0,0)
def point_data(x, y, z, pitch, roll, yaw):
    """Receive a pose from the strategy side and store it in the global pos.

    Note: values are stored stringified (via '%s' formatting), matching what
    the downstream TCP command builders expect.
    """
    incoming = (x, y, z, pitch, roll, yaw)
    for attr, value in zip(("x", "y", "z", "pitch", "roll", "yaw"), incoming):
        setattr(pos, attr, '%s' % value)
##----------Arm Mode-------------###
def Arm_Mode(action, grip, ra, setvel, setboth):
    """Receive an arm-mode command from the strategy side, stage it in
    socket_cmd, and trigger transmission via Socket_command()."""
    global arm_mode_flag
    fields = {"action": action, "grip": grip, "ra": ra,
              "setvel": setvel, "setboth": setboth}
    for name, value in fields.items():
        # Original pattern: int('%s' % v) — stringify, then parse back to int.
        setattr(socket_cmd, name, int('%s' % value))
    arm_mode_flag = True
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):
    """Receive the arm speed mode (fast/safe) from the strategy side."""
    global speed_mode_flag  # NOTE(review): declared but never assigned here
    socket_cmd.Speedmode = speedmode
def socket_talker():
    """Create the ROS server node and publish arm state on 'chatter' at 10 Hz."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        # [ArmState, SentFlag] as last reported by the socket client thread.
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        # rospy.loginfo(state)
        pub.publish(state)
        rate.sleep()
##---------- socket packet transmission --------------##
##--------------- send arm command over the socket -----------------
def Socket_command():
    """Build the TCP command string for the staged socket_cmd and send it.

    Dispatches on socket_cmd.action (PtP / Line / SetVel / Delay / Mode) and,
    for motion commands, on socket_cmd.setboth (position / euler / both).
    NOTE: the inner `for case in switch(...)` intentionally shadows the outer
    loop's `case`; each loop is exited with break after the first match.
    """
    global Socket,arm_mode_flag,data
    if arm_mode_flag == True:
        arm_mode_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP Mode--------
            if case(Taskcmd.Action_Type.PtoP):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                break
            #-------Line Mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #------- set arm velocity --------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #------- set arm delay time --------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #------- set arm fast/safe speed mode --------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 5 ## reset to the idle/default mode state
        print(data)
        Socket.send(data.encode('utf-8'))# encode the command string for socket transmission
##-----------socket client--------
def socket_client():
    """Connect to the arm controller over TCP and poll its feedback forever.

    Updates the module-level state_feedback from bytes 2 and 4 of each
    feedback packet (compared via their ASCII codes: 48='0', 49='1', 54='6').
    """
    global Socket
    try:
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(Socket.recv(1024))
    while 1:
        feedback_str = Socket.recv(1024)
        # Arm state reported by the controller:
        if str(feedback_str[2]) == '48':# '0' -> arm is Ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# '1' -> arm is busy; cannot take the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# '6' -> strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # Acknowledgement flag for the last transmitted command:
        if str(feedback_str[4]) == '48':# returned 0 -> false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returned 1 -> true
            state_feedback.SentFlag = 1
        ##--------------- arm command transmission end -----------------
        if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
            break
    rospy.on_shutdown(myhook)
    Socket.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
# def thread_test():
# socket_client()
## 多執行序 end
def myhook():
    """rospy shutdown hook: announce that the node is stopping."""
    sys.stdout.write("shutdown time!\n")
if __name__ == '__main__':
    socket_cmd.action = 5## reset to the idle/default mode state
    # Run the TCP client on a background thread; the ROS publisher runs on
    # the main thread until shutdown.
    t = threading.Thread(target=socket_client)
    t.start()  # start the worker thread
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
Client.py | from tkinter import *
import tkinter.messagebox
from PIL import Image, ImageTk
import socket, threading, sys, traceback, os
from RtpPacket import RtpPacket
# Per-session temp JPEG used to hand frames to Tk: "cache-<sessionId>.jpg"
CACHE_FILE_NAME = "cache-"
CACHE_FILE_EXT = ".jpg"
class Client:
    """Tkinter RTSP/RTP video streaming client.

    Drives an RTSP control session (SETUP/PLAY/PAUSE/TEARDOWN) over TCP and
    receives JPEG frames over a UDP RTP socket, rendering them in a Tk label.
    """
    # Session states
    INIT = 0
    READY = 1
    PLAYING = 2
    state = INIT

    # RTSP request codes
    SETUP = 0
    PLAY = 1
    PAUSE = 2
    TEARDOWN = 3

    # Initiation..
    def __init__(self, master, serveraddr, serverport, rtpport, filename):
        self.master = master
        self.master.protocol("WM_DELETE_WINDOW", self.handler)
        self.createWidgets()
        self.serverAddr = serveraddr
        self.serverPort = int(serverport)
        self.rtpPort = int(rtpport)
        self.fileName = filename
        self.rtspSeq = 0
        self.sessionId = 0
        self.requestSent = -1
        self.teardownAcked = 0
        self.connectToServer()
        self.frameNbr = 0

    def createWidgets(self):
        """Build GUI."""
        # Create Setup button
        self.setup = Button(self.master, width=20, padx=3, pady=3)
        self.setup["text"] = "Setup"
        self.setup["command"] = self.setupMovie
        self.setup.grid(row=1, column=0, padx=2, pady=2)

        # Create Play button
        self.start = Button(self.master, width=20, padx=3, pady=3)
        self.start["text"] = "Play"
        self.start["command"] = self.playMovie
        self.start.grid(row=1, column=1, padx=2, pady=2)

        # Create Pause button
        self.pause = Button(self.master, width=20, padx=3, pady=3)
        self.pause["text"] = "Pause"
        self.pause["command"] = self.pauseMovie
        self.pause.grid(row=1, column=2, padx=2, pady=2)

        # Create Teardown button
        self.teardown = Button(self.master, width=20, padx=3, pady=3)
        self.teardown["text"] = "Teardown"
        self.teardown["command"] = self.exitClient
        self.teardown.grid(row=1, column=3, padx=2, pady=2)

        # Create a label to display the movie
        self.label = Label(self.master, height=19)
        self.label.grid(row=0, column=0, columnspan=4, sticky=W+E+N+S, padx=5, pady=5)

    def setupMovie(self):
        """Setup button handler."""
        if self.state == self.INIT:
            self.sendRtspRequest(self.SETUP)

    def exitClient(self):
        """Teardown button handler."""
        self.sendRtspRequest(self.TEARDOWN)
        self.master.destroy()  # Close the gui window
        os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT)  # Delete the cache image from video

    def pauseMovie(self):
        """Pause button handler."""
        if self.state == self.PLAYING:
            self.sendRtspRequest(self.PAUSE)

    def playMovie(self):
        """Play button handler."""
        if self.state == self.READY:
            # Create a new thread to listen for RTP packets
            threading.Thread(target=self.listenRtp).start()
            self.playEvent = threading.Event()
            self.playEvent.clear()
            self.sendRtspRequest(self.PLAY)

    def listenRtp(self):
        """Listen for RTP packets."""
        while True:
            try:
                data = self.rtpSocket.recv(20480)
                if data:
                    rtpPacket = RtpPacket()
                    rtpPacket.decode(data)

                    currFrameNbr = rtpPacket.seqNum()
                    print("Current Seq Num: " + str(currFrameNbr))

                    if currFrameNbr > self.frameNbr:  # Discard the late packet
                        self.frameNbr = currFrameNbr
                        self.updateMovie(self.writeFrame(rtpPacket.getPayload()))
            except:
                # recv timed out (0.5s) or errored.
                # Stop listening upon requesting PAUSE or TEARDOWN
                if self.playEvent.is_set():
                    break

                # Upon receiving ACK for TEARDOWN request,
                # close the RTP socket
                if self.teardownAcked == 1:
                    self.rtpSocket.shutdown(socket.SHUT_RDWR)
                    self.rtpSocket.close()
                    break

    def writeFrame(self, data):
        """Write the received frame to a temp image file. Return the image file."""
        cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
        # Context manager guarantees the handle is closed even on error
        # (previous code bound the handle to `file`, shadowing the builtin).
        with open(cachename, "wb") as cache_file:
            cache_file.write(data)
        return cachename

    def updateMovie(self, imageFile):
        """Update the image file as video frame in the GUI."""
        photo = ImageTk.PhotoImage(Image.open(imageFile))
        self.label.configure(image=photo, height=288)
        # Keep a reference so Tk doesn't garbage-collect the image.
        self.label.image = photo

    def connectToServer(self):
        """Connect to the Server. Start a new RTSP/TCP session."""
        self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.rtspSocket.connect((self.serverAddr, self.serverPort))
        except:
            # BUG FIX: was `tkMessageBox` (Python 2 module name) — a NameError
            # under Python 3; the file imports tkinter.messagebox.
            tkinter.messagebox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' % self.serverAddr)

    def sendRtspRequest(self, requestCode):
        """Send RTSP request to the server."""
        # Setup request
        if requestCode == self.SETUP and self.state == self.INIT:
            threading.Thread(target=self.recvRtspReply).start()
            self.rtspSeq = 1
            request = "SETUP " + str(self.fileName) + "\n" + str(self.rtspSeq) + "\n" + " RTSP/1.0 RTP/UDP " + str(self.rtpPort)
            self.rtspSocket.send(request.encode())
            self.requestSent = self.SETUP
        # Play request
        elif requestCode == self.PLAY and self.state == self.READY:
            self.rtspSeq = self.rtspSeq + 1
            request = "PLAY " + "\n" + str(self.rtspSeq)
            self.rtspSocket.send(request.encode())
            self.requestSent = self.PLAY
        # Pause request
        elif requestCode == self.PAUSE and self.state == self.PLAYING:
            self.rtspSeq = self.rtspSeq + 1
            request = "PAUSE " + "\n" + str(self.rtspSeq)
            self.rtspSocket.send(request.encode())
            self.requestSent = self.PAUSE
        # Teardown request
        elif requestCode == self.TEARDOWN and not self.state == self.INIT:
            self.rtspSeq = self.rtspSeq + 1
            request = "TEARDOWN " + "\n" + str(self.rtspSeq)
            self.rtspSocket.send(request.encode())
            self.requestSent = self.TEARDOWN
        else:
            return

    def recvRtspReply(self):
        """Receive RTSP replies until the session is torn down."""
        while True:
            reply = self.rtspSocket.recv(1024)
            if reply:
                self.parseRtspReply(reply)

            # Close the RTSP socket upon requesting Teardown
            if self.requestSent == self.TEARDOWN:
                self.rtspSocket.shutdown(socket.SHUT_RDWR)
                self.rtspSocket.close()
                break

    def parseRtspReply(self, data):
        """Parse the RTSP reply from the server."""
        # BUG FIX: recv() returns bytes on Python 3; splitting bytes with a
        # str separator raised TypeError.  Decode first.
        if isinstance(data, bytes):
            data = data.decode("utf-8")
        lines = data.split('\n')
        seqNum = int(lines[1].split(' ')[1])

        # Process only if the server reply's sequence number is the same as the request's
        if seqNum == self.rtspSeq:
            session = int(lines[2].split(' ')[1])
            # New RTSP session ID
            if self.sessionId == 0:
                self.sessionId = session

            # Process only if the session ID is the same
            if self.sessionId == session:
                if int(lines[0].split(' ')[1]) == 200:
                    if self.requestSent == self.SETUP:
                        self.state = self.READY
                        # Open RTP port.
                        self.openRtpPort()
                    elif self.requestSent == self.PLAY:
                        self.state = self.PLAYING
                    elif self.requestSent == self.PAUSE:
                        self.state = self.READY
                        # The play thread exits. A new thread is created on resume.
                        self.playEvent.set()
                    elif self.requestSent == self.TEARDOWN:
                        # self.state = ...
                        # Flag the teardownAcked to close the socket.
                        self.teardownAcked = 1

    def openRtpPort(self):
        """Open RTP socket binded to a specified port."""
        # BUG FIX: the RTP socket was never created, so settimeout()/bind()
        # raised AttributeError.  RTP rides on UDP.
        self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Short timeout so listenRtp can notice pause/teardown requests.
        self.rtpSocket.settimeout(0.5)
        try:
            # NOTE(review): binding to the *server* address only works when
            # client and server share a host; '' would accept any interface.
            self.rtpSocket.bind((self.serverAddr, self.rtpPort))
        except:
            # BUG FIX: was `tkMessageBox` (Python 2 name) — a NameError here.
            tkinter.messagebox.showwarning('Unable to Bind', 'Unable to bind PORT=%d' % self.rtpPort)

    def handler(self):
        """Handler on explicitly closing the GUI window."""
        self.pauseMovie()
        # BUG FIX: was `tkMessageBox` (Python 2 name) — a NameError here.
        if tkinter.messagebox.askokcancel("Quit?", "Are you sure you want to quit?"):
            self.exitClient()
        else:  # When the user presses cancel, resume playing.
            self.playMovie()
            # NOTE(review): playMovie() already spawns a listener thread and
            # sends PLAY; the next two lines appear to duplicate that work —
            # confirm before removing.
            threading.Thread(target=self.listenRtp).start()
            self.sendRtspRequest(self.PLAY)
|
server.py | """RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
from __future__ import absolute_import
import os
import ctypes
import socket
import select
import struct
import logging
import multiprocessing
import subprocess
import time
from ..._ffi.function import register_func
from ..._ffi.base import py_str
from ..._ffi.libinfo import find_lib_path
from ...module import load as _load_module
from .. import util
from . import base
from . base import TrackerCode
def _server_env(load_library):
    """Server environment function return temp dir"""
    temp = util.tempdir()
    # pylint: disable=unused-variable
    @register_func("tvm.contrib.rpc.server.workpath")
    def get_workpath(path):
        # Resolve a remote-supplied relative path inside the session temp dir.
        return temp.relpath(path)

    @register_func("tvm.contrib.rpc.server.load_module", override=True)
    def load_module(file_name):
        """Load module from remote side."""
        path = temp.relpath(file_name)
        m = _load_module(path)
        logging.info("load_module %s", path)
        return m

    # Eagerly dlopen extra shared libraries (':'-separated list) with
    # RTLD_GLOBAL so their symbols are visible to later-loaded modules.
    libs = []
    load_library = load_library.split(":") if load_library else []
    for file_name in load_library:
        file_name = find_lib_path(file_name)[0]
        libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
        logging.info("Load additional library %s", file_name)
    # Keep the handles alive for as long as the temp dir object lives.
    temp.libs = libs
    return temp
def _serve_loop(sock, addr, load_library):
    """Serve one RPC session on an accepted socket, then clean up its temp dir."""
    session_env = _server_env(load_library)
    base._ServerLoop(sock.fileno())
    session_env.remove()
    logging.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library):
    """Listening loop of the server master.

    Registers with the tracker (when given), accepts matched client
    connections, and serves each session in a daemon child process.
    """
    def _accept_conn(listen_sock, tracker_conn, ping_period=2):
        """Accept connection from the other places.

        Parameters
        ----------
        listen_sock: Socket
            The socket used by listening process.

        tracker_conn : connnection to tracker
            Tracker connection

        ping_period : float, optional
            ping tracker every k seconds if no connection is accepted.
        """
        old_keyset = set()
        # Report resource to tracker
        if tracker_conn:
            matchkey = base.random_key(rpc_key + ":")
            base.sendjson(tracker_conn,
                          [TrackerCode.PUT, rpc_key, (port, matchkey)])
            assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
        else:
            matchkey = rpc_key

        unmatch_period_count = 0
        unmatch_timeout = 4
        # Wait until we get a valid connection
        while True:
            if tracker_conn:
                trigger = select.select([listen_sock], [], [], ping_period)
                if listen_sock not in trigger[0]:
                    base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
                    pending_keys = base.recvjson(tracker_conn)
                    old_keyset.add(matchkey)
                    # if match key not in pending key set
                    # it means the key is aqquired by a client but not used.
                    if matchkey not in pending_keys:
                        unmatch_period_count += 1
                    else:
                        unmatch_period_count = 0
                    # regenerate match key if key is aqquired but not used for a while
                    if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
                        logging.info("RPCServer: no incoming connections, regenerate key ...")
                        matchkey = base.random_key(rpc_key + ":", old_keyset)
                        base.sendjson(tracker_conn,
                                      [TrackerCode.PUT, rpc_key, (port, matchkey)])
                        assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
                        unmatch_period_count = 0
                    continue
            conn, addr = listen_sock.accept()
            # Handshake: magic, key length, key; reject mismatched clients.
            magic = struct.unpack("<i", base.recvall(conn, 4))[0]
            if magic != base.RPC_MAGIC:
                conn.close()
                continue
            keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
            key = py_str(base.recvall(conn, keylen))
            arr = key.split()
            expect_header = "client:" + matchkey
            server_key = "server:" + rpc_key
            if arr[0] != expect_header:
                conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
                conn.close()
                logging.info("RPCServer: mismatch key from %s", addr)
                continue
            else:
                conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
                conn.sendall(struct.pack("<i", len(server_key)))
                conn.sendall(server_key.encode("utf-8"))
                return conn, addr, _parse_server_opt(arr[1:])

    # Server logic
    tracker_conn = None
    while True:
        try:
            # step 1: setup tracker and report to tracker
            if tracker_addr and tracker_conn is None:
                tracker_conn = base.connect_with_retry(tracker_addr)
                tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
                magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
                if magic != base.RPC_TRACKER_MAGIC:
                    raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
                # report status of current queue
                cinfo = {"key": "server:" + rpc_key}
                base.sendjson(tracker_conn,
                              [TrackerCode.UPDATE_INFO, cinfo])
                assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
            # step 2: wait for in-coming connections
            conn, addr, opts = _accept_conn(sock, tracker_conn)
        except (socket.error, IOError):
            # retry when tracker is dropped
            if tracker_conn:
                tracker_conn.close()
                tracker_conn = None
            continue

        # step 3: serving
        logging.info("RPCServer: connection from %s", addr)
        server_proc = multiprocessing.Process(target=_serve_loop, args=(conn, addr, load_library))
        # BUG FIX: was `server_proc.deamon` (typo) — it only set an unused
        # attribute, leaving the worker non-daemonic.  Must be set before start().
        server_proc.daemon = True
        server_proc.start()
        # close from our side.
        conn.close()
        # wait until server process finish or timeout
        server_proc.join(opts.get("timeout", None))
        if server_proc.is_alive():
            logging.info("RPCServer: Timeout in RPC session, kill..")
            server_proc.terminate()
def _connect_proxy_loop(addr, key, load_library):
    """Keep this server registered with an RPC proxy, serving sessions in
    daemon child processes.  Gives up after max_retry consecutive socket
    errors.
    """
    key = "server:" + key
    retry_count = 0
    max_retry = 5
    retry_period = 5
    while True:
        try:
            # Handshake: magic, key length, key.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(addr)
            sock.sendall(struct.pack("<i", base.RPC_MAGIC))
            sock.sendall(struct.pack("<i", len(key)))
            sock.sendall(key.encode("utf-8"))
            magic = struct.unpack("<i", base.recvall(sock, 4))[0]
            if magic == base.RPC_CODE_DUPLICATE:
                raise RuntimeError("key: %s has already been used in proxy" % key)
            elif magic == base.RPC_CODE_MISMATCH:
                logging.info("RPCProxy do not have matching client key %s", key)
            elif magic != base.RPC_CODE_SUCCESS:
                raise RuntimeError("%s is not RPC Proxy" % str(addr))
            keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
            remote_key = py_str(base.recvall(sock, keylen))
            opts = _parse_server_opt(remote_key.split()[1:])
            logging.info("RPCProxy connected to %s", str(addr))
            process = multiprocessing.Process(
                target=_serve_loop, args=(sock, addr, load_library))
            # BUG FIX: was `process.deamon` (typo) — it set an unused
            # attribute, leaving the worker non-daemonic.  Must be set
            # before start().
            process.daemon = True
            process.start()
            sock.close()
            process.join(opts.get("timeout", None))
            if process.is_alive():
                logging.info("RPCProxyServer: Timeout in RPC session, kill..")
                process.terminate()
            retry_count = 0
        except (socket.error, IOError) as err:
            retry_count += 1
            logging.info("Error encountered %s, retry in %g sec", str(err), retry_period)
            if retry_count > max_retry:
                raise RuntimeError("Maximum retry error: last error: %s" % str(err))
            time.sleep(retry_period)
def _popen(cmd):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
msg += out
raise RuntimeError(msg)
class Server(object):
    """Start RPC server on a seperate process.

    This is a simple python implementation based on multi-processing.
    It is also possible to implement a similar C based sever with
    TVM runtime which does not depend on the python.

    Parameters
    ----------
    host : str
        The host url of the server.

    port : int
        The port to be bind to

    port_end : int, optional
        The end port to search

    is_proxy : bool, optional
        Whether the address specified is a proxy.
        If this is true, the host and port actually corresponds to the
        address of the proxy server.

    use_popen : bool, optional
        Whether to use Popen to start a fresh new process instead of fork.
        This is recommended to switch on if we want to do local RPC demonstration
        for GPU devices to avoid fork safety issues.

    key : str, optional
        The key used to identify the server in Proxy connection.

    load_library : str, optional
        List of additional libraries to be loaded during execution.
    """
    def __init__(self,
                 host,
                 port=9091,
                 port_end=9199,
                 is_proxy=False,
                 use_popen=False,
                 tracker_addr=None,
                 key="",
                 load_library=None):
        try:
            if base._ServerLoop is None:
                raise RuntimeError("Please compile with USE_RPC=1")
        except NameError:
            raise RuntimeError("Please compile with USE_RPC=1")
        self.host = host
        self.port = port
        self.libs = []

        if use_popen:
            # Fresh subprocess mode: spawn `python -m tvm.exec.rpc_server ...`.
            cmd = ["python",
                   "-m", "tvm.exec.rpc_server",
                   "--host=%s" % host,
                   "--port=%s" % port]
            if tracker_addr:
                assert key
                cmd += ["--tracker=%s:%d" % tracker_addr,
                        "--key=%s" % key]
            if load_library:
                # NOTE(review): "--load-libary" looks like a typo, but it must
                # match the flag tvm.exec.rpc_server actually parses — confirm
                # before renaming it here.
                cmd += ["--load-libary", load_library]
            self.proc = multiprocessing.Process(
                target=subprocess.check_call, args=(cmd,))
            # BUG FIX: was `self.proc.deamon` (typo); the attribute is
            # `daemon` and must be set before start() so the child dies
            # with the parent.
            self.proc.daemon = True
            self.proc.start()
            time.sleep(1)
        elif not is_proxy:
            # Direct-listen mode: probe ports in [port, port_end) until one binds.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = None
            for my_port in range(port, port_end):
                try:
                    sock.bind((host, my_port))
                    self.port = my_port
                    break
                except socket.error as sock_err:
                    # errno 98 (EADDRINUSE, Linux) / 48 (macOS): try the next port.
                    if sock_err.errno in [98, 48]:
                        continue
                    else:
                        raise sock_err
            if not self.port:
                raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
            logging.info("RPCServer: bind to %s:%d", host, self.port)
            sock.listen(1)
            self.sock = sock
            self.proc = multiprocessing.Process(
                target=_listen_loop, args=(
                    self.sock, self.port, key, tracker_addr, load_library))
            self.proc.daemon = True  # BUG FIX: was `deamon` (typo)
            self.proc.start()
        else:
            # Proxy mode: connect out to the proxy instead of listening.
            self.proc = multiprocessing.Process(
                target=_connect_proxy_loop, args=((host, port), key, load_library))
            self.proc.daemon = True  # BUG FIX: was `deamon` (typo)
            self.proc.start()

    def terminate(self):
        """Terminate the server process"""
        if self.proc:
            self.proc.terminate()
            self.proc = None

    def __del__(self):
        self.terminate()
|
ui_utils.py | # -*- coding: utf-8 -*-
import os
import platform
import re
import subprocess
import sys
import textwrap
import threading
import time
import tkinter as tk
import tkinter.font
import traceback
from logging import getLogger
from tkinter import filedialog, messagebox, ttk
from typing import Callable, List, Optional, Tuple, Union # @UnusedImport
from _tkinter import TclError
from thonny import get_workbench, misc_utils, tktextext
from thonny.common import TextRange
from thonny.languages import get_button_padding, tr
from thonny.misc_utils import (
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.tktextext import TweakableText
PARENS_REGEX = re.compile(r"[\(\)\{\}\[\]]")
logger = getLogger(__name__)
class CommonDialog(tk.Toplevel):
    """Base Toplevel for Thonny dialogs: platform hints, padding, initial focus."""

    def __init__(self, master=None, **kw):
        assert master
        super().__init__(master=master, class_="Thonny", **kw)
        self.withdraw()  # remain invisible until size calculations are done
        # TODO: Is it still required ?
        # self.bind("<FocusIn>", self._unlock_on_focus_in, True)

        # https://bugs.python.org/issue43655
        if self._windowingsystem == "aqua":
            # macOS: style as a movable modal so it behaves like a dialog
            self.tk.call("::tk::unsupported::MacWindowStyle", "style", self, "moveableModal", "")
        elif self._windowingsystem == "x11":
            # X11: hint the window manager that this is a dialog window
            self.wm_attributes("-type", "dialog")

        self.parent = master

    def _unlock_on_focus_in(self, event):
        # Workaround for the CPython issue linked above: deiconify on first
        # focus and restore focus to the widget that had it.
        if not self.winfo_ismapped():
            focussed_widget = self.focus_get()
            self.deiconify()
            if focussed_widget:
                focussed_widget.focus_set()

    def get_padding(self):
        # Outer margin used by subclasses when laying out their content.
        return ems_to_pixels(2)

    def get_internal_padding(self):
        return self.get_padding() // 4

    def set_initial_focus(self, node=None) -> bool:
        """Depth-first search for the first focusable widget; focus it if found."""
        if node is None:
            node = self

        if isinstance(
            node,
            (
                ttk.Entry,
                ttk.Combobox,
                ttk.Treeview,
                tk.Text,
                ttk.Notebook,
                ttk.Button,
                tk.Listbox,
            ),
        ):
            node.focus_set()
            return True
        else:
            for child in node.winfo_children():
                if self.set_initial_focus(child):
                    return True
            return False
class CommonDialogEx(CommonDialog):
    """CommonDialog with a themed main frame and Escape/close-button handling."""

    def __init__(self, master=None, cnf={}, **kw):
        # NOTE(review): `cnf={}` is a shared mutable default (Tk's own
        # convention); it is only passed through, never mutated here.
        super().__init__(master=master, cnf=cnf, **kw)

        # Need to fill the dialog with a frame to gain theme support
        self.main_frame = ttk.Frame(self)
        self.main_frame.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        self.bind("<Escape>", self.on_close, True)
        self.protocol("WM_DELETE_WINDOW", self.on_close)

    def on_close(self, event=None):
        self.destroy()
class QueryDialog(CommonDialogEx):
    """Modal single-value input dialog: an Entry (or Combobox when `options`
    are given) plus OK/Cancel buttons.  The entered value is available via
    get_result() after the dialog closes (None when cancelled).
    """

    def __init__(
        self,
        master,
        title: str,
        prompt: str,
        initial_value: str = "",
        options: Optional[List[str]] = None,
        entry_width: Optional[int] = None,
    ):
        # BUG FIX: `options` previously defaulted to a shared mutable list
        # ([]).  None is the safe sentinel and behaves identically below,
        # since only the truthiness of `options` is consulted.
        super().__init__(master)

        self.var = tk.StringVar(value=initial_value)
        self.result = None

        margin = self.get_padding()
        spacing = margin // 2

        self.title(title)

        self.prompt_label = ttk.Label(self.main_frame, text=prompt)
        self.prompt_label.grid(row=1, column=1, columnspan=2, padx=margin, pady=(margin, spacing))

        # Combobox when choices are supplied, plain Entry otherwise.
        if options:
            self.entry_widget = ttk.Combobox(
                self.main_frame, textvariable=self.var, values=options, height=15, width=entry_width
            )
        else:
            self.entry_widget = ttk.Entry(self.main_frame, textvariable=self.var, width=entry_width)
        # Enter / keypad-Enter both confirm.
        self.entry_widget.bind("<Return>", self.on_ok, True)
        self.entry_widget.bind("<KP_Enter>", self.on_ok, True)
        self.entry_widget.grid(
            row=3, column=1, columnspan=2, sticky="we", padx=margin, pady=(0, margin)
        )

        self.ok_button = ttk.Button(
            self.main_frame, text=tr("OK"), command=self.on_ok, default="active"
        )
        self.ok_button.grid(row=5, column=1, padx=(margin, spacing), pady=(0, margin), sticky="e")
        self.cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self.on_cancel)
        self.cancel_button.grid(row=5, column=2, padx=(0, margin), pady=(0, margin), sticky="e")

        self.main_frame.columnconfigure(1, weight=1)

        self.entry_widget.focus_set()

    def on_ok(self, event=None):
        self.result = self.var.get()
        self.destroy()

    def on_cancel(self, event=None):
        self.result = None
        self.destroy()

    def get_result(self) -> Optional[str]:
        return self.result
def ask_string(
    title: str,
    prompt: str,
    initial_value: str = "",
    options: Optional[List[str]] = None,
    entry_width: Optional[int] = None,
    master=None,
):
    """Show a modal QueryDialog and return the entered string (None if cancelled).

    BUG FIX: `options` previously defaulted to a shared mutable list ([]);
    None behaves identically (QueryDialog only checks its truthiness).
    """
    dlg = QueryDialog(
        master, title, prompt, initial_value=initial_value, options=options, entry_width=entry_width
    )
    show_dialog(dlg, master)
    return dlg.get_result()
class CustomMenubar(ttk.Frame):
    """Hand-built menubar made of ttk Labels that pop up tk Menus on click."""

    def __init__(self, master):
        ttk.Frame.__init__(self, master, style="CustomMenubar.TFrame")
        self._menus = []
        self._opened_menu = None

        # Hover/active colors come from the theme's "Menubar" style options.
        ttk.Style().map(
            "CustomMenubarLabel.TLabel",
            background=[
                ("!active", lookup_style_option("Menubar", "background", "gray")),
                ("active", lookup_style_option("Menubar", "activebackground", "LightYellow")),
            ],
            foreground=[
                ("!active", lookup_style_option("Menubar", "foreground", "black")),
                ("active", lookup_style_option("Menubar", "activeforeground", "black")),
            ],
        )

    def add_cascade(self, label, menu):
        """Add a top-level menu: a Label that posts *menu* below itself on click."""
        label_widget = ttk.Label(
            self,
            style="CustomMenubarLabel.TLabel",
            text=label,
            padding=[6, 3, 6, 2],
            font="TkDefaultFont",
        )

        if len(self._menus) == 0:
            padx = (6, 0)
        else:
            padx = 0

        label_widget.grid(row=0, column=len(self._menus), padx=padx)

        # The closures below capture this call's label_widget/menu; each
        # add_cascade call gets its own scope, so there is no late-binding issue.
        def enter(event):
            label_widget.state(("active",))

            # Don't know how to open this menu when another menu is open
            # another tk_popup just doesn't work unless old menu is closed by click or Esc
            # https://stackoverflow.com/questions/38081470/is-there-a-way-to-know-if-tkinter-optionmenu-dropdown-is-active
            # unpost doesn't work in Win and Mac: https://www.tcl.tk/man/tcl8.5/TkCmd/menu.htm#M62
            # print("ENTER", menu, self._opened_menu)
            if self._opened_menu is not None:
                self._opened_menu.unpost()
                click(event)

        def leave(event):
            label_widget.state(("!active",))

        def click(event):
            try:
                # print("Before")
                self._opened_menu = menu
                # Post the menu just below the label.
                menu.tk_popup(
                    label_widget.winfo_rootx(),
                    label_widget.winfo_rooty() + label_widget.winfo_height(),
                )
            finally:
                # print("After")
                self._opened_menu = None

        label_widget.bind("<Enter>", enter, True)
        label_widget.bind("<Leave>", leave, True)
        label_widget.bind("<1>", click, True)

        self._menus.append(menu)
class AutomaticPanedWindow(tk.PanedWindow):
    """
    Enables inserting panes according to their position_key-s.
    Automatically adds/removes itself to/from its master AutomaticPanedWindow.
    Fixes some style glitches.

    Panes may carry a ``preferred_size_in_pw`` attribute; sashes are placed
    so that each such pane keeps its preferred size on window resize.
    """

    def __init__(self, master, position_key=None, preferred_size_in_pw=None, **kwargs):
        tk.PanedWindow.__init__(self, master, border=0, **kwargs)
        self._pane_minsize = 100
        self.position_key = position_key
        # guards against re-entrancy while sashes are being repositioned
        self._restoring_pane_sizes = False

        self._last_window_size = (0, 0)
        self._full_size_not_final = True
        self._configure_binding = self.bind("<Configure>", self._on_window_resize, True)
        self._update_appearance_binding = self.bind(
            "<<ThemeChanged>>", self._update_appearance, True
        )
        self.bind("<B1-Motion>", self._on_mouse_dragged, True)
        self._update_appearance()

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw

    def insert(self, pos, child, **kw):
        """Insert child; pos may be "auto" to order panes by position_key."""
        kw.setdefault("minsize", self._pane_minsize)
        if pos == "auto":
            # According to documentation I should use self.panes()
            # but this doesn't return expected widgets
            for sibling in sorted(
                self.pane_widgets(),
                key=lambda p: p.position_key if hasattr(p, "position_key") else 0,
            ):
                if (
                    not hasattr(sibling, "position_key")
                    or sibling.position_key is None
                    or sibling.position_key > child.position_key
                ):
                    pos = sibling
                    break
            else:
                pos = "end"

        if isinstance(pos, tk.Widget):
            kw["before"] = pos

        self.add(child, **kw)

    def add(self, child, **kw):
        kw.setdefault("minsize", self._pane_minsize)
        tk.PanedWindow.add(self, child, **kw)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def remove(self, child):
        tk.PanedWindow.remove(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def forget(self, child):
        tk.PanedWindow.forget(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def destroy(self):
        # unbind explicitly so late events don't hit a half-destroyed widget
        self.unbind("<Configure>", self._configure_binding)
        self.unbind("<<ThemeChanged>>", self._update_appearance_binding)
        tk.PanedWindow.destroy(self)

    def is_visible(self):
        if not isinstance(self.master, AutomaticPanedWindow):
            return self.winfo_ismapped()
        else:
            return self in self.master.pane_widgets()

    def pane_widgets(self):
        """Return panes as actual widgets (self.panes() gives name references)."""
        result = []
        for pane in self.panes():
            # pane is not the widget but some kind of reference object
            assert not isinstance(pane, tk.Widget)
            result.append(self.nametowidget(str(pane)))
        return result

    def _on_window_resize(self, event):
        if event.width < 10 or event.height < 10:
            # too small to be a real layout event
            return

        window = self.winfo_toplevel()
        window_size = (window.winfo_width(), window.winfo_height())
        initializing = hasattr(window, "initializing") and window.initializing

        if (
            not initializing
            and not self._restoring_pane_sizes
            and (window_size != self._last_window_size or self._full_size_not_final)
        ):
            self._check_restore_preferred_sizes()
            self._last_window_size = window_size

    def _on_mouse_dragged(self, event):
        # user moved a sash => remember new pane sizes as preferred
        if event.widget == self and not self._restoring_pane_sizes:
            self._update_preferred_sizes()

    def _update_preferred_sizes(self):
        for pane in self.pane_widgets():
            if getattr(pane, "preferred_size_in_pw", None) is not None:
                if self.cget("orient") == "horizontal":
                    current_size = pane.winfo_width()
                else:
                    current_size = pane.winfo_height()

                if current_size > 20:
                    pane.preferred_size_in_pw = current_size

                    # paneconfig width/height effectively puts
                    # unexplainable maxsize to some panes
                    # if self.cget("orient") == "horizontal":
                    #     self.paneconfig(pane, width=current_size)
                    # else:
                    #     self.paneconfig(pane, height=current_size)
                    #
                # else:
                #     self.paneconfig(pane, width=1000, height=1000)

    def _check_restore_preferred_sizes(self):
        window = self.winfo_toplevel()
        if getattr(window, "initializing", False):
            return

        try:
            self._restoring_pane_sizes = True
            self._restore_preferred_sizes()
        finally:
            self._restoring_pane_sizes = False

    def _restore_preferred_sizes(self):
        """Place sashes so each pane with a preferred size gets that size."""
        total_preferred_size = 0
        panes_without_preferred_size = []

        panes = self.pane_widgets()
        for pane in panes:
            if not hasattr(pane, "preferred_size_in_pw"):
                # child isn't fully constructed yet
                return

            if pane.preferred_size_in_pw is None:
                panes_without_preferred_size.append(pane)
                # self.paneconfig(pane, width=1000, height=1000)
            else:
                total_preferred_size += pane.preferred_size_in_pw

                # Without updating pane width/height attribute
                # the preferred size may lose effect when squeezing
                # non-preferred panes too small. Also zooming/unzooming
                # changes the supposedly fixed panes ...
                #
                # but
                # paneconfig width/height effectively puts
                # unexplainable maxsize to some panes
                # if self.cget("orient") == "horizontal":
                #     self.paneconfig(pane, width=pane.preferred_size_in_pw)
                # else:
                #     self.paneconfig(pane, height=pane.preferred_size_in_pw)

        assert len(panes_without_preferred_size) <= 1

        size = self._get_size()
        if size is None:
            return

        # Reuse the size queried above (the original queried the widget twice)
        leftover_size = size - total_preferred_size
        used_size = 0
        for i, pane in enumerate(panes[:-1]):
            used_size += pane.preferred_size_in_pw or leftover_size
            self._place_sash(i, used_size)
            used_size += int(str(self.cget("sashwidth")))

    def _get_size(self):
        if self.cget("orient") == tk.HORIZONTAL:
            result = self.winfo_width()
        else:
            result = self.winfo_height()

        if result < 20:
            # Not ready yet
            return None
        else:
            return result

    def _place_sash(self, i, distance):
        if self.cget("orient") == tk.HORIZONTAL:
            self.sash_place(i, distance, 0)
        else:
            self.sash_place(i, 0, distance)

    def _update_visibility(self):
        if not isinstance(self.master, AutomaticPanedWindow):
            return

        if len(self.panes()) == 0 and self.is_visible():
            self.master.forget(self)

        if len(self.panes()) > 0 and not self.is_visible():
            self.master.insert("auto", self)

    def _update_appearance(self, event=None):
        self.configure(sashwidth=lookup_style_option("Sash", "sashthickness", ems_to_pixels(0.6)))
        self.configure(background=lookup_style_option("TPanedWindow", "background"))
class ClosableNotebook(ttk.Notebook):
    """Notebook whose tabs can get close buttons and a right-click menu
    (Close / Close others / Close all).

    Tabs whose child widget defines a ``close()`` method are closed via that
    method; otherwise the child is forgotten and destroyed.
    """

    def __init__(self, master, style="ButtonNotebook.TNotebook", **kw):
        super().__init__(master, style=style, **kw)

        self.tab_menu = self.create_tab_menu()
        self._popup_index = None  # index of the tab under the last right-click
        self.pressed_index = None  # index where a close-button press started

        self.bind("<ButtonPress-1>", self._letf_btn_press, True)
        self.bind("<ButtonRelease-1>", self._left_btn_release, True)
        if running_on_mac_os():
            # Mac: middle-click or Ctrl-click acts as the context click
            self.bind("<ButtonPress-2>", self._right_btn_press, True)
            self.bind("<Control-Button-1>", self._right_btn_press, True)
        else:
            self.bind("<ButtonPress-3>", self._right_btn_press, True)

        # self._check_update_style()

    def create_tab_menu(self):
        """Create the context menu shown when right-clicking a tab."""
        menu = tk.Menu(self.winfo_toplevel(), tearoff=False, **get_style_configuration("Menu"))
        menu.add_command(label=tr("Close"), command=self._close_tab_from_menu)
        menu.add_command(label=tr("Close others"), command=self._close_other_tabs)
        menu.add_command(label=tr("Close all"), command=self.close_tabs)
        return menu

    def _letf_btn_press(self, event):
        # NOTE(review): "letf" looks like a typo for "left"; the name is kept
        # because it is referenced in the binding above.
        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))

            if "closebutton" in elem:
                self.state(["pressed"])
                self.pressed_index = index
        except Exception:
            # may fail, if clicked outside of tab
            return

    def _left_btn_release(self, event):
        if not self.instate(["pressed"]):
            return

        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))
        except Exception:
            # may fail, when mouse is dragged
            return
        else:
            # close only if released on the same close button that was pressed
            if "closebutton" in elem and self.pressed_index == index:
                self.close_tab(index)

            self.state(["!pressed"])
        finally:
            self.pressed_index = None

    def _right_btn_press(self, event):
        try:
            index = self.index("@%d,%d" % (event.x, event.y))
            self._popup_index = index
            self.tab_menu.tk_popup(*self.winfo_toplevel().winfo_pointerxy())
        except Exception:
            logger.exception("Opening tab menu")

    def _close_tab_from_menu(self):
        self.close_tab(self._popup_index)

    def _close_other_tabs(self):
        self.close_tabs(self._popup_index)

    def close_tabs(self, except_index=None):
        """Close all tabs, optionally keeping the one at except_index."""
        # iterate in reverse so indices stay valid while tabs disappear
        for tab_index in reversed(range(len(self.winfo_children()))):
            if except_index is not None and tab_index == except_index:
                continue
            else:
                self.close_tab(tab_index)

    def close_tab(self, index):
        """Close one tab, delegating to the child's close() if it has one."""
        child = self.get_child_by_index(index)
        if hasattr(child, "close"):
            child.close()
        else:
            self.forget(index)
            child.destroy()

    def get_child_by_index(self, index):
        tab_id = self.tabs()[index]
        if tab_id:
            return self.nametowidget(tab_id)
        else:
            return None

    def get_current_child(self):
        child_id = self.select()
        if child_id:
            return self.nametowidget(child_id)
        else:
            return None

    def focus_set(self):
        # forward focus to the current editor/tab content when possible
        editor = self.get_current_child()
        if editor:
            editor.focus_set()
        else:
            super().focus_set()

    def _check_update_style(self):
        """Lazily register the close-button element and tab layout in ttk."""
        style = ttk.Style()
        if "closebutton" in style.element_names():
            # It's done already
            return

        # respect if required images have been defined already
        if "img_close" not in self.image_names():
            img_dir = os.path.join(os.path.dirname(__file__), "res")
            # keep references on the class so the images aren't garbage-collected
            ClosableNotebook._close_img = tk.PhotoImage(
                "img_tab_close", file=os.path.join(img_dir, "tab_close.gif")
            )
            ClosableNotebook._close_active_img = tk.PhotoImage(
                "img_tab_close_active", file=os.path.join(img_dir, "tab_close_active.gif")
            )

        style.element_create(
            "closebutton",
            "image",
            "img_tab_close",
            ("active", "pressed", "!disabled", "img_tab_close_active"),
            ("active", "!disabled", "img_tab_close_active"),
            border=8,
            sticky="",
        )

        # tab layout: label + close button inside the usual padding/focus rings
        style.layout(
            "ButtonNotebook.TNotebook.Tab",
            [
                (
                    "Notebook.tab",
                    {
                        "sticky": "nswe",
                        "children": [
                            (
                                "Notebook.padding",
                                {
                                    "side": "top",
                                    "sticky": "nswe",
                                    "children": [
                                        (
                                            "Notebook.focus",
                                            {
                                                "side": "top",
                                                "sticky": "nswe",
                                                "children": [
                                                    (
                                                        "Notebook.label",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                    (
                                                        "Notebook.closebutton",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                ],
                                            },
                                        )
                                    ],
                                },
                            )
                        ],
                    },
                )
            ],
        )

    def _check_remove_padding(self, kw):
        # Windows themes produce 1-pixel padding to the bottom of the pane
        # Don't know how to get rid of it using themes
        if "padding" not in kw and ttk.Style().theme_use().lower() in (
            "windows",
            "xpnative",
            "vista",
        ):
            kw["padding"] = (0, 0, 0, -1)

    def add(self, child, **kw):
        self._check_remove_padding(kw)
        super().add(child, **kw)

    def insert(self, pos, child, **kw):
        self._check_remove_padding(kw)
        super().insert(pos, child, **kw)
class AutomaticNotebook(ClosableNotebook):
    """
    Enables inserting views according to their position keys.
    Remembers its own position key. Automatically updates its visibility
    inside an AutomaticPanedWindow master.
    """

    def __init__(self, master, position_key, preferred_size_in_pw=None):
        if get_workbench().in_simple_mode():
            style = "TNotebook"
        else:
            style = "ButtonNotebook.TNotebook"
        super().__init__(master, style=style, padding=0)
        self.position_key = position_key

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw

    def add(self, child, **kw):
        super().add(child, **kw)
        self._update_visibility()

    def insert(self, pos, child, **kw):
        """Insert child; pos may be "auto" to order tabs by position_key."""
        if pos == "auto":
            for sibling in map(self.nametowidget, self.tabs()):
                if (
                    not hasattr(sibling, "position_key")
                    # PEP 8: identity check instead of `== None`
                    or sibling.position_key is None
                    or sibling.position_key > child.position_key
                ):
                    pos = sibling
                    break
            else:
                pos = "end"

        super().insert(pos, child, **kw)
        self._update_visibility()

    def hide(self, tab_id):
        super().hide(tab_id)
        self._update_visibility()

    def forget(self, tab_id):
        if tab_id in self.tabs() or tab_id in self.winfo_children():
            super().forget(tab_id)
        self._update_visibility()

    def is_visible(self):
        return self in self.master.pane_widgets()

    def get_visible_child(self):
        """Return the widget of the currently selected tab (or None)."""
        for child in self.winfo_children():
            if str(child) == str(self.select()):
                return child

        return None

    def _update_visibility(self):
        # add/remove self from the surrounding AutomaticPanedWindow
        # depending on whether there are any tabs left
        if not isinstance(self.master, AutomaticPanedWindow):
            return

        if len(self.tabs()) == 0 and self.is_visible():
            self.master.remove(self)

        if len(self.tabs()) > 0 and not self.is_visible():
            self.master.insert("auto", self)
class TreeFrame(ttk.Frame):
    """A ttk.Treeview combined with a vertical scrollbar and optional statusbar.

    Subclasses may override on_select / on_double_click, and can use
    show_error / clear_error to display a message on top of the tree.
    """

    def __init__(
        self,
        master,
        columns,
        displaycolumns="#all",
        show_scrollbar=True,
        show_statusbar=False,
        borderwidth=0,
        relief="flat",
        **tree_kw,
    ):
        ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
        # http://wiki.tcl.tk/44444#pagetoc50f90d9a
        self.vert_scrollbar = ttk.Scrollbar(
            self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
        )
        if show_scrollbar:
            self.vert_scrollbar.grid(
                row=0, column=1, sticky=tk.NSEW, rowspan=2 if show_statusbar else 1
            )

        self.tree = ttk.Treeview(
            self,
            columns=columns,
            displaycolumns=displaycolumns,
            yscrollcommand=self.vert_scrollbar.set,
            **tree_kw,
        )
        self.tree["show"] = "headings"
        self.tree.grid(row=0, column=0, sticky=tk.NSEW)
        self.vert_scrollbar["command"] = self.tree.yview
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.tree.bind("<<TreeviewSelect>>", self.on_select, "+")
        self.tree.bind("<Double-Button-1>", self.on_double_click, "+")
        # label placed over the tree by show_error()
        self.error_label = ttk.Label(self.tree)

        if show_statusbar:
            self.statusbar = ttk.Frame(self)
            self.statusbar.grid(row=1, column=0, sticky="nswe")
        else:
            self.statusbar = None

    def _clear_tree(self):
        # remove all top-level items (children go with them)
        for child_id in self.tree.get_children():
            self.tree.delete(child_id)

    def clear(self):
        self._clear_tree()

    def on_select(self, event):
        # hook for subclasses
        pass

    def on_double_click(self, event):
        # hook for subclasses
        pass

    def show_error(self, error_text):
        """Show the given message over the tree."""
        self.error_label.configure(text=error_text)
        self.error_label.grid()

    def clear_error(self):
        """Hide the message shown by show_error()."""
        self.error_label.grid_remove()
def scrollbar_style(orientation):
    """Return an explicit scrollbar style name when needed, else None.

    On macOS, ttk.Scrollbar uses native rendering unless the style attribute
    is set (http://wiki.tcl.tk/44444#pagetoc50f90d9a), and native rendering
    doesn't look good in dark themes.
    """
    needs_explicit_style = running_on_mac_os() and get_workbench().uses_dark_ui_theme()
    return orientation + ".TScrollbar" if needs_explicit_style else None
def sequence_to_accelerator(sequence):
    """Translate a Tk event sequence (e.g. "<Control-s>") to the customary
    shortcut string for showing in a menu (e.g. "Ctrl+S").

    Non-sequence strings (not starting with "<") are returned unchanged;
    empty input yields "".
    """
    if not sequence:
        return ""

    if not sequence.startswith("<"):
        return sequence

    accelerator = (
        sequence.strip("<>").replace("Key-", "").replace("KeyPress-", "").replace("Control", "Ctrl")
    )

    # Tweaking individual parts
    parts = accelerator.split("-")
    # tkinter shows shift with capital letter, but in shortcuts it's customary
    # to include it explicitly
    if len(parts[-1]) == 1 and parts[-1].isupper() and "Shift" not in parts:
        parts.insert(-1, "Shift")

    # even when shift is not required, it's customary to show shortcut with capital letter
    if len(parts[-1]) == 1:
        parts[-1] = parts[-1].upper()

    accelerator = "+".join(parts)

    # Post processing: spell out minus/plus keys as symbols
    accelerator = (
        accelerator.replace("Minus", "-")
        .replace("minus", "-")
        .replace("Plus", "+")
        .replace("plus", "+")
    )

    return accelerator
def get_zoomed(toplevel):
    """Tell whether the given toplevel window is maximized ("zoomed")."""
    # Linux exposes the zoomed state via a wm attribute; Win/Mac via wm_state
    if "-zoomed" not in toplevel.wm_attributes():
        return toplevel.wm_state() == "zoomed"
    return bool(toplevel.wm_attributes("-zoomed"))
def set_zoomed(toplevel, value):
    """Maximize ("zoom") or restore the given toplevel window."""
    if "-zoomed" in toplevel.wm_attributes():  # Linux
        toplevel.wm_attributes("-zoomed", str(int(value)))
        return

    # Win/Mac
    toplevel.wm_state("zoomed" if value else "normal")
class EnhancedTextWithLogging(tktextext.EnhancedText):
    """EnhancedText which publishes TextInsert/TextDelete workbench events.

    Each event carries trivial_for_coloring / trivial_for_parens hints so
    listeners (syntax colorer, paren matcher) can skip unnecessary work.
    """

    def __init__(self, master=None, style="Text", tag_current_line=False, cnf={}, **kw):
        super().__init__(
            master=master, style=style, tag_current_line=tag_current_line, cnf=cnf, **kw
        )
        self._last_event_changed_line_count = False

    def direct_insert(self, index, chars, tags=None, **kw):
        """Insert chars and generate a TextInsert event with triviality hints."""
        # try removing line numbers
        # TODO: shouldn't it take place only on paste?
        # TODO: does it occur when opening a file with line numbers in it?
        # if self._propose_remove_line_numbers and isinstance(chars, str):
        #     chars = try_remove_linenumbers(chars, self)
        concrete_index = self.index(index)
        line_before = self.get(concrete_index + " linestart", concrete_index + " lineend")
        self._last_event_changed_line_count = "\n" in chars
        result = tktextext.EnhancedText.direct_insert(self, index, chars, tags=tags, **kw)
        line_after = self.get(concrete_index + " linestart", concrete_index + " lineend")
        trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
            chars, line_before, line_after
        )
        get_workbench().event_generate(
            "TextInsert",
            index=concrete_index,
            text=chars,
            tags=tags,
            text_widget=self,
            trivial_for_coloring=trivial_for_coloring,
            trivial_for_parens=trivial_for_parens,
        )

        return result

    def direct_delete(self, index1, index2=None, **kw):
        """Delete a range and generate a TextDelete event with triviality hints."""
        try:
            # index1 may be eg "sel.first" and it doesn't make sense *after* deletion
            concrete_index1 = self.index(index1)
            if index2 is not None:
                concrete_index2 = self.index(index2)
            else:
                concrete_index2 = None

            chars = self.get(index1, index2)
            self._last_event_changed_line_count = "\n" in chars
            line_before = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )
            return tktextext.EnhancedText.direct_delete(self, index1, index2=index2, **kw)
        finally:
            # event is generated even if the deletion itself raised
            line_after = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )
            trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
                chars, line_before, line_after
            )
            get_workbench().event_generate(
                "TextDelete",
                index1=concrete_index1,
                index2=concrete_index2,
                text_widget=self,
                trivial_for_coloring=trivial_for_coloring,
                trivial_for_parens=trivial_for_parens,
            )

    def _is_trivial_edit(self, chars, line_before, line_after):
        """Classify an edit: does it require re-coloring / re-matching parens?"""
        # line is taken after edit for insertion and before edit for deletion
        if not chars.strip():
            # linebreaks, including with automatic indent
            # check it doesn't break a triple-quote
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring
        elif len(chars) > 1:
            # paste, cut, load or something like this
            trivial_for_coloring = False
            trivial_for_parens = False
        elif chars == "#":
            # BUGFIX: was checking for "''''" (four quotes), which made the
            # test practically always pass; every other branch checks for
            # the triple-quote "'''"
            trivial_for_coloring = "'''" not in line_before and '"""' not in line_before
            trivial_for_parens = trivial_for_coloring and not re.search(PARENS_REGEX, line_before)
        elif chars in "()[]{}":
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = False
        elif chars == "'":
            trivial_for_coloring = "'''" not in line_before and "'''" not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == '"':
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == "\\":
            # can shorten closing quote
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False
        else:
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring

        return trivial_for_coloring, trivial_for_parens
class SafeScrollbar(ttk.Scrollbar):
    """Scrollbar whose set() never propagates exceptions (prints them instead),
    so a broken scroll callback can't take down the UI loop."""

    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        try:
            ttk.Scrollbar.set(self, first, last)
        except Exception:
            traceback.print_exc()
class AutoScrollbar(SafeScrollbar):
    """A scrollbar that hides itself when the whole range is visible.

    Based on http://effbot.org/zone/tkinter-autoscrollbar.htm.
    Only works with the grid geometry manager, therefore pack and place
    are disabled.
    """

    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        lo = float(first)
        hi = float(last)
        if lo <= 0.0 and hi >= 1.0:
            # whole content visible => hide
            self.grid_remove()
        elif lo > 0.001 or hi < 0.999:
            # with >0 and <1 it occasionally made scrollbar wobble back and forth
            self.grid()
        ttk.Scrollbar.set(self, first, last)

    def pack(self, **kw):
        raise tk.TclError("cannot use pack with this widget")

    def place(self, **kw):
        raise tk.TclError("cannot use place with this widget")
def update_entry_text(entry, text):
    """Replace the content of a (possibly read-only) entry widget.

    Temporarily switches the widget to "normal" state so the text can be
    changed, then restores the previous state.
    """
    saved_state = entry.cget("state")
    entry.config(state="normal")
    entry.delete(0, "end")
    entry.insert(0, text)
    entry.config(state=saved_state)
class VerticallyScrollableFrame(ttk.Frame):
    """Frame whose content can be scrolled vertically via an inner canvas.

    Based on http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame.
    Put content into ``self.interior``.
    """

    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
        vscrollbar.config(command=self.canvas.yview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        # actual content goes into this frame, embedded in the canvas
        self.interior = ttk.Frame(self.canvas)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        self.update_idletasks()
        self.update_scrollbars()

    def _configure_interior(self, event):
        self.update_scrollbars()

    def update_scrollbars(self):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_width(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)

        if (
            self.interior.winfo_reqwidth() != self.canvas.winfo_width()
            and self.canvas.winfo_width() > 10
        ):
            # update the interior's width to fit canvas
            # print("CAWI", self.canvas.winfo_width())
            self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class ScrollableFrame(ttk.Frame):
    """Frame scrollable both vertically and horizontally via an inner canvas.

    Based on http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame.
    Put content into ``self.interior``.
    """

    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
        vscrollbar.config(command=self.canvas.yview)
        hscrollbar.config(command=self.canvas.xview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        hscrollbar.grid(row=1, column=0, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        # actual content goes into this frame, embedded in the canvas
        self.interior = ttk.Frame(self.canvas)
        self.interior.columnconfigure(0, weight=1)
        self.interior.rowconfigure(0, weight=1)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        self.update_idletasks()
        self._configure_interior(event)

    def _configure_interior(self, event):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_reqwidth(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)
class ThemedListbox(tk.Listbox):
    """tk.Listbox which takes its colors from the current ttk theme
    (re-applied on each <<ThemeChanged>> event)."""

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)

        self._ui_theme_change_binding = self.bind(
            "<<ThemeChanged>>", self._reload_theme_options, True
        )
        self._reload_theme_options()

    def _reload_theme_options(self, event=None):
        # query the relevant options from the ttk style and apply them
        style = ttk.Style()

        states = []
        if self["state"] == "disabled":
            states.append("disabled")

        # Following crashes when a combobox is focused
        # if self.focus_get() == self:
        #     states.append("focus")

        opts = {}
        for key in [
            "background",
            "foreground",
            "highlightthickness",
            "highlightcolor",
            "highlightbackground",
        ]:
            value = style.lookup(self.get_style_name(), key, states)
            if value:
                opts[key] = value

        self.configure(opts)

    def get_style_name(self):
        # subclasses may override to use another style
        return "Listbox"

    def destroy(self):
        self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
        super().destroy()
class ToolTip:
    """Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml"""

    def __init__(self, widget, options):
        self.widget = widget
        self.tipwindow = None  # the Toplevel shown while the tip is visible
        self.id = None
        self.x = self.y = 0
        self.options = options  # label options (colors, relief, padding ...)

    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        if self.tipwindow or not self.text:
            return
        # place the tip below and slightly right of the widget's insert cursor
        x, y, _, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 27
        y = y + cy + self.widget.winfo_rooty() + self.widget.winfo_height() + 2
        self.tipwindow = tw = tk.Toplevel(self.widget)
        if running_on_mac_os():
            try:
                # Must be the first thing to do after creating window
                # https://wiki.tcl-lang.org/page/MacWindowStyle
                tw.tk.call(
                    "::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates"
                )
                if get_tk_version_info() >= (8, 6, 10) and running_on_mac_os():
                    tw.wm_overrideredirect(1)
            except tk.TclError:
                pass
        else:
            tw.wm_overrideredirect(1)

        tw.wm_geometry("+%d+%d" % (x, y))
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            tw.wm_transient(self.widget)
        label = tk.Label(tw, text=self.text, **self.options)
        label.pack()
        # get_workbench().bind("WindowFocusOut", self.hidetip, True)

    def hidetip(self, event=None):
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()

        # get_workbench().unbind("WindowFocusOut", self.hidetip)
def create_tooltip(widget, text, **kw):
    """Attach a hover tooltip with the given text to a widget.

    Styling defaults come from the "Tooltip" style configuration and can be
    overridden with keyword arguments.
    """
    options = get_style_configuration("Tooltip").copy()
    fallback_options = {
        "background": "#ffffe0",
        "foreground": "#000000",
        "relief": "solid",
        "borderwidth": 1,
        "padx": 1,
        "pady": 0,
    }
    for key, value in fallback_options.items():
        options.setdefault(key, value)
    options.update(kw)

    tooltip = ToolTip(widget, options)

    def on_enter(event):
        tooltip.showtip(text)

    def on_leave(event):
        tooltip.hidetip()

    widget.bind("<Enter>", on_enter)
    widget.bind("<Leave>", on_leave)
class NoteBox(CommonDialog):
    """Borderless popup window for showing rich-text notes near a widget.

    Content is given to set_content()/show_note() as plain strings or
    (chars, *tags[, click_handler]) tuples. The note positions itself below
    a target widget, optionally below a TextRange inside a Text widget.
    """

    def __init__(self, master=None, max_default_width=300, **kw):
        super().__init__(master=master, highlightthickness=0, **kw)

        self._max_default_width = max_default_width

        self.wm_overrideredirect(True)
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            self.wm_transient(master)
            try:
                # For Mac OS
                self.tk.call(
                    "::tk::unsupported::MacWindowStyle", "style", self._w, "help", "noActivates"
                )
            except tk.TclError:
                pass

        self._current_chars = ""  # plain-text content, used for size computation
        self._click_bindings = {}  # click tag name -> binding id

        self.padx = 5
        self.pady = 5
        self.text = TweakableText(
            self,
            background="#ffffe0",
            borderwidth=1,
            relief="solid",
            undo=False,
            read_only=True,
            font="TkDefaultFont",
            highlightthickness=0,
            padx=self.padx,
            pady=self.pady,
            wrap="word",
        )
        self.text.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        self.text.bind("<Escape>", self.close, True)

        # tk._default_root.bind_all("<1>", self._close_maybe, True)
        # tk._default_root.bind_all("<Key>", self.close, True)

        # start hidden; place()/show_note() will deiconify
        self.withdraw()

    def clear(self):
        """Remove all content and click bindings."""
        for tag in self._click_bindings:
            self.text.tag_unbind(tag, "<1>", self._click_bindings[tag])
            self.text.tag_remove(tag, "1.0", "end")

        self.text.direct_delete("1.0", "end")
        self._current_chars = ""
        self._click_bindings.clear()

    def set_content(self, *items):
        """Replace content; each item is a string or (chars, *tags[, click_handler])."""
        self.clear()

        for item in items:
            if isinstance(item, str):
                self.text.direct_insert("1.0", item)
                self._current_chars = item
            else:
                assert isinstance(item, (list, tuple))
                chars, *props = item
                # a trailing callable is interpreted as the click handler
                if len(props) > 0 and callable(props[-1]):
                    tags = tuple(props[:-1])
                    click_handler = props[-1]
                else:
                    tags = tuple(props)
                    click_handler = None

                self.append_text(chars, tags, click_handler)

        self.text.see("1.0")

    def append_text(self, chars, tags=(), click_handler=None):
        """Append chars with given tags; optionally make them clickable."""
        tags = tuple(tags)

        if click_handler is not None:
            click_tag = "click_%d" % len(self._click_bindings)
            tags = tags + (click_tag,)
            binding = self.text.tag_bind(click_tag, "<1>", click_handler, True)
            self._click_bindings[click_tag] = binding

        self.text.direct_insert("end", chars, tags)
        self._current_chars += chars

    def place(self, target, focus=None):
        """Position and show the note below target (or below focus in target)."""
        # Compute the area that will be described by this Note
        focus_x = target.winfo_rootx()
        focus_y = target.winfo_rooty()
        focus_height = target.winfo_height()

        if isinstance(focus, TextRange):
            assert isinstance(target, tk.Text)
            topleft = target.bbox("%d.%d" % (focus.lineno, focus.col_offset))
            if focus.end_col_offset == 0:
                botright = target.bbox(
                    "%d.%d lineend" % (focus.end_lineno - 1, focus.end_lineno - 1)
                )
            else:
                botright = target.bbox("%d.%d" % (focus.end_lineno, focus.end_col_offset))

            if topleft and botright:
                focus_x += topleft[0]
                focus_y += topleft[1]
                focus_height = botright[1] - topleft[1] + botright[3]

        elif isinstance(focus, (list, tuple)):
            # (x, y, width, height) relative to target
            focus_x += focus[0]
            focus_y += focus[1]
            focus_height = focus[3]

        elif focus is None:
            pass

        else:
            raise TypeError("Unsupported focus")

        # Compute dimensions of the note
        font = self.text["font"]
        if isinstance(font, str):
            font = tk.font.nametofont(font)

        lines = self._current_chars.splitlines()
        max_line_width = 0
        for line in lines:
            max_line_width = max(max_line_width, font.measure(line))

        width = min(max_line_width, self._max_default_width) + self.padx * 2 + 2
        # first set an approximate geometry so that line count can be measured
        self.wm_geometry("%dx%d+%d+%d" % (width, 100, focus_x, focus_y + focus_height))
        self.update_idletasks()
        line_count = int(float(self.text.index("end")))
        line_height = font.metrics()["linespace"]

        self.wm_geometry(
            "%dx%d+%d+%d" % (width, line_count * line_height, focus_x, focus_y + focus_height)
        )

        # TODO: detect the situation when note doesn't fit under
        # the focus box and should be placed above

        self.deiconify()

    def show_note(self, *content_items: Union[str, List], target=None, focus=None) -> None:
        """Convenience wrapper: set content and place the note."""
        self.set_content(*content_items)
        self.place(target, focus)

    def _close_maybe(self, event):
        # close unless the click landed on the note itself
        if event.widget not in [self, self.text]:
            self.close(event)

    def close(self, event=None):
        self.withdraw()
def get_widget_offset_from_toplevel(widget):
    """Return (x, y) of the widget relative to its toplevel window.

    Sums the offsets of the widget and each of its ancestors up to
    (but excluding) the toplevel.
    """
    total_x = 0
    total_y = 0
    toplevel = widget.winfo_toplevel()
    current = widget
    while current != toplevel:
        total_x += current.winfo_x()
        total_y += current.winfo_y()
        current = current.master
    return total_x, total_y
class EnhancedVar(tk.Variable):
    """tk.Variable which tracks whether it has been written to.

    Sets the ``modified`` flag on each write and calls the optional
    modification_listener; listener exceptions are reported instead of
    crashing Tk's callback machinery.
    """

    def __init__(self, master=None, value=None, name=None, modification_listener=None):
        if master is not None and not isinstance(master, (tk.Widget, tk.Wm)):
            raise TypeError("First positional argument 'master' must be None, Widget or Wm")
        super().__init__(master=master, value=value, name=name)

        self.modified = False
        self.modification_listener = modification_listener
        if sys.version_info < (3, 6):
            # trace_add is only available since Python 3.6
            self.trace("w", self._on_write)
        else:
            self.trace_add("write", self._on_write)

    def _on_write(self, *args):
        self.modified = True
        if self.modification_listener:
            try:
                self.modification_listener()
            except Exception:
                # Otherwise whole process will be brought down
                # because for some reason Tk tries to call non-existing method
                # on variable
                get_workbench().report_exception()
class EnhancedStringVar(EnhancedVar, tk.StringVar):
    """StringVar with EnhancedVar's "modified" flag and modification listener."""

    pass
class EnhancedIntVar(EnhancedVar, tk.IntVar):
    """IntVar with EnhancedVar's "modified" flag and modification listener."""

    pass
class EnhancedBooleanVar(EnhancedVar, tk.BooleanVar):
    """BooleanVar with EnhancedVar's "modified" flag and modification listener."""

    pass
class EnhancedDoubleVar(EnhancedVar, tk.DoubleVar):
    """DoubleVar with EnhancedVar's "modified" flag and modification listener."""

    pass
def create_string_var(value, modification_listener=None) -> EnhancedStringVar:
    """Create a tk.StringVar whose "modified" attribute tells whether the
    variable has been written to after creation."""
    return EnhancedStringVar(value=value, modification_listener=modification_listener)
def create_int_var(value, modification_listener=None) -> EnhancedIntVar:
    """See create_string_var"""
    return EnhancedIntVar(value=value, modification_listener=modification_listener)
def create_double_var(value, modification_listener=None) -> EnhancedDoubleVar:
    """See create_string_var"""
    return EnhancedDoubleVar(value=value, modification_listener=modification_listener)
def create_boolean_var(value, modification_listener=None) -> EnhancedBooleanVar:
    """See create_string_var"""
    return EnhancedBooleanVar(value=value, modification_listener=modification_listener)
def shift_is_pressed(event: tk.Event) -> bool:
    """Tell whether Shift was down during the given event.

    State bit meanings: https://tkdocs.com/shipman/event-handlers.html,
    http://stackoverflow.com/q/32426250/261181
    """
    # bool() so the return value matches the declared type (was a raw int)
    return bool(event.state & 0x0001)
def caps_lock_is_on(event: tk.Event) -> bool:
    """Tell whether Caps Lock was on during the given event.

    State bit meanings: https://tkdocs.com/shipman/event-handlers.html,
    http://stackoverflow.com/q/32426250/261181
    """
    # bool() so the return value matches the declared type (was a raw int)
    return bool(event.state & 0x0002)
def control_is_pressed(event: tk.Event) -> bool:
    """Tell whether Control was down during the given event.

    State bit meanings: https://tkdocs.com/shipman/event-handlers.html,
    http://stackoverflow.com/q/32426250/261181
    """
    # bool() so the return value matches the declared type (was a raw int)
    return bool(event.state & 0x0004)
def alt_is_pressed_without_char(event: tk.Event) -> bool:
    """True when Alt is held but the event produced no character.

    See https://tkdocs.com/shipman/event-handlers.html,
    http://stackoverflow.com/q/32426250/261181 and
    https://bugs.python.org/msg268429 for the platform-specific masks.
    """
    if event.char:
        # A produced character means this is not a "bare" Alt event.
        return False
    if running_on_mac_os():
        # combinations always produce a char or are consumed by the OS
        return False
    # Windows uses a different state bit for Alt than X11 does.
    mask = 0x20000 if running_on_windows() else 0x0010
    return event.state & mask
def command_is_pressed(event: tk.Event) -> bool:
    """True when the macOS Command modifier is down; always False elsewhere.

    State bit layout: https://tkdocs.com/shipman/event-handlers.html
    and http://stackoverflow.com/q/32426250/261181
    """
    if not running_on_mac_os():
        return False
    # bool() so the return value matches the declared annotation.
    return bool(event.state & 0x0008)
def get_hyperlink_cursor() -> str:
    """Name of the platform's native "pointing hand" cursor for links."""
    return "pointinghand" if running_on_mac_os() else "hand2"
def get_beam_cursor() -> str:
    """Name of the platform's native text-insertion (I-beam) cursor."""
    return "ibeam" if running_on_mac_os() or running_on_windows() else "xterm"
def sequence_to_event_state_and_keycode(sequence: str) -> Optional[Tuple[int, int]]:
    """Convert a binding sequence like "<Control-c>" into an
    (event_state, keycode) pair, or None when it is not a supported
    Ctrl / Ctrl+Shift latin-letter shortcut.

    Used for remembering handlers for shortcuts which require different
    treatment on non-latin keyboards. State bit values per
    https://tkdocs.com/shipman/event-handlers.html
    """
    if sequence[0] != "<":
        return None

    tokens = sequence.strip("<").strip(">").split("-")
    # support only latin letters for now
    if tokens[-1].lower() not in frozenset("abcdefghijklmnopqrstuvwxyz"):
        return None

    letter = tokens.pop(-1)
    # Drop one literal "Key"/"key" token if present (same as the original).
    for noise in ("Key", "key"):
        if noise in tokens:
            tokens.remove(noise)

    modifiers = {token.lower() for token in tokens}
    if letter.isupper():
        # An uppercase letter implies Shift.
        modifiers.add("shift")

    if modifiers == {"control"}:
        event_state = 0x0004
    elif modifiers == {"control", "shift"}:
        event_state = 0x0004 | 0x0001
    else:
        # don't support other modifier combinations for now
        return None

    # for latin letters keycode is same as the ASCII code of the uppercase letter
    return (event_state, ord(letter.upper()))
def select_sequence(win_version, mac_version, linux_version=None):
    """Pick the platform-appropriate binding sequence.

    Linux falls back to the Windows variant when no Linux-specific
    sequence is given.
    """
    if running_on_mac_os():
        return mac_version
    if running_on_linux() and linux_version:
        return linux_version
    return win_version
def try_remove_linenumbers(text, master):
    """If *text* looks line-numbered, ask the user whether to strip the
    numbers; return the (possibly cleaned) text.

    Any error is swallowed (with a printed traceback) and the original
    text is returned unchanged.
    """
    try:
        if not has_line_numbers(text):
            return text
        confirmed = messagebox.askyesno(
            title="Remove linenumbers",
            message="Do you want to remove linenumbers from pasted text?",
            default=messagebox.YES,
            master=master,
        )
        return remove_line_numbers(text) if confirmed else text
    except Exception:
        traceback.print_exc()
        return text
def has_line_numbers(text):
    """Heuristic: True when the text has more than two lines and every
    line starts with a line number."""
    lines = text.splitlines()
    # Generator expression instead of materializing a throwaway list in all().
    return len(lines) > 2 and all(
        len(split_after_line_number(line)) == 2 for line in lines
    )
def split_after_line_number(s):
    """Split "NN. rest" into [number_prefix, rest]; return [s] when the
    line does not start with a number."""
    parts = re.split(r"(^\s*\d+\.?)", s)
    if len(parts) != 1:
        # An anchored capturing group makes re.split yield ["", prefix, rest].
        assert len(parts) == 3 and parts[0] == ""
        parts = parts[1:]
    return parts
def remove_line_numbers(s):
    """Strip the leading line number from every line.

    Returns *s* unchanged when any line lacks a number; otherwise the
    dedented remainder (with a trailing newline).
    """
    stripped = []
    for line in s.splitlines():
        pieces = split_after_line_number(line)
        if len(pieces) != 2:
            # Mixed content -- safer to leave the text as it was.
            return s
        stripped.append(pieces[1])
    return textwrap.dedent("\n".join(stripped) + "\n")
# Place a toplevel window at the center of parent or screen
# It is a Python implementation of ::tk::PlaceWindow.
# Copied from tkinter.simpledialog of Python 3.10.2
def _place_window(w, parent=None):
    """Place toplevel *w* at the center of *parent* (or the screen) and show it.

    Python implementation of ::tk::PlaceWindow, copied from
    tkinter.simpledialog of Python 3.10.2.
    """
    w.wm_withdraw()  # Remain invisible while we figure out the geometry
    w.update_idletasks()  # Actualize geometry information
    minwidth = w.winfo_reqwidth()
    minheight = w.winfo_reqheight()
    maxwidth = w.winfo_vrootwidth()
    maxheight = w.winfo_vrootheight()
    if parent is not None and parent.winfo_ismapped():
        # Center over the parent, clamped to the virtual root area.
        x = parent.winfo_rootx() + (parent.winfo_width() - minwidth) // 2
        y = parent.winfo_rooty() + (parent.winfo_height() - minheight) // 2
        vrootx = w.winfo_vrootx()
        vrooty = w.winfo_vrooty()
        x = min(x, vrootx + maxwidth - minwidth)
        x = max(x, vrootx)
        y = min(y, vrooty + maxheight - minheight)
        y = max(y, vrooty)
        if w._windowingsystem == "aqua":
            # Avoid the native menu bar which sits on top of everything.
            y = max(y, 22)
    else:
        # No mapped parent -- center on the screen.
        x = (w.winfo_screenwidth() - minwidth) // 2
        y = (w.winfo_screenheight() - minheight) // 2
    w.wm_maxsize(maxwidth, maxheight)
    w.wm_geometry("+%d+%d" % (x, y))
    w.wm_deiconify()  # Become visible at the desired location
class WaitingDialog(CommonDialog):
    """Modal "please wait" dialog that polls an async result and closes
    itself when the result becomes ready."""

    def __init__(self, master, async_result, description, title="Please wait!", timeout=None):
        # async_result: object with a ready() method (e.g. AsyncResult)
        self._async_result = async_result
        super().__init__(master)
        if misc_utils.running_on_mac_os():
            self.configure(background="systemSheetBackground")
        self.title(title)
        self.resizable(height=tk.FALSE, width=tk.FALSE)
        # self.protocol("WM_DELETE_WINDOW", self._close)
        self.desc_label = ttk.Label(self, text=description, wraplength=300)
        self.desc_label.grid(padx=20, pady=20)
        self.update_idletasks()

        self.timeout = timeout  # seconds; None means wait indefinitely
        self.start_time = time.time()
        self.after(500, self._poll)

    def _poll(self):
        # Re-schedules itself every 500 ms and appends a dot to the
        # description as simple progress feedback.
        if self._async_result.ready():
            self._close()
        elif self.timeout and time.time() - self.start_time > self.timeout:
            # NOTE(review): raising inside an after-callback is only
            # reported by Tk; the dialog stays open -- confirm intended.
            raise TimeoutError()
        else:
            self.after(500, self._poll)
            self.desc_label["text"] = self.desc_label["text"] + "."

    def _close(self):
        self.destroy()
def run_with_waiting_dialog(master, action, args=(), description="Working"):
    """Run action(*args) on a background thread while showing a modal
    WaitingDialog; return the action's result (exceptions re-raised by get()).

    Based on http://stackoverflow.com/a/14299004/261181
    """
    from multiprocessing.pool import ThreadPool

    # Context manager terminates the single-thread pool afterwards;
    # the old code leaked one pool per call.
    with ThreadPool(processes=1) as pool:
        async_result = pool.apply_async(action, args)
        dlg = WaitingDialog(master, async_result, description=description)
        show_dialog(dlg, master)
        return async_result.get()
class FileCopyDialog(CommonDialog):
    """Modal dialog that copies *source* to *destination* on a background
    thread while showing a progress bar; Escape / window-close cancels."""

    def __init__(self, master, source, destination, description=None, fsync=True):
        self._source = source
        self._destination = destination
        self._old_bytes_copied = 0  # progress shown so far
        self._bytes_copied = 0      # progress reported by the worker thread
        self._fsync = fsync         # fsync after every chunk when True
        self._done = False
        self._cancelled = False
        self._closed = False
        super().__init__(master)
        main_frame = ttk.Frame(self)  # To get styled background
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        self.title(tr("Copying"))

        if description is None:
            description = tr("Copying\n  %s\nto\n  %s") % (source, destination)

        label = ttk.Label(main_frame, text=description)
        label.grid(row=0, column=0, columnspan=2, sticky="nw", padx=15, pady=15)

        self._bar = ttk.Progressbar(main_frame, maximum=os.path.getsize(source), length=200)
        self._bar.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=15, pady=0)

        self._cancel_button = ttk.Button(main_frame, text=tr("Cancel"), command=self._cancel)
        self._cancel_button.grid(row=2, column=1, sticky="ne", padx=15, pady=15)
        self._bar.focus_set()

        main_frame.columnconfigure(0, weight=1)

        self._update_progress()

        self.bind("<Escape>", self._cancel, True)  # escape-close only if process has completed
        self.protocol("WM_DELETE_WINDOW", self._cancel)
        self._start()

    def _start(self):
        """Run the chunked copy on a daemon thread."""
        def work():
            # NOTE(review): attribute is misspelled and never read -- kept as-is.
            self._copy_progess = 0

            with open(self._source, "rb") as fsrc:
                with open(self._destination, "wb") as fdst:
                    while True:
                        buf = fsrc.read(16 * 1024)
                        if not buf:
                            break

                        fdst.write(buf)
                        fdst.flush()
                        if self._fsync:
                            os.fsync(fdst)
                        self._bytes_copied += len(buf)

            self._done = True

        threading.Thread(target=work, daemon=True).start()

    def _update_progress(self):
        # UI-thread poller: advances the bar every 100 ms until done.
        if self._done:
            if not self._closed:
                self._close()
            return

        self._bar.step(self._bytes_copied - self._old_bytes_copied)
        self._old_bytes_copied = self._bytes_copied

        self.after(100, self._update_progress)

    def _close(self):
        self.destroy()
        self._closed = True

    def _cancel(self, event=None):
        # The worker thread is a daemon; it is not interrupted explicitly.
        self._cancelled = True
        self._close()
class ChoiceDialog(CommonDialogEx):
    """Modal dialog offering radio-button *choices*; after closing,
    ``result`` holds the selected string or None (cancel / no selection)."""

    def __init__(
        self,
        master=None,
        title="Choose one",
        question: str = "Choose one:",
        choices=(),
        initial_choice_index=None,
    ) -> None:
        # Tuple default replaces the old mutable-list default argument.
        super().__init__(master=master)

        self.title(title)
        self.resizable(False, False)

        self.main_frame.columnconfigure(0, weight=1)

        row = 0
        question_label = ttk.Label(self.main_frame, text=question)
        question_label.grid(row=row, column=0, columnspan=2, sticky="w", padx=20, pady=20)
        row += 1

        self.var = tk.StringVar(value="")
        if initial_choice_index is not None:
            self.var.set(choices[initial_choice_index])

        for choice in choices:
            rb = ttk.Radiobutton(self.main_frame, text=choice, variable=self.var, value=choice)
            rb.grid(row=row, column=0, columnspan=2, sticky="w", padx=20)
            row += 1

        ok_button = ttk.Button(self.main_frame, text=tr("OK"), command=self._ok, default="active")
        ok_button.grid(row=row, column=0, sticky="e", pady=20)

        cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self._cancel)
        cancel_button.grid(row=row, column=1, sticky="e", padx=20, pady=20)

        self.bind("<Escape>", self._cancel, True)
        self.bind("<Return>", self._ok, True)
        self.protocol("WM_DELETE_WINDOW", self._cancel)

    def _ok(self):
        """Store the selection (empty string becomes None) and close."""
        self.result = self.var.get()
        if not self.result:
            self.result = None

        self.destroy()

    def _cancel(self):
        """Close without a selection."""
        self.result = None
        self.destroy()
class LongTextDialog(CommonDialog):
    """Read-only scrollable viewer for long text with a
    "Copy to clipboard" button."""

    def __init__(self, title, text_content, parent=None):
        if parent is None:
            parent = tk._default_root
        super().__init__(master=parent)
        self.title(title)

        main_frame = ttk.Frame(self)
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        default_font = tk.font.nametofont("TkDefaultFont")

        self._text = tktextext.TextFrame(
            main_frame,
            read_only=True,
            wrap="none",
            font=default_font,
            width=80,
            height=10,
            relief="sunken",
            borderwidth=1,
        )
        self._text.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=20, pady=20)
        self._text.text.direct_insert("1.0", text_content)
        self._text.text.see("1.0")

        copy_button = ttk.Button(
            main_frame, command=self._copy, text=tr("Copy to clipboard"), width=20
        )
        copy_button.grid(row=2, column=0, sticky="w", padx=20, pady=(0, 20))

        close_button = ttk.Button(
            main_frame, command=self._close, text=tr("Close"), default="active"
        )
        close_button.grid(row=2, column=1, sticky="w", padx=20, pady=(0, 20))
        close_button.focus_set()

        main_frame.columnconfigure(0, weight=1)
        main_frame.rowconfigure(1, weight=1)

        self.protocol("WM_DELETE_WINDOW", self._close)
        self.bind("<Escape>", self._close, True)

    def _copy(self, event=None):
        # Replace clipboard content with the full text.
        self.clipboard_clear()
        self.clipboard_append(self._text.text.get("1.0", "end"))

    def _close(self, event=None):
        self.destroy()
def ask_one_from_choices(
    master=None,
    title="Choose one",
    question: str = "Choose one:",
    choices=(),
    initial_choice_index=None,
):
    """Show a modal ChoiceDialog; return the chosen string or None.

    The old mutable-list default for *choices* was replaced by a tuple.
    """
    dlg = ChoiceDialog(master, title, question, choices, initial_choice_index)
    show_dialog(dlg, master)
    return dlg.result
def get_busy_cursor():
    """Name of the platform's native "busy" cursor."""
    if running_on_windows():
        return "wait"
    return "spinning" if running_on_mac_os() else "watch"
def get_tk_version_str():
    """Return Tk's full patchlevel string (e.g. "8.6.12")."""
    return tk._default_root.tk.call("info", "patchlevel")
def get_tk_version_info():
    """Tk patchlevel as a tuple of ints; unparsable parts become 0."""
    def to_int(part):
        try:
            return int(part)
        except Exception:
            return 0

    return tuple(to_int(part) for part in get_tk_version_str().split("."))
def get_style_configuration(style_name, default=None):
    """Return the ttk configuration dict of *style_name*, or *default*
    (a fresh empty dict when omitted) if the style has no settings.

    NB! style.configure seems to reuse the returned dict --
    don't change it without copying first.
    """
    style = ttk.Style()
    result = style.configure(style_name)
    if result is None:
        # A fresh dict per call -- the old `default={}` handed the same
        # mutable dict to every caller.
        return {} if default is None else default
    return result
def lookup_style_option(style_name, option_name, default=None):
    """Look up a ttk style option, mapping "True"/"False" strings to real
    booleans and empty results to *default*."""
    setting = ttk.Style().lookup(style_name, option_name)
    if setting in (None, ""):
        return default
    return {"True": True, "False": False}.get(setting, setting)
def scale(value):
    """Scale a pixel value by the workbench's UI scaling factor."""
    return get_workbench().scale(value)
def open_path_in_system_file_manager(path):
    """Reveal *path* in Finder, the default Linux file manager or Explorer."""
    if running_on_mac_os():
        # http://stackoverflow.com/a/3520693/261181
        # -R doesn't allow showing hidden folders
        opener = "open"
    elif running_on_linux():
        opener = "xdg-open"
    else:
        assert running_on_windows()
        opener = "explorer"
    subprocess.Popen([opener, path])
def _get_dialog_provider():
    """Use Zenity-backed file dialogs on Linux when available and not
    disabled by the "file.avoid_zenity" option; otherwise tkinter.filedialog."""
    zenity_allowed = (
        platform.system() == "Linux"
        and not get_workbench().get_option("file.avoid_zenity")
    )
    if zenity_allowed:
        import shutil

        if shutil.which("zenity"):
            return _ZenityDialogProvider

    # fallback
    return filedialog
def asksaveasfilename(**options):
    """Provider-agnostic "save as" dialog with normalized parent/master."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().asksaveasfilename(**options)
def askopenfilename(**options):
    """Provider-agnostic single-file "open" dialog with normalized parent/master."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askopenfilename(**options)
def askopenfilenames(**options):
    """Provider-agnostic multi-file "open" dialog with normalized parent/master."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askopenfilenames(**options)
def askdirectory(**options):
    """Provider-agnostic directory chooser with normalized parent/master."""
    # https://tcl.tk/man/tcl8.6/TkCmd/chooseDirectory.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askdirectory(**options)
def _check_dialog_parent(options):
    """Normalize the "parent"/"master" entries of Tk dialog *options*.

    Makes sure both keys are set (warning when they point at different
    toplevels or are missing), then removes both on macOS -- see the
    referenced issues below.
    """
    if options.get("parent") and options.get("master"):
        parent = options["parent"].winfo_toplevel()
        master = options["master"].winfo_toplevel()
        if parent is not master:
            logger.warning(
                "Dialog with different parent/master toplevels:\n%s",
                "".join(traceback.format_stack()),
            )
    elif options.get("parent"):
        parent = options["parent"].winfo_toplevel()
        master = options["parent"].winfo_toplevel()
    elif options.get("master"):
        parent = options["master"].winfo_toplevel()
        master = options["master"].winfo_toplevel()
    else:
        logger.warning("Dialog without parent:\n%s", "".join(traceback.format_stack()))
        parent = tk._default_root
        master = tk._default_root

    options["parent"] = parent
    options["master"] = master

    if running_on_mac_os():
        # used to require master/parent (https://bugs.python.org/issue34927)
        # but this is deprecated in Catalina (https://github.com/thonny/thonny/issues/840)
        # TODO: Consider removing this when upgrading from Tk 8.6.8
        del options["master"]
        del options["parent"]
class _ZenityDialogProvider:
    """tkinter.filedialog-compatible dialogs backed by the `zenity` CLI."""

    # https://www.writebash.com/bash-gui/zenity-create-file-selection-dialog-224.html
    # http://linux.byexamples.com/archives/259/a-complete-zenity-dialog-examples-1/
    # http://linux.byexamples.com/archives/265/a-complete-zenity-dialog-examples-2/

    # another possibility is to use PyGobject: https://github.com/poulp/zenipy

    @classmethod
    def askopenfilename(cls, **options):
        """Return the selected path, or None when cancelled."""
        args = cls._convert_common_options("Open file", **options)
        return cls._call(args)

    @classmethod
    def askopenfilenames(cls, **options):
        """Return the list of selected paths ([] when cancelled)."""
        args = cls._convert_common_options("Open files", **options)
        result = cls._call(args + ["--multiple"])
        if not result:
            # _call returns None on cancel/error; the old code crashed
            # with AttributeError on None.split("|") here.
            return []
        return result.split("|")

    @classmethod
    def asksaveasfilename(cls, **options):
        """Return the chosen target path, or None when cancelled."""
        args = cls._convert_common_options("Save as", **options)
        args.append("--save")
        if options.get("confirmoverwrite", True):
            args.append("--confirm-overwrite")

        filename = cls._call(args)
        if not filename:
            return None
        return filename

    @classmethod
    def askdirectory(cls, **options):
        """Return the selected directory, or None when cancelled."""
        args = cls._convert_common_options("Select directory", **options)
        args.append("--directory")
        return cls._call(args)

    @classmethod
    def _convert_common_options(cls, default_title, **options):
        """Translate Tk-style dialog options into zenity CLI arguments."""
        args = ["--file-selection", "--title=%s" % options.get("title", default_title)]

        filename = _options_to_zenity_filename(options)
        if filename:
            args.append("--filename=%s" % filename)

        parent = options.get("parent", options.get("master", None))
        if parent is not None:
            args.append("--modal")
            args.append("--attach=%s" % hex(parent.winfo_id()))

        for desc, pattern in options.get("filetypes", ()):
            # zenity requires star before extension
            pattern = pattern.replace(" .", " *.")
            if pattern.startswith("."):
                pattern = "*" + pattern

            if pattern == "*.*":
                # ".*" was provided to make the pattern safe for Tk dialog
                # not required with Zenity
                pattern = "*"

            args.append("--file-filter=%s | %s" % (desc, pattern))

        return args

    @classmethod
    def _call(cls, args):
        """Run zenity; stripped stdout on success, None on failure/cancel."""
        args = ["zenity", "--name=Thonny", "--class=Thonny"] + args
        result = subprocess.run(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
        )
        if result.returncode == 0:
            return result.stdout.strip()
        else:
            # TODO: log problems
            print(result.stderr, file=sys.stderr)
            # could check stderr, but it may contain irrelevant warnings
            return None
def _options_to_zenity_filename(options):
if options.get("initialdir"):
if options.get("initialfile"):
return os.path.join(options["initialdir"], options["initialfile"])
else:
return options["initialdir"] + os.path.sep
return None
def register_latin_shortcut(
    registry, sequence: str, handler: Callable, tester: Optional[Callable]
) -> None:
    """Remember a handler for a Ctrl(+Shift)+letter shortcut, keyed by its
    (event_state, keycode) pair; no-op for unsupported sequences.

    Needed because such shortcuts require different treatment on
    non-latin keyboards.
    """
    key = sequence_to_event_state_and_keycode(sequence)
    if key is not None:
        registry.setdefault(key, []).append((handler, tester))
def handle_mistreated_latin_shortcuts(registry, event):
    """Invoke registered Ctrl+LatinLetter handlers for events coming from
    non-latin keyboards (where Tk doesn't fire the latin binding)."""
    # tries to handle Ctrl+LatinLetter shortcuts
    # given from non-Latin keyboards
    # See: https://bitbucket.org/plas/thonny/issues/422/edit-keyboard-shortcuts-ctrl-c-ctrl-v-etc

    # only consider events with Control held
    if not event.state & 0x04:
        return

    if running_on_mac_os():
        return

    # consider only part of the state,
    # because at least on Windows, Ctrl-shortcuts' state
    # has something extra
    simplified_state = 0x04
    if shift_is_pressed(event):
        simplified_state |= 0x01

    # print(simplified_state, event.keycode)
    if (simplified_state, event.keycode) in registry:
        # NOTE(review): ord(event.char) raises TypeError when event.char
        # is empty -- presumably char is non-empty for these key events;
        # verify before relying on it.
        if event.keycode != ord(event.char) and event.keysym in (None, "??"):
            # keycode and char doesn't match,
            # this means non-latin keyboard
            for handler, tester in registry[(simplified_state, event.keycode)]:
                if tester is None or tester():
                    handler()
def show_dialog(dlg, master=None, geometry=None):
    """Show *dlg* as a modal dialog over *master* and wait until it closes,
    restoring focus afterwards.

    *geometry* may be a Tk geometry string; otherwise a previously saved
    size (see get_size_option_name) or centered placement is used.
    """
    if getattr(dlg, "closed", False):
        return

    if master is None:
        master = getattr(dlg, "parent", None) or getattr(dlg, "master", None) or tk._default_root

    master = master.winfo_toplevel()

    get_workbench().event_generate("WindowFocusOut")
    # following order seems to give most smooth appearance
    old_focused_widget = master.focus_get()
    if master.winfo_toplevel().winfo_viewable():
        dlg.transient(master.winfo_toplevel())

    if isinstance(geometry, str):
        dlg.geometry(geometry)
        dlg.wm_deiconify()
    else:
        saved_size = get_workbench().get_option(get_size_option_name(dlg))
        if saved_size:
            width = min(max(saved_size[0], ems_to_pixels(10)), ems_to_pixels(1000))
            # BUG FIX: height used to be derived from saved_size[0]
            # (the saved width) instead of saved_size[1].
            height = min(max(saved_size[1], ems_to_pixels(8)), ems_to_pixels(800))
            left = master.winfo_rootx() + master.winfo_width() // 2 - width // 2
            top = master.winfo_rooty() + master.winfo_height() // 2 - height // 2
            dlg.geometry("%dx%d+%d+%d" % (width, height, left, top))
            dlg.wm_deiconify()
        else:
            _place_window(dlg, master)

    dlg.lift()
    dlg.wait_visibility()
    try:
        dlg.grab_set()
    except TclError as e:
        logger.warning("Can't grab: %s", e)

    dlg.update_idletasks()
    dlg.focus_set()
    if hasattr(dlg, "set_initial_focus"):
        dlg.set_initial_focus()
    dlg.wait_window(dlg)
    dlg.grab_release()

    master.winfo_toplevel().lift()
    master.winfo_toplevel().focus_force()
    master.winfo_toplevel().grab_set()
    if running_on_mac_os():
        master.winfo_toplevel().grab_release()

    if old_focused_widget is not None:
        try:
            old_focused_widget.focus_force()
        except TclError:
            pass
def popen_with_ui_thread_callback(*Popen_args, on_completion, poll_delay=0.1, **Popen_kwargs):
    """Start a subprocess, collect its stdout/stderr on reader threads and
    call on_completion(proc, out_lines, err_lines) via the Tk event loop
    once the process exits.

    Forces UTF-8 text mode unless the caller supplied an encoding.
    """
    if "encoding" not in Popen_kwargs:
        if "env" not in Popen_kwargs:
            Popen_kwargs["env"] = os.environ.copy()
        Popen_kwargs["env"]["PYTHONIOENCODING"] = "utf-8"
        if sys.version_info >= (3, 6):
            Popen_kwargs["encoding"] = "utf-8"

    proc = subprocess.Popen(*Popen_args, **Popen_kwargs)

    # Need to read in thread in order to avoid blocking because
    # of full pipe buffer (see https://bugs.python.org/issue1256)
    out_lines = []
    err_lines = []

    def read_stream(stream, target_list):
        # Runs until the stream reaches EOF.
        while True:
            line = stream.readline()
            if line:
                target_list.append(line)
            else:
                break

    t_out = threading.Thread(target=read_stream, daemon=True, args=(proc.stdout, out_lines))
    t_err = threading.Thread(target=read_stream, daemon=True, args=(proc.stderr, err_lines))
    t_out.start()
    t_err.start()

    def poll():
        # Re-scheduled via after() so on_completion runs on the UI thread.
        if proc.poll() is not None:
            t_out.join(3)
            t_err.join(3)
            on_completion(proc, out_lines, err_lines)
            return

        tk._default_root.after(int(poll_delay * 1000), poll)

    poll()
    return proc
class MenuEx(tk.Menu):
    """tk.Menu that disables items whose registered "tester" callable
    returns falsy; availability is re-evaluated each time the menu is posted."""

    def __init__(self, target):
        self._testers = {}  # item label -> tester callable (or None)
        super().__init__(
            target, tearoff=False, postcommand=self.on_post, **get_style_configuration("Menu")
        )

    def on_post(self, *args):
        self.update_item_availability()

    def update_item_availability(self):
        """Enable/disable every labeled item according to its tester."""
        last_index = self.index("end")
        if last_index is None:
            # index("end") is None for an empty menu; the old code raised
            # TypeError on range(None + 1) here.
            return
        for i in range(last_index + 1):
            item_data = self.entryconfigure(i)
            if "label" in item_data:
                tester = self._testers.get(item_data["label"])
                if tester and not tester():
                    self.entryconfigure(i, state=tk.DISABLED)
                else:
                    self.entryconfigure(i, state=tk.NORMAL)

    def add(self, itemType, cnf={}, **kw):
        """Like tk.Menu.add, but accepts an extra "tester" option which is
        remembered per label and consulted by update_item_availability."""
        # Copy before popping "tester" -- the old code deleted the key
        # from the caller's dict (or kw) in place.
        cnf = dict(cnf or kw)
        tester = cnf.pop("tester", None)

        super().add(itemType, cnf)

        itemdata = self.entryconfigure(self.index("end"))
        labeldata = itemdata.get("label")
        if labeldata:
            self._testers[labeldata] = tester
class TextMenu(MenuEx):
    """Context menu for a Text widget with Cut/Copy/Paste/Select All,
    enabled according to the selection and read-only state."""

    def __init__(self, target):
        self.text = target
        MenuEx.__init__(self, target)
        self.add_basic_items()
        self.add_extra_items()

    def add_basic_items(self):
        # Testers control whether each entry is enabled when posting.
        self.add_command(label=tr("Cut"), command=self.on_cut, tester=self.can_cut)
        self.add_command(label=tr("Copy"), command=self.on_copy, tester=self.can_copy)
        self.add_command(label=tr("Paste"), command=self.on_paste, tester=self.can_paste)

    def add_extra_items(self):
        self.add_separator()
        self.add_command(label=tr("Select All"), command=self.on_select_all)

    def on_cut(self):
        self.text.event_generate("<<Cut>>")

    def on_copy(self):
        self.text.event_generate("<<Copy>>")

    def on_paste(self):
        self.text.event_generate("<<Paste>>")

    def on_select_all(self):
        self.text.event_generate("<<SelectAll>>")

    def can_cut(self):
        return self.get_selected_text() and not self.selection_is_read_only()

    def can_copy(self):
        return self.get_selected_text()

    def can_paste(self):
        return not self.selection_is_read_only()

    def get_selected_text(self):
        # Tk raises TclError when there is no selection.
        try:
            return self.text.get("sel.first", "sel.last")
        except TclError:
            return ""

    def selection_is_read_only(self):
        if hasattr(self.text, "is_read_only"):
            return self.text.is_read_only()

        return False
def create_url_label(master, url, text=None, **kw):
    """Hyperlink-styled label that opens *url* in the web browser."""
    import webbrowser

    return create_action_label(master, text or url, lambda _: webbrowser.open(url), **kw)
def create_action_label(master, text, click_handler, **kw):
    """Underlined, link-styled ttk.Label that calls click_handler on click."""
    url_font = tkinter.font.nametofont("TkDefaultFont").copy()
    url_font.configure(underline=1)
    url_label = ttk.Label(
        master, text=text, style="Url.TLabel", cursor=get_hyperlink_cursor(), font=url_font, **kw
    )
    url_label.bind("<Button-1>", click_handler)
    return url_label
def get_size_option_name(window):
    """Workbench option name under which this window type's size is saved."""
    return "layout.{}_size".format(type(window).__name__)
def get_default_basic_theme():
    """Default ttk base theme: "vista" on Windows, "clam" elsewhere."""
    return "vista" if running_on_windows() else "clam"
EM_WIDTH = None
def ems_to_pixels(x: float) -> int:
    """Convert an em-based measure to pixels, using the width of "m" in
    TkDefaultFont (measured once and cached in module-level EM_WIDTH)."""
    global EM_WIDTH
    if EM_WIDTH is None:
        EM_WIDTH = tkinter.font.nametofont("TkDefaultFont").measure("m")
    return int(EM_WIDTH * x)
_btn_padding = None
def set_text_if_different(widget, text) -> bool:
    """Assign the widget's "text" option only when it actually differs;
    return True when a change was made."""
    if widget["text"] == text:
        return False
    widget["text"] = text
    return True
def tr_btn(s):
    """Translates button caption, adds padding to make sure text fits"""
    global _btn_padding
    if _btn_padding is None:
        # Measure/compute the padding once and cache it at module level.
        _btn_padding = get_button_padding()
    return "".join([_btn_padding, tr(s), _btn_padding])
def add_messagebox_parent_checker():
    """Monkey-patch the tkinter.messagebox functions so every call runs
    its options through _check_dialog_parent first."""
    from tkinter import messagebox

    def checked(original):
        def wrapper(*args, **options):
            _check_dialog_parent(options)
            return original(*args, **options)

        return wrapper

    for name in (
        "showinfo",
        "showwarning",
        "showerror",
        "askquestion",
        "askokcancel",
        "askyesno",
        "askyesnocancel",
        "askretrycancel",
    ):
        setattr(messagebox, name, checked(getattr(messagebox, name)))
def windows_known_extensions_are_hidden() -> bool:
    """Read Explorer's "Hide extensions for known file types" setting from
    the registry. Windows only."""
    assert running_on_windows()
    import winreg

    # The key handle supports the context-manager protocol; the old
    # try/finally with Close() is equivalent but noisier.
    with winreg.OpenKey(
        winreg.HKEY_CURRENT_USER,
        r"SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Advanced",
        0,
        winreg.KEY_READ,
    ) as reg_key:
        return winreg.QueryValueEx(reg_key, "HideFileExt")[0] == 1
# Manual test entry point: print Explorer's extension-hiding setting.
if __name__ == "__main__":
    print(windows_known_extensions_are_hidden())
|
parmap_queue.py | #coding=utf-8
from multiprocessing import Queue, Process, cpu_count
def fib(n):
    """n-th Fibonacci number, 1-based (fib(1) == fib(2) == 1)."""
    if n <= 2:
        return 1
    prev, cur = 1, 1
    for _ in range(n - 2):
        prev, cur = cur, prev + cur
    return cur
def apply_func(f, q_in, q_out):
    """Worker loop: drain (index, item) pairs from q_in and put
    (index, f(item)) on q_out until q_in is exhausted.

    Uses get_nowait instead of the old empty()+get() pair: empty() can
    report False while a sibling worker grabs the last item, after which
    get() would block forever.
    """
    try:
        from queue import Empty
    except ImportError:  # Python 2
        from Queue import Empty
    while True:
        try:
            i, item = q_in.get_nowait()
        except Empty:
            return
        q_out.put((i, f(item)))
def parmap(f, items, nprocs=None):
    """Parallel map over *items* preserving input order.

    nprocs defaults to cpu_count() evaluated at call time; the old
    `nprocs=cpu_count()` default froze the value at import time.
    """
    if nprocs is None:
        nprocs = cpu_count()
    q_in, q_out = Queue(), Queue()
    # Fill the input queue before starting the workers, so their
    # "queue drained" exit condition is reliable.
    count = 0
    for pair in enumerate(items):
        q_in.put(pair)
        count += 1
    procs = [Process(target=apply_func, args=(f, q_in, q_out))
             for _ in range(nprocs)]
    # Plain loops instead of list comprehensions used for side effects.
    for p in procs:
        p.start()
    results = [q_out.get() for _ in range(count)]
    for p in procs:
        p.join()
    return [item for _, item in sorted(results)]
class CalculateFib(object):
    """Demo comparing multiprocessing.Pool.map against the local parmap."""

    @classmethod
    def fib(cls, n):
        """Naive recursive Fibonacci, 1-based (fib(1) == fib(2) == 1)."""
        if n <= 2:
            return 1
        return cls.fib(n - 1) + cls.fib(n - 2)

    def map_run(self):
        # Pool was referenced without being imported -> NameError before.
        from multiprocessing import Pool
        pool = Pool(2)
        # Parenthesized print works in both Python 2 and 3
        # (the old bare print statement was Python-2 only).
        print(pool.map(self.fib, [35] * 2))
        pool.close()
        pool.join()

    def parmap_run(self):
        print(parmap(self.fib, [35] * 2, nprocs=2))
# Script body: runs the parmap demo (two fib(35) calls in parallel).
# NOTE(review): runs on import too -- consider guarding with
# `if __name__ == "__main__":`.
cl = CalculateFib()
cl.parmap_run()
|
launch.py | import threading
import time
import enquiries
import os
import sys
import re
import math
import multiprocessing
from src.components.server.server import Server
from src.components.client.client import Client
from src.core.utils.configuration import Configuration
from src.protocol.base import Message
from src.protocol.client.write.text_message import TextMessage
from src.protocol.client.write.initial import InitMessage
processes = {}
def clear():
    """Clear the terminal (Unix `clear` command)."""
    os.system("clear")
def print_banner():
    """Print the ASCII-art banner centered to the terminal width."""
    columns, rows = os.get_terminal_size(0)
    print("*" * columns)
    print("__ _____ _____ ___ _ _ ".center(columns, " "))
    print("\ \ / / _ \/ __ \ / _ \ | | | | ".center(columns, " "))
    print(" \ V / /_\ \ / \// /_\ \ | | __ _ _ _ _ __ ___| |__ ___ _ __ ".center(columns, " "))
    print(" \ /| _ | | | _ | | | / _` | | | | '_ \ / __| '_ \ / _ \ '__|".center(columns, " "))
    print(" | || | | | \__/\| | | | | |___| (_| | |_| | | | | (__| | | | __/ | ".center(columns, " "))
    print(" \_/\_| |_/\____/\_| |_/ \_____/\__,_|\__,_|_| |_|\___|_| |_|\___|_| ".center(columns, " "))
    print()
    print("*" * columns)
def print_menu_banner(menu_text):
    """Clear the screen, print the banner and a centered menu title."""
    clear()
    columns, _rows = os.get_terminal_size(0)
    print_banner()
    print(menu_text.center(columns))
    print("*" * columns)
def main_menu():
    """Top-level interactive menu loop; returns when "Exit" is chosen."""
    exiting = False
    while not exiting:
        print_menu_banner("Main Menu")
        options = [
            "1. Spawn Processes",
            "2. Kill Processes",
            "3. Run Tests",
            "4. Exit (Ctrl+C)",
        ]
        choice = enquiries.choose("Choose one of these options: ", options)
        if choice == options[0]:
            spawn_processes_menu()
        elif choice == options[1]:
            kill_processes_menu()
        # NOTE(review): options[2] ("Run Tests") has no handler --
        # choosing it just redraws the menu. Confirm whether intended.
        elif choice == options[3]:
            return
def kill_processes_menu():
    """Menu loop letting the user pick a component whose processes to kill."""
    while True:
        print_menu_banner("Kill Processes")
        # Map the rendered option string back to its component name.
        labels = {}
        for idx, component in enumerate(processes, start=1):
            labels["{}. Kill {}".format(idx, component)] = component
        return_option = "{}. Return to Main Menu".format(len(labels) + 1)
        options = list(labels) + [return_option]
        choice = enquiries.choose("Choose one of these options: ", options)
        if choice == options[-1]:
            return
        if choice in labels:
            kill_processes_component_menu(labels[choice])
def kill_processes_component_menu(component):
    """Let the user select processes of *component* to terminate, then
    wait until the chosen ones have exited and drop them from the registry."""
    print_menu_banner(
        "Kill Processes (Selection: "
        + component
        + ") - Suggestion: Upper limit for Byzantine faults: "
        + str(math.ceil(len(processes[component]) / 4) - 1)
    )
    options = []
    counter = 1
    for p in processes[component]:
        options.append(str(counter) + str(p))
        counter += 1
    options.append(str(counter) + ". Return")
    choice = enquiries.choose(
        "Choose multiple of these options (space to select): ", options, multi=True
    )
    if options[-1] in choice:
        return
    terminating_procs = []
    counter = 1
    for p in processes[component]:
        if str(counter) + str(p) in choice:
            p.terminate()
            terminating_procs.append(p)
        counter += 1
    while len(terminating_procs) > 0:
        # Iterate over a copy: the old code removed items from the list
        # it was iterating, which skips the element following each
        # removal (masked only by extra passes of the outer while loop).
        for p in list(terminating_procs):
            if not p.is_alive():
                p.join()
                terminating_procs.remove(p)
                processes[component].remove(p)
def spawn_processes_menu():
    """Menu loop for spawning N server or client processes."""
    while True:
        print_menu_banner("Spawn Processes")
        options = [
            "1. Launch Server",
            "2. Launch Client",
            "3. Return to Main Menu",
        ]
        choice = enquiries.choose("Choose one of these options: ", options)
        if choice == options[-1]:
            return False
        print("Number of Instances: ", end="")
        try:
            instances = int(input())
        except ValueError:
            # Non-numeric input used to crash the whole launcher.
            print("Please enter a whole number")
            continue
        if choice == options[0]:
            target, component = launch_server, "server"
        else:
            target, component = launch_client, "client"
        for _ in range(instances):
            p = multiprocessing.Process(target=target)
            p.start()
            processes[component].append(p)
def launch_server(initial=False, i=0):
    """Server process body: redirect stdout/stderr to a per-PID,
    line-buffered log file and run a Server instance."""
    sys.stdout = open("logs/server-" + str(os.getpid()) + ".out", "a", buffering=1)
    sys.stderr = open("logs/server-" + str(os.getpid()) + ".out", "a", buffering=1)
    server = Server(initial, i, verbose=True)
    server.start()
def inc_string(string):
    """Increment a base-26 "a".."z" counter string: "a" -> "b",
    "az" -> "ba"; overflow grows the string ("zz" -> "aaa")."""
    if string == "z" * len(string):
        # Every position overflows: grow by one digit, reset to all "a".
        return "a" * (len(string) + 1)
    carried = ""
    for i in reversed(range(len(string))):
        ch = string[i]
        if ch == "z":
            carried = "a" + carried  # carry into the next position
        else:
            return string[:i] + chr(ord(ch) + 1) + carried
    return carried
def max_string(val1: str, val2: str):
    """Pick the "larger" of two counter strings: longer always wins; for
    equal lengths characters are compared starting from the END.

    NOTE(review): the right-to-left comparison looks suspicious for the
    left-most-significant strings produced by inc_string -- confirm it
    is intended before changing.
    """
    if len(val1) != len(val2):
        return val1 if len(val1) > len(val2) else val2
    for c1, c2 in zip(reversed(val1), reversed(val2)):
        if c1 != c2:
            return val1 if c1 > c2 else val2
    return val1  # val1 = val2
def launch_client():
    """Client process body: redirect output to a per-PID log file, start a
    chat client and run an auto-chat loop.

    Every ~5 seconds it sends an incrementing "a..z" counter string
    (prefixed with a reply to the last message seen from another client);
    incoming messages are printed and folded into the counter via
    max_string.
    """
    sys.stdout = open("logs/client-" + str(os.getpid()) + ".out", "a", buffering=1)
    sys.stderr = open("logs/client-" + str(os.getpid()) + ".out", "a", buffering=1)
    client = Client()
    client.start()
    channel = client._delivery_channel
    old_ts = 0
    last_seen_text = ""
    text = "a"

    def timeout_handler():
        # Unblock the consume() below so the send branch gets a chance to run.
        channel.produce(None)

    while True:
        ts = time.time_ns() / 10**9
        if ts - old_ts > 5:
            old_ts = ts
            client.send("(Reply to {}) {}".format(last_seen_text, text) if last_seen_text != "" else text)
            text = inc_string(text)
        timer = threading.Timer(5, timeout_handler)
        timer.start()
        data = channel.consume()
        timer.cancel()
        if data is not None:
            msg = Message.initFromJSON(data)
            msg.decode()
            if msg.header == "Write: Initial":
                msg = InitMessage.initFromJSON(data)
                msg.decode()
                print("{} joined the chat".format(msg.identifier))
            if msg.header == "Write: TextMessage":
                msg = TextMessage.initFromJSON(data)
                msg.decode()
                sender_id = msg.get_signature()[0]
                print("{}: {}".format(sender_id, msg.text))
                if sender_id != client._identifier:
                    # Remember only the last word of the message.
                    last_seen_text = '{}: "{}"'.format(sender_id, msg.text.split(" ")[-1] if " " in msg.text else msg.text)
                    text = max_string(text, msg.text.split(" ")[-1] if " " in msg.text else msg.text)
def main():
    """Entry point: optionally wipe old logs (-c), optionally spawn the
    configured initial servers (-i as first argument), then run the menu."""
    if "-c" in sys.argv:
        log_dir = "logs/"
        for name in os.listdir(log_dir):
            os.remove(os.path.join(log_dir, name))
    processes["server"] = []
    processes["client"] = []
    if len(sys.argv) > 1 and "-i" in sys.argv[1]:
        config = Configuration()
        for i in range(config.data["initial"]["instances"]):
            worker = multiprocessing.Process(target=launch_server, args=(True, i))
            worker.start()
            processes["server"].append(worker)
    main_menu()
# Run the launcher; on exit (including Ctrl+C) terminate every spawned
# process and wait until all of them are gone.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("exiting...")
    finally:
        for component in processes:
            for p in processes[component]:
                p.terminate()
        print("Waiting for processes to terminate...")
        while sum(len(procs) for procs in processes.values()) > 0:
            for component in processes:
                # Iterate over a copy: the old code removed entries from
                # the list it was iterating, skipping the element after
                # each removal until later passes of the while loop.
                for p in list(processes[component]):
                    if not p.is_alive():
                        p.join()
                        processes[component].remove(p)
|
vm_service_test.py | # TODO: Unused Code
# Might use it later for VM case
"""
actions_lock = threading.Lock()
NodeSMPCAction = namedtuple("NodeSMPCAction", ["node_lock", "smpc_actions"])
actions_to_run_per_node: Dict[Any, NodeSMPCAction] = defaultdict(
lambda: NodeSMPCAction(threading.Lock(), deque())
)
def consume_smpc_actions_round_robin() -> None:
# Queue keeps a list of actions
max_nr_retries = 10
last_msg_id: Optional[UID] = None
while 1:
# Get a list of nodes
with actions_lock:
nodes = list(actions_to_run_per_node.keys())
# Get one actions from each node in a Round Robin fashion and try to run it
for node in nodes:
with actions_to_run_per_node[node].node_lock:
if len(actions_to_run_per_node[node].smpc_actions) == 0:
continue
action = actions_to_run_per_node[node].smpc_actions[0]
node, msg, verify_key, nr_retries = action
if nr_retries > max_nr_retries:
                raise ValueError(f"Retried too many times for {action}")
try:
# try to execute and pop if succeded
msg.execute_action(node, verify_key)
actions_to_run_per_node[node].smpc_actions.popleft()
except KeyError:
logger.warning(
f"Skip SMPC action {msg} since there was a key error when (probably) accessing the store"
)
if (last_msg_id is not None) and last_msg_id == msg.id:
# If there is only one action in all the lists
time.sleep(0.5)
last_msg_id = msg.id
thread_smpc_action = threading.Thread(
target=consume_smpc_actions_round_robin, args=(), daemon=True
)
thread_smpc_action.start()
"""
|
SocketPoller.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import socket
import select
import threading
import time
import datetime
from enum import Flag
#from threading import Lock
from Utils import DebugLock as Lock
from Utils import Utils
try:
from Event import Event
except Exception as ex:
Utils.print_exception(ex)
# abstract
class SocketPoller(object):
    """Abstract singleton that watches registered sockets from a background
    thread and dispatches readiness callbacks.

    Concrete subclasses implement do_poll(): SocketPollerLinux uses epoll,
    SocketPollerOther falls back to select().
    """

    _instance = None

    @classmethod
    def get_instance(cls):
        """Return the process-wide poller, creating the platform-specific
        implementation on first use."""
        if not cls._instance:
            cls._instance = SocketPollerLinux() if Utils.is_linux(os.name) else SocketPollerOther()
        return cls._instance

    def __init__(self):
        self.lock = Lock()
        # fd object -> (flags, callback) for every registered descriptor
        self.descripters = {}
        # raw fileno -> fd object; needed to map poll results back to objects
        self.fd_map = {}
        #self.timeout = 1.0
        self.timeout = 5.0  # seconds per poll/sleep iteration
        self.thread = None
        self.running = False
        self.debug = True
        # Observable events fired around the polling loop.
        self.started = Event('EventArgs')
        self.stopped = Event('EventArgs')
        self.idle = Event('EventArgs')

    # public
    def register(self, fd, flags, callback):
        """Start watching fd for the given SocketPollFlag bits.

        callback(fd, flag) fires from the poller thread when fd is signalled.
        """
        with self.lock:
            self.do_register(fd, flags, callback)

    # public
    def unregister(self, fd):
        """Stop watching fd."""
        with self.lock:
            self.do_unregister(fd)

    # protected virtual
    def on_idle(self, e):
        self.idle(self, e)  # fire event

    # protected virtual
    def do_register(self, fd, flag, callback):
        # Caller must hold self.lock (public register() does).
        self.descripters[fd] = (flag, callback)
        self.fd_map[fd.fileno()] = fd

    # protected virtual
    def do_unregister(self, fd):
        # Caller must hold self.lock (public unregister() does).
        self.descripters.pop(fd)
        self.fd_map.pop(fd.fileno())

    # public
    def start(self):
        """Start the background polling thread."""
        with self.lock:
            self.do_start()

    # public
    def stop(self):
        """Request the polling thread to exit after its current iteration."""
        with self.lock:
            self.do_stop()

    # protected abstract
    def update_select_sockets(self):
        pass

    # protected virtual
    def on_started(self, e):
        self.started(self, e)  # fire event

    # protected virtual
    def on_stopped(self, e):
        self.stopped(self, e)  # fire event

    # protected virtual
    def do_start(self):
        Utils.assertion(self.lock.locked(), 'need lock')
        if self.thread is not None:
            raise Exception()  # InvalidOperationException()
        self.thread = threading.Thread(target=self.run, name=self.__class__.__name__)
        self.running = True
        self.thread.start()

    # protected virtual
    def do_stop(self):
        Utils.assertion(self.lock.locked(), 'need lock')
        if self.thread is None:
            raise Exception()  # InvalidOperationException()
        # The run() loop observes this flag and exits after its current poll.
        self.running = False

    # protected virtual
    def run(self):
        """Poller thread main loop: poll, fire idle, repeat until stopped."""
        self.on_started(None)
        while self.running:
            self.do_poll()
            self.on_idle(None)
        self.on_stopped(None)
        with self.lock:
            self.thread = None

    def do_poll(self):
        # Abstract: must be provided by the platform-specific subclass.
        raise NotImplementedError('do_poll must be implemented by a subclass')

    def invoke_callback(self, fd, flag):
        """Look up fd's callback under the lock, then invoke it outside the
        lock so the callback can safely re-enter register()/unregister()."""
        print('invoke_callback: %s:%s' % (str(flag), str(fd)))
        callback = None
        with self.lock:
            if fd not in self.descripters:
                return
            flag2, callback = self.descripters[fd]
        if callback is not None:
            callback(fd, flag)
class SocketPollerOther(SocketPoller):
    """select()-based poller for platforms without epoll."""

    def __init__(self):
        super().__init__()
        self.lock = Lock()
        # fd -> True membership maps, one per watched condition
        self.read_sockets = {}   # Dictionary<Socket, bool>
        self.write_sockets = {}  # Dictionary<Socket, bool>
        self.error_sockets = {}  # Dictionary<Socket, bool>

    # protected virtual
    def do_register(self, fd, flag, callback):
        super().do_register(fd, flag, callback)
        if flag & SocketPollFlag.Read:
            self.read_sockets[fd] = True
        if flag & SocketPollFlag.Write:
            self.write_sockets[fd] = True
        if flag & SocketPollFlag.Error:
            self.error_sockets[fd] = True

    # protected virtual
    def do_unregister(self, fd):
        super().do_unregister(fd)
        # pop(fd, None) tolerates fds that were never registered for a
        # particular condition.
        self.read_sockets.pop(fd, None)
        self.write_sockets.pop(fd, None)
        self.error_sockets.pop(fd, None)

    def do_poll(self):
        try_read = try_write = try_error = None
        with self.lock:
            empty = not (self.read_sockets or self.write_sockets or self.error_sockets)
            if not empty:
                # Snapshot under the lock.  Handing live dict views to
                # select() would race with register()/unregister() running on
                # other threads ("dictionary changed size during iteration").
                try_read = list(self.read_sockets)
                try_write = list(self.write_sockets)
                try_error = list(self.error_sockets)
        try:
            if empty:
                time.sleep(self.timeout)
                return
            read, write, error = select.select(try_read, try_write, try_error, self.timeout)
            if not (read or write or error):
                return  # timed out with nothing signalled
            # Merge the three result lists into one flag per descriptor.
            flags = {}
            for fd in read:
                flags[fd] = flags.get(fd, SocketPollFlag.Zero) | SocketPollFlag.Read
            for fd in write:
                flags[fd] = flags.get(fd, SocketPollFlag.Zero) | SocketPollFlag.Write
            for fd in error:
                flags[fd] = flags.get(fd, SocketPollFlag.Zero) | SocketPollFlag.Error
            for fd, flag in flags.items():
                self.invoke_callback(fd, flag)
        except Exception as ex:
            # ThreadInterruptedException / select failure: log and stop loop.
            Utils.print_exception(ex)
            self.running = False
class SocketPollerLinux(SocketPoller):
    """epoll-based poller used on Linux hosts."""

    def __init__(self):
        super().__init__()
        self.lock = Lock()
        self.poller = select.epoll()

    # protected virtual
    def do_register(self, fd, flag, callback):
        super().do_register(fd, flag, callback)
        # Translate the portable flag enum into an epoll event mask.
        mask = 0
        if flag & SocketPollFlag.Read:
            mask |= select.EPOLLIN
        if flag & SocketPollFlag.Write:
            mask |= select.EPOLLOUT
        if flag & SocketPollFlag.Error:
            mask |= select.EPOLLERR
        self.poller.register(fd, mask)

    # protected virtual
    def do_unregister(self, fd):
        super().do_unregister(fd)
        self.poller.unregister(fd)

    def do_poll(self):
        with self.lock:
            have_fds = len(self.descripters) != 0
        try:
            if not have_fds:
                # Nothing registered yet; avoid a busy epoll loop.
                time.sleep(self.timeout)
                return
            if self.debug:
                Utils.dbg_print('before poll')
            events = Utils.poll(self.poller, self.timeout)
            if self.debug:
                Utils.dbg_print('after poll: count = %d' % len(events))
            for fileno, mask in events:
                # Map the epoll event mask back to SocketPollFlag bits.
                flag = SocketPollFlag.Zero
                if mask & select.EPOLLIN:
                    flag |= SocketPollFlag.Read
                if mask & select.EPOLLOUT:
                    flag |= SocketPollFlag.Write
                if mask & select.EPOLLERR:
                    flag |= SocketPollFlag.Error
                self.invoke_callback(self.fd_map[fileno], flag)
        except Exception as ex:
            # ThreadInterruptedException / epoll failure: log and stop loop.
            Utils.print_exception(ex)
            self.running = False
class SocketPollFlag(Flag):
    """Bitmask of socket readiness conditions a caller can wait on."""
    Zero = 0
    Read = 1
    Write = 2
    # Composite aliases spelled as combinations of the base bits.
    ReadWrite = Read | Write
    Error = 4
    ReadError = Read | Error
    WriteError = Write | Error
    All = Read | Write | Error
|
test_browser.py | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import functools
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER, no_wasm_backend, flaky, create_test_file
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
  """Serve `data` over HTTP on localhost:11111 for the chunked-XHR browser test.

  Answers a fixed number of requests (CORS preflight OPTIONS requests
  included) and then returns.  When support_byte_ranges is set, honors
  Range: headers so the client can fetch the payload chunk by chunk.
  """
  class ChunkedServerHandler(BaseHTTPRequestHandler):
    # NOTE: `extra=None` instead of a mutable `extra=[]` default; the None
    # sentinel avoids the shared-mutable-default pitfall.
    def sendheaders(s, extra=None, length=len(data)):
      s.send_response(200)
      s.send_header("Content-Length", str(length))
      s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
      s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
      s.send_header("Content-type", "application/octet-stream")
      if support_byte_ranges:
        s.send_header("Accept-Ranges", "bytes")
      for i in (extra or []):
        s.send_header(i[0], i[1])
      s.end_headers()

    def do_HEAD(s):
      s.sendheaders()

    def do_OPTIONS(s):
      s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)

    def do_GET(s):
      if not support_byte_ranges:
        s.sendheaders()
        s.wfile.write(data)
      else:
        # Serve only the requested byte range, clamped to the payload size.
        start, end = s.headers.get("range").split("=")[1].split("-")
        start = int(start)
        end = int(end)
        end = min(len(data) - 1, end)
        length = end - start + 1
        s.sendheaders([], length)
        s.wfile.write(data[start:end + 1])

  # CORS preflight makes OPTIONS requests which we need to account for.
  expectedConns = 22
  httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
  for i in range(expectedConns + 1):
    httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
  """Write output_file as a copy of src/<shell_file> with the
  {{{ SCRIPT }}} placeholder replaced by `replacement`."""
  with open(path_from_root('src', shell_file)) as template:
    contents = template.read().replace('{{{ SCRIPT }}}', replacement)
  with open(output_file, 'w') as out:
    out.write(contents)
def is_chrome():
  """Whether the configured EMTEST_BROWSER looks like a Chrome variant."""
  if not EMTEST_BROWSER:
    return EMTEST_BROWSER  # preserve the original falsy value (None or '')
  return 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
  """Decorator factory: skip the decorated test when running under Chrome,
  otherwise leave the test unchanged."""
  return unittest.skip(note) if is_chrome() else (lambda f: f)
def is_firefox():
  """Whether the configured EMTEST_BROWSER looks like a Firefox variant."""
  if not EMTEST_BROWSER:
    return EMTEST_BROWSER  # preserve the original falsy value (None or '')
  return 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
  """Decorator factory: skip the decorated test when running under Firefox,
  otherwise leave the test unchanged."""
  return unittest.skip(note) if is_firefox() else (lambda f: f)
def no_swiftshader(f):
  """Skip the decorated test when Chrome runs with SwiftShader (software GL),
  which these tests are incompatible with."""
  @functools.wraps(f)  # preserve the test's name/docstring for unittest reporting
  def decorated(self):
    if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
      self.skipTest('not compatible with swiftshader')
    return f(self)
  return decorated
def requires_threads(f):
  """Skip the decorated test when the environment lacks thread support or
  when running the (threadless) wasm backend under Chrome."""
  @functools.wraps(f)  # preserve the test's name/docstring for unittest reporting
  def decorated(self):
    if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
      self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
    # FIXME when the wasm backend gets threads
    if is_chrome() and self.is_wasm_backend():
      self.skipTest('wasm backend lacks threads')
    return f(self)
  return decorated
# Environment-driven skip decorators: set EMTEST_LACKS_* on hosts without
# real graphics/audio devices to skip tests that need them.
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
# Chrome only compiles large wasm modules asynchronously, so synchronous
# compilation tests are skipped there.
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
self.browser_timeout = 20
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
  def test_sdl1_in_emscripten_nonstrict_mode(self):
    """SDL1 must auto-link in non-strict mode (legacy behavior)."""
    if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
      self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
    # TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
    # system JS libraries are no longer automatically linked to anymore.
    self.btest('hello_world_sdl.cpp', reference='htmltest.png')
  def test_sdl1(self):
    # Explicit -lSDL and the USE_SDL=1 setting must both produce the same page.
    self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
    self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
  @no_wasm_backend('wasm source maps')
  def test_zzz_html_source_map(self):
    """Build with -g4 (source maps) and open the page in a real browser so a
    human can verify src.cpp appears among the page sources.  This is a
    manual/visual check, not an automated assertion."""
    if not has_browser():
      self.skipTest('need a browser')
    cpp_file = 'src.cpp'
    html_file = 'src.html'
    # browsers will try to 'guess' the corresponding original line if a
    # generated line is unmapped, so if we want to make sure that our
    # numbering is correct, we need to provide a couple of 'possible wrong
    # answers'. thus, we add some printf calls so that the cpp file gets
    # multiple mapped lines. in other words, if the program consists of a
    # single 'throw' statement, browsers may just map any thrown exception to
    # that line, because it will be the only mapped line.
    with open(cpp_file, 'w') as f:
      f.write(r'''
      #include <cstdio>
      int main() {
        printf("Starting test\n");
        try {
          throw 42; // line 8
        } catch (int e) { }
        printf("done\n");
        return 0;
      }
      ''')
    # use relative paths when calling emcc, because file:// URIs can only load
    # sourceContent when the maps are relative paths
    try_delete(html_file)
    try_delete(html_file + '.map')
    run_process([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4', '-s', 'WASM=0'], cwd=self.get_dir())
    self.assertExists(html_file)
    self.assertExists(html_file + '.map')
    webbrowser.open_new('file://' + html_file)
    print('''
If manually bisecting:
  Check that you see src.cpp among the page sources.
  Even better, add a breakpoint, e.g. on the printf, then reload, then step
  through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
  @no_wasm_backend('wasm source maps')
  def test_emscripten_log(self):
    """Build emscripten_log.cpp with a source map and DEMANGLE_SUPPORT, and
    expect the page to report result 1."""
    # TODO: wasm support for source maps
    src = 'src.cpp'
    create_test_file(src, self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
    run_process([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
    self.run_browser('page.html', None, '/report_result?1')
  def build_native_lzma(self):
    """Build the native lzma helper binary once; no-op when the executable
    already exists."""
    lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
    if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
      return  # already built and executable
    cwd = os.getcwd()
    try:
      os.chdir(path_from_root('third_party', 'lzma.js'))
      # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
      if WINDOWS and Building.which('mingw32-make'):
        run_process(['doit.bat'])
      else:
        run_process(['sh', './doit.sh'])
    finally:
      # Always restore the original working directory, even if the build fails.
      os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
run_process([PYTHON, EMCC, 'main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
run_process([PYTHON, EMCC, 'main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
run_process([PYTHON, EMCC, 'main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
os.makedirs('assets/sub/asset1/'.replace('\\', '/'))
os.makedirs('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
run_process([PYTHON, EMCC, 'main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir('dirrey')
except:
pass
run_process([PYTHON, EMCC, 'main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
run_process([PYTHON, EMCC, 'main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
  def test_preload_file_with_manual_data_download(self):
    """A custom .html shell must be able to download the --preload-file .data
    package itself instead of relying on the generated loader."""
    create_test_file('src.cpp', self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
    create_test_file('file.txt', '''Hello!''')
    run_process([PYTHON, EMCC, 'src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
    # The hand-written shell page fetches manual_download_data.data manually.
    shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
    self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
run_process([PYTHON, EMCC, cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
  def test_preload_caching(self):
    """Verify --use-preload-cache: the first page load populates IndexedDB
    (0 packages from cache, result 1), the second load reads the package back
    from the cache (result 2)."""
    create_test_file('main.cpp', self.with_report_result(r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      extern "C" {
        extern int checkPreloadResults();
      }
      int main(int argc, char** argv) {
        FILE *f = fopen("%s", "r");
        char buf[100];
        fread(buf, 1, 20, f);
        buf[20] = 0;
        fclose(f);
        printf("|%%s|\n", buf);
        int result = 0;
        result += !strcmp("load me right before", buf);
        result += checkPreloadResults();
        REPORT_RESULT(result);
        return 0;
      }
    ''' % 'somefile.txt'))
    # JS library helper that counts how many packages were loaded from cache.
    create_test_file('test.js', '''
      mergeInto(LibraryManager.library, {
        checkPreloadResults: function() {
          var cached = 0;
          var packages = Object.keys(Module['preloadResults']);
          packages.forEach(function(package) {
            var fromCache = Module['preloadResults'][package]['fromCache'];
            if (fromCache)
              ++ cached;
          });
          return cached;
        }
      });
    ''')
    # test caching of various sizes, including sizes higher than 128MB which is
    # chrome's limit on IndexedDB item sizes, see
    # https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
    # https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
    for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
      if is_chrome() and extra_size >= 100 * 1024 * 1024:
        continue
      create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
      print('size:', os.path.getsize('somefile.txt'))
      run_process([PYTHON, EMCC, 'main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
      self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
      self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
  def test_preload_caching_indexeddb_name(self):
    """Like test_preload_caching, but packages the file with the standalone
    file packager using a custom --indexedDB-name; the second page load must
    hit the cache (result 2)."""
    create_test_file('somefile.txt', '''load me right before running the code please''')

    def make_main(path):
      # Generate a main.cpp that reads `path` and adds the cached-package
      # count to the reported result.
      print(path)
      create_test_file('main.cpp', self.with_report_result(r'''
        #include <stdio.h>
        #include <string.h>
        #include <emscripten.h>
        extern "C" {
          extern int checkPreloadResults();
        }
        int main(int argc, char** argv) {
          FILE *f = fopen("%s", "r");
          char buf[100];
          fread(buf, 1, 20, f);
          buf[20] = 0;
          fclose(f);
          printf("|%%s|\n", buf);
          int result = 0;
          result += !strcmp("load me right before", buf);
          result += checkPreloadResults();
          REPORT_RESULT(result);
          return 0;
        }
      ''' % path))

    # JS library helper that counts how many packages were loaded from cache.
    create_test_file('test.js', '''
      mergeInto(LibraryManager.library, {
        checkPreloadResults: function() {
          var cached = 0;
          var packages = Object.keys(Module['preloadResults']);
          packages.forEach(function(package) {
            var fromCache = Module['preloadResults'][package]['fromCache'];
            if (fromCache)
              ++ cached;
          });
          return cached;
        }
      });
    ''')
    make_main('somefile.txt')
    run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
    run_process([PYTHON, EMCC, 'main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
  def test_multifile(self):
    """Preload several files in nested directories, both file-by-file and by
    preloading the whole directory (the sources are deleted before the second
    run to prove the data came from the package)."""
    # a few files inside a directory
    os.makedirs(os.path.join('subdirr', 'moar'))
    create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
    create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
    create_test_file('main.cpp', self.with_report_result(r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        char buf[17];
        FILE *f = fopen("subdirr/data1.txt", "r");
        fread(buf, 1, 16, f);
        buf[16] = 0;
        fclose(f);
        printf("|%s|\n", buf);
        int result = !strcmp("1214141516171819", buf);
        FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
        fread(buf, 1, 16, f2);
        buf[16] = 0;
        fclose(f2);
        printf("|%s|\n", buf);
        result = result && !strcmp("3.14159265358979", buf);
        REPORT_RESULT(result);
        return 0;
      }
    '''))
    # by individual files
    run_process([PYTHON, EMCC, 'main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
    os.remove('page.html')
    # by directory, and remove files to make sure
    run_process([PYTHON, EMCC, 'main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
    shutil.rmtree('subdirr')
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
  def test_custom_file_package_url(self):
    """The Module.locateFile hook must let a page fetch its .data package
    from a custom location (a 'cdn' directory here)."""
    # a few files inside a directory
    self.clear()
    os.makedirs('subdirr')
    os.makedirs('cdn')
    create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
    # change the file package base dir to look in a "cdn". note that normally
    # you would add this in your own custom html file etc., and not by
    # modifying the existing shell in this manner
    create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
    create_test_file('main.cpp', self.with_report_result(r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        char buf[17];
        FILE *f = fopen("subdirr/data1.txt", "r");
        fread(buf, 1, 16, f);
        buf[16] = 0;
        fclose(f);
        printf("|%s|\n", buf);
        int result = !strcmp("1214141516171819", buf);
        REPORT_RESULT(result);
        return 0;
      }
    '''))
    run_process([PYTHON, EMCC, 'main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
    # Move the package where only the locateFile redirect can find it.
    shutil.move('test.data', os.path.join('cdn', 'test.data'))
    self.run_browser('test.html', '', '/report_result?1')
  def test_missing_data_throws_error(self):
    """When the .data package cannot be fetched, the page must raise an error
    naming the missing package (caught via window.onerror) rather than run
    main()."""
    def setup(assetLocalization):
      # Build a shell whose window.onerror reports 1 iff the error message
      # mentions the missing package; assetLocalization prefixes data URLs.
      self.clear()
      create_test_file('data.txt', 'data')
      create_test_file('main.cpp', self.with_report_result(r'''
        #include <stdio.h>
        #include <string.h>
        #include <emscripten.h>
        int main() {
          // This code should never be executed in terms of missing required dependency file.
          REPORT_RESULT(0);
          return 0;
        }
      '''))
      create_test_file('on_window_error_shell.html', r'''
      <html>
          <center><canvas id='canvas' width='256' height='256'></canvas></center>
          <hr><div id='output'></div><hr>
          <script type='text/javascript'>
            window.onerror = function(error) {
              window.onerror = null;
              var result = error.indexOf("test.data") >= 0 ? 1 : 0;
              var xhr = new XMLHttpRequest();
              xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
              xhr.send();
              setTimeout(function() { window.close() }, 1000);
            }
            var Module = {
              locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
              print: (function() {
                var element = document.getElementById('output');
                return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
              })(),
              canvas: document.getElementById('canvas')
            };
          </script>
        {{{ SCRIPT }}}
        </body>
      </html>''')

    def test():
      # test test missing file should run xhr.onload with status different than 200, 304 or 206
      setup("")
      run_process([PYTHON, EMCC, 'main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
      shutil.move('test.data', 'missing.data')
      self.run_browser('test.html', '', '/report_result?1')

      # test unknown protocol should go through xhr.onerror
      setup("unknown_protocol://")
      run_process([PYTHON, EMCC, 'main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
      self.run_browser('test.html', '', '/report_result?1')

      # test wrong protocol and port
      setup("https://localhost:8800/")
      run_process([PYTHON, EMCC, 'main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
      self.run_browser('test.html', '', '/report_result?1')

    test()

    # TODO: CORS, test using a full url for locateFile
    # create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
    # test()
  def test_dev_random(self):
    # /dev/random must be readable on the virtual filesystem.
    self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
  def test_sdl_swsurface(self):
    # SDL software surfaces must be creatable.
    self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
  def test_sdl_surface_lock_opts(self):
    # Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
    self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
  def test_sdl_image(self):
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
    # Cover both memory-init modes and both plain/renamed preload targets.
    for mem in [0, 1]:
      for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
                                      ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
        run_process([
          PYTHON, EMCC, 'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
          '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
        ])
        self.run_browser('page.html', '', '/report_result?600')
  def test_sdl_image_jpeg(self):
    # Same as test_sdl_image but with a .jpeg extension.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
    create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
    run_process([
      PYTHON, EMCC, 'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
      '--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
    ])
    self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
    """Decode an image whose file extension does not reveal its format."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    flags = ['--preload-file', 'screenshot.not', '-lSDL', '-lGL']
    self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=flags,
               also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
    """Decode an image and inspect its pixel data (prepare_data variant)."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    flags = ['--preload-file', 'screenshot.not', '-lSDL', '-lGL']
    self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=flags,
               manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
    """Image decoding that requires an explicit prepare step."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    flags = ['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL']
    self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=flags)
def test_sdl_stb_image(self):
    """Decode an image through stb_image (-s STB_IMAGE=1)."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    flags = ['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
    self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=flags)
def test_sdl_stb_image_bpp(self):
    """Decode PNGs at every stb_image bit depth (1-4 bytes per pixel)."""
    cases = [
        'sdl-stb-bpp1.png',  # grayscale image without alpha
        'sdl-stb-bpp2.png',  # grayscale image with alpha
        'sdl-stb-bpp3.png',  # RGB image
        'sdl-stb-bpp4.png',  # RGBA image
    ]
    flags = ['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
    for png in cases:
        self.clear()
        shutil.copyfile(path_from_root('tests', png), 'screenshot.not')
        self.btest('sdl_stb_image.c', reference=png, args=flags)
def test_sdl_stb_image_data(self):
    """Decode via stb_image and read back the raw pixel data."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    flags = ['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
    self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=flags)
def test_sdl_stb_image_cleanup(self):
    """Verify stb_image decode state is released (run under --memoryprofiler)."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    flags = ['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not',
             '-lSDL', '-lGL', '--memoryprofiler']
    self.btest('sdl_stb_image_cleanup.c', expected='0', args=flags)
def test_sdl_canvas(self):
    """SDL canvas rendering under LEGACY_GL_EMULATION, plus SAFE_HEAP coverage."""
    base = ['-s', 'LEGACY_GL_EMULATION=1']
    libs = ['-lSDL', '-lGL']
    variants = [
        [],                              # plain build
        ['-O0', '-s', 'SAFE_HEAP=1'],    # some extra coverage: unoptimized + safe heap
        ['-O2', '-s', 'SAFE_HEAP=1'],    # optimized + safe heap
    ]
    for extra in variants:
        self.clear()
        self.btest('sdl_canvas.c', expected='1', args=base + extra + libs)
def post_manual_reftest(self, reference=None):
    """Inject a manual reftest trigger into the generated test.html.

    Wraps window.close so that, before the page closes, doReftest() runs once
    pending rafs have had time to paint. The %s placeholder inlines the
    contents of reftest.js (presumably produced by self.reftest — verify).
    """
    self.reftest(path_from_root('tests', self.reference if reference is None else reference))
    html = open('test.html').read()
    html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
    create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
    """SDL canvas rendering proxied to a worker, with a preloaded data file."""
    create_test_file('data.txt', 'datum')
    flags = ['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL']
    self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=flags,
               manual_reference=True, post_build=self.post_manual_reftest)
@no_chrome('see #7930')
@requires_graphics_hardware
def test_glgears_proxy(self):
    """glgears via --proxy-to-worker, plus runtime ?noProxy handling.

    After the initial btest build, the generated test.html/test.js pair is
    cloned with deliberate failure injections on one side (main thread or
    worker) to prove which side actually executed main().
    """
    # we modify the asm.js, this is a non-wasm test
    self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)

    # test noProxy option applied at runtime

    # run normally (duplicates above test, but verifies we can run outside of the btest harness
    self.run_browser('test.html', None, ['/report_result?0'])

    # run with noProxy
    self.run_browser('test.html?noProxy', None, ['/report_result?0'])

    def copy(to, js_mod, html_mod=lambda x: x):
        # Clone test.html/test.js under a new name, applying the given mutators.
        create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
        create_test_file(to + '.js', js_mod(open('test.js').read()))

    # run with noProxy, but make main thread fail
    copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
         lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
    self.run_browser('two.html?noProxy', None, ['/report_result?999'])
    copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
    self.run_browser('two.html', None, ['/report_result?0']) # this is still cool

    # run without noProxy, so proxy, but make worker fail
    copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
         lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
    self.run_browser('three.html', None, ['/report_result?999'])
    copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
    self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
    """--proxy-to-worker with a .js output target (emits client + worker js)."""
    cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'),
           '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut']
    run_process(cmd)
    shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
    self.post_manual_reftest('gears.png')
    self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
    """Alpha blending on the SDL canvas, with and without the -0 argument.

    N.B. On Linux with Intel integrated graphics cards, this test needs
    Firefox 49 or newer; see
    https://github.com/emscripten-core/emscripten/issues/4069.
    """
    create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
    libs = ['-lSDL', '-lGL']
    self.btest('sdl_canvas_alpha.c', args=libs,
               reference='sdl_canvas_alpha.png', reference_slack=12)
    self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js'] + libs,
               reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
    """SDL keyboard events across delayed-dispatch / event-handler / emterpreter variants.

    The pre.js keydown/keyup helpers optionally wrap the dispatch in a
    setTimeout (delay=1) to exercise asynchronous delivery of key events.
    """
    for delay in [0, 1]:
        for defines in [
            [],
            ['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
        ]:
            for emterps in [
                [],
                ['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
            ]:
                print(delay, defines, emterps)
                if emterps and self.is_wasm_backend():
                    return self.skipTest('no emterpretify with wasm backend')
                # %s placeholders insert the optional setTimeout wrapper.
                create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
                create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
                run_process([PYTHON, EMCC, 'sdl_key.c', '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
                self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
    """SDL keyboard events when the app is proxied to a worker.

    post() appends a script to test.html that synthesizes the key sequence
    (including alt/ctrl/shift modifiers) on the main thread; key 100 ends
    the test per the inline comment below.
    """
    create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')

    def post():
        # Inject the synthetic key events just before </body>.
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
        create_test_file('test.html', html)

    self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
    """The canvas element receives input focus as expected."""
    self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
    """Proxied app: keypress must be suppressed when keydown is preventDefault'ed.

    The injected sendKey() mimics browser behavior: the keypress event is
    dispatched only if the preceding keydown was not default-prevented.
    """
    def post():
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
        create_test_file('test.html', html)

    self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
    """SDL text input driven by synthetic keypress events dispatched from pre.js."""
    create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
    create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
    run_process([PYTHON, EMCC, 'sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
    """SDL mouse events synthesized via initMouseEvent, offset by the canvas position.

    button >= 0 produces a mousedown/mouseup pair; a negative button produces
    a mousemove instead.
    """
    create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
    run_process([PYTHON, EMCC, 'sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
    """SDL mouse coordinates when the canvas is offset inside the page.

    Unlike test_sdl_mouse, events here use raw page coordinates (no canvas
    offset added), and a custom page.html places the canvas 5px from the
    page edges so the runtime must compensate (-DTEST_SDL_MOUSE_OFFSETS).
    """
    create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    # Custom shell page: canvas absolutely positioned at (5,5) inside a container.
    create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
    create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
    run_process([PYTHON, EMCC, 'sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
    """GLUT touch event callbacks fire correctly."""
    self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
    """GLUT mouse-wheel event callbacks fire correctly."""
    self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
    """glutGet queries without the anti-aliasing mode enabled."""
    libs = ['-lglut', '-lGL']
    self.btest('glut_glutget.c', '1', args=libs)
    self.btest('glut_glutget.c', '1',
               args=libs + ['-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
    """glutGet queries, including the anti-aliased (AA_ACTIVATED) configuration."""
    libs = ['-lglut', '-lGL']
    self.btest('glut_glutget.c', '1', args=libs)
    self.btest('glut_glutget.c', '1',
               args=libs + ['-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
    """SDL joystick against the 2012 Working Draft Gamepad API (numeric buttons)."""
    # Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
    # http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
    create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
    run_process([PYTHON, EMCC, 'sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
    """SDL joystick against the Editor's Draft Gamepad API (object-valued buttons)."""
    # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
    # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
    create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
    run_process([PYTHON, EMCC, 'sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
    """GLFW joystick support; unlike the SDL tests this also dispatches a
    'gamepadconnected' event, which the GLFW backend relies on."""
    # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
    # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
    create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
    run_process([PYTHON, EMCC, 'test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
    self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
    """WebGL context attributes (antialias/depth/stencil/alpha) across GL front-ends."""
    # Javascript code to check the attributes support we want to test in the WebGL implementation
    # (request the attribute, create a context and check its value afterwards in the context attributes).
    # Tests will succeed when an attribute is not supported.
    create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')

    # Copy common code file to temporary directory
    filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
    temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
    shutil.copyfile(filepath, temp_filepath)

    # perform tests with attributes activated
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
    self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])

    # perform tests with attributes desactivated
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
    """The app adopts a WebGL context created ahead of time by the shell page."""
    shell = path_from_root('tests/preinitialized_webgl_context.html')
    self.btest('preinitialized_webgl_context.cpp', '5',
               args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', shell])
@requires_threads
def test_emscripten_get_now(self):
    """emscripten_get_now() under default, pthreads, and closure/web builds."""
    variants = [
        [],
        ['-s', 'USE_PTHREADS=1'],
        ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'],
    ]
    for args in variants:
        self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
    """Writing a file works in a pure-web (ENVIRONMENT=web) closure build."""
    flags = ['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1']
    self.btest('write_file.cpp', '0', args=flags)
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
    """fflush behavior with a custom shell file (currently skipped, see #2770)."""
    shell = path_from_root('tests', 'test_fflush.html')
    self.btest('test_fflush.cpp', '0', args=['--shell-file', shell])
def test_file_db(self):
    """IDB-backed file persistence across three successive page loads."""
    secret = str(time.time())
    create_test_file('moar.txt', secret)
    # First load stores the secret.
    self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
    shutil.copyfile('test.html', 'first.html')
    # Second load reads it back from the persisted filesystem.
    self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
    shutil.copyfile('test.html', 'second.html')
    create_test_file('moar.txt', 'aliantha')
    self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt'])  # even with a file there, we load over it
    shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
    """IDBFS syncfs round-trip: write in one run, read back in the next."""
    exports = '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''
    src = path_from_root('tests', 'fs', 'test_idbfs_sync.c')
    for extra in [[], ['-DEXTRA_WORK']]:
        secret = str(time.time())
        secret_def = '-DSECRET="' + secret + '"'
        # First run persists the secret...
        self.btest(src, '1', force_c=True,
                   args=['-lidbfs.js', '-DFIRST', secret_def, '-s', exports])
        # ...the second run (plus any extra defines) must find it again.
        self.btest(src, '1', force_c=True,
                   args=['-lidbfs.js', secret_def, '-s', exports] + extra)
@no_wasm_backend('emterpretify')
def test_fs_idbfs_fsync(self):
    """fsync on IDBFS via emterpreter-async, persisting across two runs."""
    # sync from persisted state into memory before main()
    create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');

FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
    args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1']
    secret = str(time.time())
    # First run (-DFIRST) writes the secret; second run reads it back.
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
@no_wasm_backend('emterpretify')
def test_fs_memfs_fsync(self):
    """fsync on MEMFS under emterpreter-async with EXIT_RUNTIME."""
    base = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'EXIT_RUNTIME=1']
    secret = str(time.time())
    extra = ['-DSECRET="' + secret + '"', '-s', '''EXPORTED_FUNCTIONS=['_main']''']
    self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1',
               force_c=True, args=base + extra)
def test_fs_workerfs_read(self):
    """Read blob- and File-backed entries from a WORKERFS mount in a worker."""
    secret = 'a' * 10
    secret2 = 'b' * 10
    # %s placeholders embed the two secrets as the blob/file contents.
    create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
    self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
    """WORKERFS reading files out of a file_packager bundle inside a worker.

    Packages file1.txt and sub/file2.txt with --separate-metadata, then runs
    the test proxied to a worker where WORKERFS serves the package contents.
    """
    create_test_file('file1.txt', 'first')
    if not os.path.exists('sub'):
        os.makedirs('sub')
    # Use a context manager so the file is flushed and closed before the
    # packager reads it (the original left the handle dangling open).
    with open(os.path.join('sub', 'file2.txt'), 'w') as f:
        f.write('second')
    run_process([PYTHON, FILE_PACKAGER, 'files.data',
                 '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'),
                 '--separate-metadata', '--js-output=files.js'])
    self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1',
               args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
    """LZ4-compressed filesystem packages, exercised three ways.

    1. emcc with -s LZ4=1 compressing its own preloaded files,
    2. file_packager --lz4 compressing ahead of time ("server side"),
    3. --separate-metadata + -DLOAD_MANUALLY, compressing on the client.
    """
    # generate data
    self.clear()
    os.mkdir('subdir')
    create_test_file('file1.txt', '0123456789' * (1024 * 128))
    # Context managers ensure the data is flushed and the handles closed
    # before emcc/file_packager read the files (the original used bare
    # open(...).write(...), leaving the handles dangling).
    with open(os.path.join('subdir', 'file2.txt'), 'w') as f:
        f.write('1234567890' * (1024 * 128))
    random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
    random_data[17] = ord('X')
    with open('file3.txt', 'wb') as f:
        f.write(random_data)

    # compress in emcc, -s LZ4=1 tells it to tell the file packager
    print('emcc-normal')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
    assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
    assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2  # over half is gone
    print(' emcc-opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)

    # compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
    print('normal')
    out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
    with open('files.js', 'wb') as f:
        f.write(out)
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
    print(' opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)

    # load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
    print('manual')
    subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
    print(' opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
    print(' opts+closure')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'], timeout=60)
    '''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_separate_metadata_later(self):
    """--separate-metadata packages must also work when loaded after startup.

    See issue #6654 - separate-metadata must be handled both when the package
    runs before the main program and when it is run later.
    """
    create_test_file('data.dat', ' ')
    run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat',
                 '--separate-metadata', '--js-output=more.js'])
    self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1',
               args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
    """Drive the IDB store through each of its numbered stages in sequence."""
    secret = str(time.time())
    stages = [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]
    for stage in stages:
        self.clear()
        self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True,
                   args=['-lidbstore.js', '-DSTAGE=' + str(stage),
                         '-DSECRET="' + secret + '"'])
  @no_wasm_backend('emterpretify')
  def test_idbstore_sync(self):
    # Synchronous IDBStore access needs the emterpreter's async support
    # (EMTERPRETIFY_ASYNC), which the wasm backend does not provide.
    secret = str(time.time())
    self.clear()
    self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])

  @no_wasm_backend('emterpretify')
  def test_idbstore_sync_worker(self):
    # Same as test_idbstore_sync, but proxied to a worker with a fixed memory size.
    secret = str(time.time())
    self.clear()
    self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'])
  def test_force_exit(self):
    # emscripten_force_exit should end the runtime when EXIT_RUNTIME is enabled.
    self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
  def test_sdl_pumpevents(self):
    # key events should be detected using SDL_PumpEvents
    # The pre-js synthesizes keydown events from JS so the page needs no real keyboard.
    create_test_file('pre.js', '''
      function keydown(c) {
        var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
        document.dispatchEvent(event);
      }
    ''')
    self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
  def test_sdl_canvas_size(self):
    # Canvas sizing behavior with a custom shell file.
    self.btest('sdl_canvas_size.c', expected='1',
               args=['-O2', '--minify', '0', '--shell-file',
                     path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
run_process([PYTHON, EMCC, 'sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
  @requires_graphics_hardware
  def test_sdl_gl_mapbuffers(self):
    # glMapBuffer emulation requires FULL_ES3.
    self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
               message='You should see a blue triangle.')

  @requires_graphics_hardware
  def test_sdl_ogl(self):
    # Fixed-function GL through LEGACY_GL_EMULATION, compared against a reference image.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')

  @requires_graphics_hardware
  def test_sdl_ogl_regal(self):
    # Same scene, but emulated through the Regal library instead of LEGACY_GL_EMULATION.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')

  @requires_graphics_hardware
  def test_sdl_ogl_defaultmatrixmode(self):
    # Rendering that relies on the default (unset) matrix mode.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')

  @requires_graphics_hardware
  def test_sdl_ogl_p(self):
    # Immediate mode with pointers
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')

  @requires_graphics_hardware
  def test_sdl_ogl_proc_alias(self):
    # GL entry points fetched via SDL_GL_GetProcAddress, under heavy inlining restrictions.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])

  # The test_sdl_fog_* tests below each exercise one GL fog mode under
  # LEGACY_GL_EMULATION and compare against a mode-specific reference image.
  @requires_graphics_hardware
  def test_sdl_fog_simple(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')

  @requires_graphics_hardware
  def test_sdl_fog_negative(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')

  @requires_graphics_hardware
  def test_sdl_fog_density(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')

  @requires_graphics_hardware
  def test_sdl_fog_exp2(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')

  @requires_graphics_hardware
  def test_sdl_fog_linear(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_glfw(self):
    # GLFW 2 API, both with the default GLFW version and with USE_GLFW=2 explicitly.
    self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
    self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])

  def test_glfw_minimal(self):
    # Minimal GLFW usage, without any GL emulation layer.
    self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
    self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])

  def test_glfw_time(self):
    # glfwGetTime / timing behavior under GLFW 3.
    self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
run_process([PYTHON, EMCC, '-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
  @requires_graphics_hardware
  def test_egl(self):
    # Basic EGL context creation.
    self._test_egl_base()

  @requires_threads
  @requires_graphics_hardware
  def test_egl_with_proxy_to_pthread(self):
    # EGL when main() runs on a pthread (PROXY_TO_PTHREAD).
    self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1')
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
run_process([PYTHON, EMCC, '-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
  def test_egl_width_height(self):
    # Canvas size query on the main thread.
    self._test_egl_width_height_base()

  @requires_threads
  def test_egl_width_height_with_proxy_to_pthread(self):
    # Canvas size query when main() runs on a pthread.
    self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
def do_test_worker(self, args=[]):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
  def test_worker(self):
    # Run the worker test, then confirm the worker script also runs standalone in a shell.
    self.do_test_worker()
    self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
run_process([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
  @requires_graphics_hardware
  def test_glgears(self):
    # The classic GL gears demo, checked against a reference frame.
    self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
               args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
               message='You should see animating gears.')

  @requires_graphics_hardware
  def test_glgears_long(self):
    # Longer-running variant; any frame count in [30, 500) counts as success.
    for proxy in [0, 1]:
      print('proxy', proxy)
      self.btest('hello_world_gles.c', expected=list(map(str, range(30, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
run_process([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
  @requires_graphics_hardware
  def test_fulles2_sdlproc(self):
    # FULL_ES2 with GL procs fetched through SDL.
    self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])

  @requires_graphics_hardware
  def test_glgears_deriv(self):
    # Gears variant using derivative functions; also check that the glMatrix
    # library is not bundled when nothing needs it.
    self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
               args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
               message='You should see animating gears.')
    with open('something.html') as f:
      assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
  @requires_graphics_hardware
  def test_glbook(self):
    # Build and run the "OpenGL ES 2.0 Programming Guide" sample programs,
    # comparing each against its reference screenshot. Some chapters need
    # extra texture assets preloaded alongside the program.
    programs = self.get_library('glbook', [
      os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
      os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
      os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
      os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
      os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
      os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
      os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
    ], configure=None)

    def book_path(*pathelems):
      # Helper: resolve a path inside the glbook test tree.
      return path_from_root('tests', 'glbook', *pathelems)

    for program in programs:
      print(program)
      basename = os.path.basename(program)
      args = ['-lGL', '-lEGL', '-lX11']
      if basename == 'CH10_MultiTexture.bc':
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
        args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
      elif basename == 'CH13_ParticleSystem.bc':
        shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
        args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage

      self.btest(program,
                 reference=book_path(basename.replace('.bc', '.png')),
                 args=args,
                 timeout=30)
  @requires_graphics_hardware
  def test_gles2_emulation(self):
    # Run the glbook samples from original sources through FULL_ES2 emulation,
    # linking in the shared es* helper sources each time.
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')

    for source, reference in [
      (os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
      # (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
      # (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
      (os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
      (os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
    ]:
      print(source)
      self.btest(source,
                 reference=reference,
                 args=['-I' + path_from_root('tests', 'glbook', 'Common'),
                       path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
                       '-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
                       '--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])

  @requires_graphics_hardware
  def test_clientside_vertex_arrays_es3(self):
    # NOTE: Should FULL_ES3=1 imply client-side vertex arrays? The emulation needs FULL_ES2=1 for now.
    self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'USE_WEBGL2=1', '-s', 'FULL_ES2=1', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])

  def test_emscripten_api(self):
    # emscripten_* browser API calls; _third must stay exported for the test to call it.
    self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
  def test_emscripten_api_infloop(self):
    # A main loop that never returns; the test harness expects the value 7.
    self.btest('emscripten_api_browser_infloop.cpp', '7')

  def test_emscripten_fs_api(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
    self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
  # The emscripten_main_loop tests each run their scenario both on the main
  # thread and proxied to a pthread (PROXY_TO_PTHREAD).
  @requires_threads
  def test_emscripten_main_loop(self):
    for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
      self.btest('emscripten_main_loop.cpp', '0', args=args)

  @requires_threads
  def test_emscripten_main_loop_settimeout(self):
    for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
      self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)

  @requires_threads
  def test_emscripten_main_loop_and_blocker(self):
    for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
      self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)

  @requires_threads
  def test_emscripten_main_loop_setimmediate(self):
    # Also covered with --proxy-to-worker here.
    for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
      self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
  def test_fs_after_main(self):
    # Filesystem usage after main() returns, unoptimized and at -O1.
    for args in [[], ['-O1']]:
      self.btest('fs_after_main.cpp', '0', args=args)

  def test_sdl_quit(self):
    self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])

  def test_sdl_resize(self):
    self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])

  def test_glshaderinfo(self):
    self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])

  @requires_graphics_hardware
  def test_glgetattachedshaders(self):
    self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])

  # Covered by dEQP text suite (we can remove it later if we add coverage for that).
  @requires_graphics_hardware
  def test_glframebufferattachmentinfo(self):
    self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])

  @requires_graphics_hardware
  def test_sdlglshader(self):
    # Shader usage under LEGACY_GL_EMULATION, with closure compilation for extra coverage.
    self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_sdlglshader2(self):
    self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)

  @requires_graphics_hardware
  def test_gl_glteximage(self):
    self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'])
  @requires_graphics_hardware
  def test_gl_ps(self):
    # pointers and a shader
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)

  @requires_graphics_hardware
  def test_gl_ps_packed(self):
    # packed data that needs to be strided
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)

  @requires_graphics_hardware
  def test_gl_ps_strides(self):
    # explicit strides in vertex data
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])

  @requires_graphics_hardware
  def test_gl_ps_worker(self):
    # same scene, also run proxied to a worker
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)

  @requires_graphics_hardware
  def test_gl_renderers(self):
    self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_gl_stride(self):
    self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_gl_vertex_buffer_pre(self):
    self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_gl_vertex_buffer(self):
    self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)

  @requires_graphics_hardware
  def test_gles2_uniform_arrays(self):
    self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)

  @requires_graphics_hardware
  def test_gles2_conformance(self):
    self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])

  @requires_graphics_hardware
  def test_matrix_identity(self):
    self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  # The cubegeom_pre* tests render a cube with pre-set GL state under various
  # emulation layers and compare against reference screenshots.
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre(self):
    self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_regal(self):
    self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  @requires_sync_compilation
  def test_cubegeom_pre_relocatable(self):
    self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])

  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre2(self):
    self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build

  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre3(self):
    self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
  # The cubegeom* tests render a textured cube through LEGACY_GL_EMULATION (or
  # Regal) and compare against reference screenshots.
  @requires_graphics_hardware
  def test_cubegeom(self):
    self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)

  @requires_graphics_hardware
  def test_cubegeom_regal(self):
    self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)

  @requires_graphics_hardware
  def test_cubegeom_proc(self):
    # A side file declares a variable with the same name as a GL function, to
    # verify the name collision does not break proc-address lookup.
    create_test_file('side.c', r'''

extern void* SDL_GL_GetProcAddress(const char *);

void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us

void *getBindBuffer() {
  if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
  return glBindBuffer;
}
''')
    # also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
    for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
      self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_glew(self):
    self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_color(self):
    self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_normal(self):
    self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)

  @requires_graphics_hardware
  def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
    self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
    self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
    self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
    self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
    self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_mt(self):
    self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture

  @requires_graphics_hardware
  def test_cubegeom_color2(self):
    self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)

  @requires_graphics_hardware
  def test_cubegeom_texturematrix(self):
    self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_fog(self):
    self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  # The *_vao tests cover vertex array object code paths.
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao(self):
    self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao_regal(self):
    self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre2_vao(self):
    self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_pre2_vao2(self):
    self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao_es(self):
    self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cubegeom_u4fv_2(self):
    self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])

  @requires_graphics_hardware
  def test_cube_explosion(self):
    self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)

  @requires_graphics_hardware
  def test_glgettexenv(self):
    self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])

  # SDL 2D canvas tests, each compared against a reference screenshot.
  def test_sdl_canvas_blank(self):
    self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')

  def test_sdl_canvas_palette(self):
    self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')

  def test_sdl_canvas_twice(self):
    self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')

  def test_sdl_set_clip_rect(self):
    self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')

  def test_sdl_maprgba(self):
    self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)

  def test_sdl_create_rgb_surface_from(self):
    self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')

  def test_sdl_rotozoom(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)

  def test_sdl_gfx_primitives(self):
    self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
  def test_sdl_canvas_palette_2(self):
    # Palette rendering: run once per color channel, selecting the channel via
    # a per-run pre-js that sets the program arguments.
    create_test_file('pre.js', '''
      Module['preRun'].push(function() {
        SDL.defaults.copyOnLock = false;
      });
    ''')

    create_test_file('args-r.js', '''
      Module['arguments'] = ['-r'];
    ''')

    create_test_file('args-g.js', '''
      Module['arguments'] = ['-g'];
    ''')

    create_test_file('args-b.js', '''
      Module['arguments'] = ['-b'];
    ''')

    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
    """Load an S3TC-compressed (.dds) texture under legacy GL emulation and compare output."""
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
    """Same as test_s3tc but with GL_FFP_ONLY (fixed-function-only emulation path)."""
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
    """Anisotropic filtering test; also validates the asm.js build in spidermonkey when available."""
    if SPIDERMONKEY_ENGINE in JS_ENGINES:
        # asm.js-ification check
        run_process([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
        self.set_setting('ASM_JS', 1)
        self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
        print('passed asm test')
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
    """Upload a texture with non-byte component types under legacy GL emulation."""
    self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
    """Render using a floating-point texture and compare to the reference image."""
    self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
    """Exercise glBufferSubData; renders the same scene as float_tex, so reuses its reference."""
    self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
    """Perspective projection rendering under legacy GL emulation."""
    self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
    """Check glGetError reporting under legacy GL emulation; expects '1'."""
    self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
    """Check OpenAL error reporting, both plain and with closure compiler."""
    variants = ([], ['--closure', '1'])
    for extra in variants:
        print(extra)
        self.btest('openal_error.c', expected='1', args=extra)
def test_openal_capture_sanity(self):
    """Basic sanity check of the OpenAL capture API; expects '0'."""
    self.btest('openal_capture_sanity.c', expected='0')
@no_wasm_backend('dynamic linking')
def test_runtimelink(self):
    """Runtime-link a SIDE_MODULE into a MAIN_MODULE, in both asm.js and wasm modes."""
    for wasm in [0, 1]:
        print(wasm)
        main, supp = self.setup_runtimelink_test()
        create_test_file('supp.cpp', supp)
        # build the side module first; its extension depends on the target (wasm vs js)
        run_process([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'EXPORT_ALL=1'])
        self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
    """Verify that adding a run dependency in preRun delays run() until it is removed."""
    # Adding a dependency in preRun will delay run
    create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
    # run with and without a memory init file
    for mem in [0, 1]:
        self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
    """Check interaction of a memory init file with early memory writes from a post-js."""
    create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
    # the post-js writes to memory before the mem init file has been applied;
    # with assertions this is detected (result 9), without it is overwritten (result 3)
    create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''' % self.port)
    # with assertions, we notice when memory was written to too early
    self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
    # otherwise, we just overwrite
    self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
    """Check Module.memoryInitializerRequest: a good URL succeeds, a bad one warns and reports 0."""
    def test(what, status):
        # `what` is the URL the memory initializer is fetched from; `status` the expected result
        print(what, status)
        create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
        self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
    test('test.html.mem', '1')
    test('nothing.nowhere', '0')
def test_runtime_misuse(self):
    """Verify ccall/cwrap/direct calls abort (with assertions) when made before or after the
    runtime is alive, and succeed when made at a valid time."""
    # helpers shared by all phases: three ways of calling into compiled code
    post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
    # each call is expected to throw unless expected_ok was flipped to true
    post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
    # reports the final value noted by the program back to the test harness
    post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
''' % self.port
    create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
    for filename, extra_args, second_code in [
        ('runtime_misuse.cpp', [], 600),
        ('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
    ]:
        for mode in [['-s', 'WASM=0'], ['-s', 'WASM=1']]:
            if 'WASM=0' in mode and self.is_wasm_backend():
                continue
            print('\n', filename, extra_args, mode)
            # phase 1: async startup (mem init file), the calls fire too early
            print('mem init, so async, call too early')
            create_test_file('post.js', post_prep + post_test + post_hook)
            self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
            # phase 2: sync startup, calls fire after runtime exit
            print('sync startup, call too late')
            create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
            self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
            # phase 3: runtime kept alive, so the calls are valid
            print('sync, runtime still alive, so all good')
            create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
            self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
    """Call cwrap from a pre-js (before runtime init) with assertions on; expects '0'."""
    self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
    """Build a BUILD_AS_WORKER module exporting _one, then drive it from the main program."""
    run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
    self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
    """Worker API with multiple exported functions, built optimized with closure."""
    run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
    self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
    """Third worker-API variant; worker exports _one, main expects '5'."""
    run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
    self.btest('worker_api_3_main.cpp', expected='5')
@no_wasm_backend('emterpretify')
def test_worker_api_sleep(self):
    """Worker API where the worker sleeps via emterpreter async support."""
    run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
    self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
    """Exercise emscripten_async_wget2 (http.cpp); expects '0'."""
    self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
    """(Skipped) dlopen of a SIDE_MODULE built with DLOPEN_SUPPORT."""
    run_process([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
    self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
@no_wasm_backend('dynamic linking')
def test_preload_module(self):
    """Preload a side-module .so via --preload-file and dlopen it from the preloadedWasm cache."""
    create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
    run_process([PYTHON, EMCC, 'library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
    # rename to .so so the preload plugin treats it as a shared library
    os.rename('library.wasm', 'library.so')
    main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
    self.btest(
        main,
        args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
        expected='0')
def test_mmap_file(self):
    """mmap a preloaded data file, with and without --no-heap-copy."""
    payload = 'data from the file ' + 9000 * '.'
    create_test_file('data.dat', payload)
    base_args = ['--preload-file', 'data.dat']
    for extra in ([], ['--no-heap-copy']):
        self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=base_args + extra)
def test_emrun_info(self):
    """Run emrun's --system_info/--browser_info/--list_browsers and sanity-check the output."""
    if not has_browser():
        self.skipTest('need a browser')
    result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
    assert 'CPU' in result
    assert 'Browser' in result
    # 'Traceback' in the output would indicate emrun itself crashed
    assert 'Traceback' not in result
    result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
    assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
    """End-to-end emrun test: build with --emrun, launch in a browser, and verify the
    captured stdout/stderr logs and the program's exit code (100)."""
    run_process([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
    outdir = os.getcwd()
    if not has_browser():
        self.skipTest('need a browser')
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the browser that is launched will have that directory as startup directory,
    # and the browser will not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to delete it. Therefore switch away from that directory
    # before launching.
    os.chdir(path_from_root())
    args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
    if EMTEST_BROWSER is not None:
        # If EMTEST_BROWSER carried command line arguments to pass to the browser,
        # (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
        # so strip them out.
        browser_cmd = shlex.split(EMTEST_BROWSER)
        browser_path = browser_cmd[0]
        args += ['--browser', browser_path]
        if len(browser_cmd) > 1:
            browser_args = browser_cmd[1:]
            if 'firefox' in browser_path and '-profile' in browser_args:
                # emrun uses its own -profile, strip it out
                parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
                parser.add_argument('-profile')
                browser_args = parser.parse_known_args(browser_args)[1]
            if browser_args:
                args += ['--browser_args', ' ' + ' '.join(browser_args)]
    args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
    proc = run_process(args, check=False)
    # use context managers so the log file handles are closed promptly
    # (the bare open(...).read() form leaked them and raises ResourceWarning)
    with open(os.path.join(outdir, 'stdout.txt'), 'r') as f:
        stdout = f.read()
    with open(os.path.join(outdir, 'stderr.txt'), 'r') as f:
        stderr = f.read()
    assert proc.returncode == 100
    assert 'argc: 4' in stdout
    assert 'argv[3]: --3' in stdout
    assert 'hello, world!' in stdout
    assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
    assert 'Testing char sequences: %20%21 ä' in stdout
    assert 'hello, error stream!' in stderr
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
    """Compile-only check for --cpuprofiler and --memoryprofiler."""
    self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut'], timeout=30)
def test_uuid(self):
    """Check -luuid uses high-quality entropy sources in both shell-JS and browser runs.

    Builds with closure so we can verify that the references to
    require('crypto').randomBytes and window.crypto.getRandomValues survive
    minification, then runs the test in a JS shell and in the browser.
    """
    # Run with ./runner.py browser.test_uuid
    # We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
    # high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
    # First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
    run_process([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'], stdout=PIPE, stderr=PIPE)
    # read via a context manager so the handle is closed (open(...).read() leaked it)
    with open('test.js') as f:
        test_js_closure = f.read()
    # Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
    assert ").randomBytes" in test_js_closure
    assert "window.crypto.getRandomValues" in test_js_closure
    out = run_js('test.js', full_output=True)
    print(out)
    # Tidy up files that might have been created by this test.
    try_delete(path_from_root('tests', 'uuid', 'test.js'))
    try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
    # Now run test in browser
    self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
    """Build glew.c in every combination of legacy GL emulation and GLEW_MX."""
    src = path_from_root('tests', 'glew.c')
    base_flags = ['-lGL', '-lSDL', '-lGLEW']
    combos = (
        [],
        ['-s', 'LEGACY_GL_EMULATION=1'],
        ['-DGLEW_MX'],
        ['-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'],
    )
    for extra in combos:
        self.btest(src, args=base_flags + extra, expected='1')
def test_doublestart_bug(self):
    """Regression test: add+remove a run dependency in preRun must not start main twice."""
    create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
    self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@no_chrome('see #7930')
@requires_threads
def test_html5(self):
    """HTML5 events API test, plain / optimized+closure / pthreads-proxied builds."""
    for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
        print(opts)
        self.btest(path_from_root('tests', 'test_html5.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0', timeout=20)
@requires_threads
def test_html5_gamepad(self):
    """HTML5 gamepad API test in the same three build configurations as test_html5."""
    for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
        print(opts)
        self.btest(path_from_root('tests', 'test_gamepad.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
    """WebGL context creation with antialiasing disabled (CI-safe variant)."""
    for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
        print(opts)
        self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0', timeout=20)
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
    """Full WebGL context creation test, including a pthreads build."""
    for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
        print(opts)
        self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
    """WebGL context creation on a canvas that has no explicit id (issue #4556)."""
    self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
    """Create and destroy WebGL contexts under several optimization settings."""
    for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
        print(opts)
        self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0', timeout=20)
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
    """Read back color-buffer pixels; skipped on Windows due to a Firefox WebGL bug."""
    if WINDOWS:
        self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
    self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0', timeout=20)
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
    """glShaderSource with explicit source lengths, with and without FULL_ES2."""
    for opts in [[], ['-s', 'FULL_ES2=1']]:
        print(opts)
        self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
def test_webgl2(self):
    """Core WebGL2 test in plain, closure-optimized, and FULL_ES2 builds."""
    for opts in [[], ['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'], ['-s', 'FULL_ES2=1']]:
        print(opts)
        self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
def test_webgl2_objects(self):
    """Exercise WebGL2 object APIs (webgl2_objects.cpp); expects success."""
    build_flags = ['-s', 'USE_WEBGL2=1', '-lGL']
    self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=build_flags, expected='0')
def test_webgl2_ubos(self):
    """WebGL2 uniform buffer objects test; expects '0'."""
    self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
    """Garbage-free GL entry points, once targeting WebGL2 and once with defaults."""
    self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
    self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
    """WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION: run WebGL1-style code on a WebGL2 context."""
    self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
    """WebGL2 build passed through the closure compiler; expects '0'."""
    self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
    """glVertexAttribPointer with packed vertex types under GL_ASSERTIONS."""
    self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'USE_WEBGL2=1', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
    """WebGL2 pixel buffer objects test; expects '0'."""
    self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_sdl_touch(self):
    """SDL touch-event handling, with automated success reporting."""
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print(opts)
        self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
    """HTML5 mouse-event handling, with automated success reporting."""
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print(opts)
        self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
    """SDL mouse-wheel event handling, with automated success reporting."""
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print(opts)
        self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
@no_wasm_backend('asm.js-specific')
def test_codemods(self):
    """asm.js client-side code modification: PRECISE_F32 settings change observed results."""
    # tests asm.js client-side code modifications
    for opt_level in [0, 2]:
        print('opt level', opt_level)
        opts = ['-O' + str(opt_level), '-s', 'WASM=0']
        # sanity checks, building with and without precise float semantics generates different results
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
@no_wasm_backend('emterpretify')
def test_wget(self):
    """emscripten_wget under ASYNCIFY and the emterpreter's async modes."""
    with open('test.txt', 'w') as f:
        f.write('emscripten')
    self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
    print('asyncify+emterpreter')
    self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
    print('emterpreter by itself')
    self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
@no_wasm_backend('emterpretify')
def test_wget_data(self):
    """emscripten_wget_data via the emterpreter's async support, with and without assertions."""
    with open('test.txt', 'w') as f:
        f.write('emscripten')
    self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
    self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
    """Check Module.locateFile redirects loads of .mem/.wasm/.data files to a subdirectory,
    both when set from a pre-js and when set in the shell HTML."""
    for wasm in ([0, 1] if not self.is_wasm_backend() else [1]):
        print('wasm', wasm)
        self.clear()
        # the program verifies the preloaded data file was found and readable
        create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
        create_test_file('data.txt', 'load me right before...')
        create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
        run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
        # put pre.js first, then the file packager data, so locateFile is there for the file loading code
        run_process([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
        os.mkdir('sub')
        # move the runtime files into sub/ so only locateFile can find them
        if wasm:
            shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
        else:
            shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
        shutil.move('test.data', os.path.join('sub', 'test.data'))
        self.run_browser('page.html', None, '/report_result?1')
        # alternatively, put locateFile in the HTML
        print('in html')
        create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
        def in_html(expected, args=[]):
            # rebuild with the custom shell and confirm the reported result
            run_process([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
            if wasm:
                shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
            else:
                shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
            self.run_browser('page.html', None, '/report_result?' + expected)
        in_html('1')
        # verify that the mem init request succeeded in the latter case
        if not wasm:
            create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
            in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
    """GLFW3 API test under legacy GL emulation, plain and size-optimized with closure."""
    for opts in [[], ['-Os', '--closure', '1']]:
        print(opts)
        self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
    """Run glfw_events.c against both the GLFW 2 and GLFW 3 emulation layers."""
    for version in (2, 3):
        flags = ['-s', 'USE_GLFW=%d' % version, '-DUSE_GLFW=%d' % version, '-lglfw', '-lGL']
        self.btest(path_from_root('tests', 'glfw_events.c'), args=flags, expected='1')
@no_wasm_backend('asm.js')
def test_asm_swapping(self):
    """Hot-swap an asm.js module (SWAPPABLE_ASM_MODULE) and verify results before and after."""
    self.clear()
    # driver: checks the initial module's result, loads second.js, then checks the swapped result
    create_test_file('run.js', r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
    for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
        print(opts)
        opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
        create_test_file('second.cpp', self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
        run_process([PYTHON, EMCC, 'second.cpp'] + opts)
        # distill second.js into a swap-in module
        run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
        self.assertExists('second.js')
        if SPIDERMONKEY_ENGINE in JS_ENGINES:
            out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
            self.validate_asmjs(out)
        else:
            print('Skipping asm validation check, spidermonkey is not configured')
        self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
@requires_graphics_hardware
def test_sdl2_image(self):
    """Load an image with SDL2_image and inspect pixel data; also covers --preload-file
    destinations (root vs subdirectory) and memory-init-file on/off."""
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    # read the source via a context manager (open(...).read() leaked the handle)
    with open(path_from_root('tests', 'sdl2_image.c')) as f:
        src = f.read()
    create_test_file('sdl2_image.c', self.with_report_result(src))
    for mem in [0, 1]:
        for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
                                        ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
            run_process([
                PYTHON, EMCC, 'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
                '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
            ])
            self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
    """Load a JPEG (with .jpeg extension) through SDL2_image and verify the result."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
    # read the source via a context manager (open(...).read() leaked the handle)
    with open(path_from_root('tests', 'sdl2_image.c')) as f:
        src = f.read()
    create_test_file('sdl2_image_jpeg.c', self.with_report_result(src))
    run_process([
        PYTHON, EMCC, 'sdl2_image_jpeg.c', '-o', 'page.html',
        '--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
    ])
    self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
    """SDL2_image restricted to the png codec via SDL2_IMAGE_FORMATS."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
                                                     '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
    """SDL2 keyboard events: a pre-js synthesizes keydown/keypress/keyup and polls via _one()."""
    for defines in [[]]:
        create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
        create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
        run_process([PYTHON, EMCC, 'sdl2_key.c', '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
        self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
    """SDL2 text input: a pre-js synthesizes keypress events and polls via _one()."""
    create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
    create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
    run_process([PYTHON, EMCC, 'sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
@flaky
@requires_graphics_hardware
def test_sdl2_mouse(self):
    """SDL2 mouse events: a pre-js dispatches synthetic mousedown/mouseup/mousemove events
    positioned relative to the canvas offset."""
    create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
    run_process([PYTHON, EMCC, 'sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1', timeout=30)
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
  # Same mouse simulation as test_sdl2_mouse, but events carry raw page
  # coordinates (no canvas offset added) and a custom HTML shell positions
  # the canvas inside an offset container, exercising SDL2's coordinate
  # translation (built with -DTEST_SDL_MOUSE_OFFSETS=1).
  create_test_file('pre.js', '''
    function simulateMouseEvent(x, y, button) {
      var event = document.createEvent("MouseEvents");
      if (button >= 0) {
        var event1 = document.createEvent("MouseEvents");
        event1.initMouseEvent('mousedown', true, true, window,
                   1, x, y, x, y,
                   0, 0, 0, 0,
                   button, null);
        Module['canvas'].dispatchEvent(event1);
        var event2 = document.createEvent("MouseEvents");
        event2.initMouseEvent('mouseup', true, true, window,
                   1, x, y, x, y,
                   0, 0, 0, 0,
                   button, null);
        Module['canvas'].dispatchEvent(event2);
      } else {
        var event1 = document.createEvent("MouseEvents");
        event1.initMouseEvent('mousemove', true, true, window,
                   0, x, y, x, y,
                   0, 0, 0, 0,
                   0, null);
        Module['canvas'].dispatchEvent(event1);
      }
    }
    window['simulateMouseEvent'] = simulateMouseEvent;
  ''')
  # Custom page: the canvas lives in a container shifted 5px from the origin,
  # so page coordinates and canvas coordinates deliberately differ.
  create_test_file('page.html', '''
    <html>
      <head>
        <style type="text/css">
          html, body { margin: 0; padding: 0; }
          #container {
            position: absolute;
            left: 5px; right: 0;
            top: 5px; bottom: 0;
          }
          #canvas {
            position: absolute;
            left: 0; width: 600px;
            top: 0; height: 450px;
          }
          textarea {
            margin-top: 500px;
            margin-left: 5px;
            width: 600px;
          }
        </style>
      </head>
      <body>
        <div id="container">
          <canvas id="canvas"></canvas>
        </div>
        <textarea id="output" rows="8"></textarea>
        <script type="text/javascript">
          var Module = {
            canvas: document.getElementById('canvas'),
            print: (function() {
              var element = document.getElementById('output');
              element.value = ''; // clear browser cache
              return function(text) {
                if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
                element.value += text + "\\n";
                element.scrollTop = element.scrollHeight; // focus on bottom
              };
            })()
          };
        </script>
        <script type="text/javascript" src="sdl2_mouse.js"></script>
      </body>
    </html>
  ''')
  create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))

  run_process([PYTHON, EMCC, 'sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
  self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2glshader(self):
  # GL shaders under SDL2 + legacy GL emulation; first with closure, then
  # again proxied to a worker (closure is skipped there — see XXX note).
  self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
  self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
  # A blank SDL2 canvas must match the reference image.
  self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
  # Palette-based rendering to the canvas, compared against a reference image.
  self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
  # Rendering to the canvas twice in a row must still match the reference.
  self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
  # SDL2_gfx port; reference_slack=2 tolerates small per-pixel differences.
  self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
  # One pre.js per color channel; each passes a single flag (-r/-g/-b)
  # as argv to the native test via Module['arguments'].
  channels = ('r', 'g', 'b')
  for ch in channels:
    create_test_file('args-%s.js' % ch, '''
      Module['arguments'] = ['-%s'];
    ''' % ch)

  # Render once per channel and compare to the matching reference image.
  for ch in channels:
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_%s.png' % ch, args=['-s', 'USE_SDL=2', '--pre-js', 'args-%s.js' % ch])
def test_sdl2_swsurface(self):
  # Software-surface rendering path; the native test reports 1 on success.
  self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
  # load an image file, get pixel data.
  # The .not extension forces format detection by content, not file name.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
  # load an image file, get pixel data.
  # Same as test_sdl2_image_prepare but via the *_data variant of the API.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
  # SDL2 rendering proxied to a worker. The post() hook rewrites the page to
  # run the reftest after rAF frames have had time to reach the main thread.
  def post():
    html = open('test.html').read()
    html = html.replace('</body>', '''
      <script>
        function assert(x, y) { if (!x) throw 'assertion failed ' + y }
        %s

        var windowClose = window.close;
        window.close = function() {
          // wait for rafs to arrive and the screen to update before reftesting
          setTimeout(function() {
            doReftest();
            setTimeout(windowClose, 5000);
          }, 1000);
        };
      </script>
    </body>''' % open('reftest.js').read())
    create_test_file('test.html', html)

  create_test_file('data.txt', 'datum')
  self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
  # key events should be detected using SDL_PumpEvents
  create_test_file('pre.js', '''
    function keydown(c) {
      var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
      document.dispatchEvent(event);
    }
  ''')
  self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
  # SDL2 timer functionality; the native test reports 5 on success.
  self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
  # Canvas sizing under SDL2; the native test reports 1 on success.
  self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
  # SDL, OpenGL, readPixels
  create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
  run_process([PYTHON, EMCC, 'sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
  self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
  # GL fog (simple mode) under legacy GL emulation, against a reference image.
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
             args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
  # GL fog with negative fog coordinates, against a reference image.
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
             args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
  # GL fog density mode, against a reference image.
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
             args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
  # GL fog exp2 mode, against a reference image.
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
             args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
             message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
  # GL fog linear mode; reference_slack=1 tolerates tiny pixel differences.
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
             args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
             message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
  # SDL2 under -O1; the native test reports 1 on success.
  self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
  # Direct writes to the canvas; the native test reports 0 on success.
  self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
  # GL frame swapping when proxied to a worker. The post-build hook strips the
  # postRun reftest trigger so the comparison is not taken on the first frame.
  def post_build(*args):
    self.post_manual_reftest(*args)
    html = open('test.html').read()
    html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
    assert html != html2
    create_test_file('test.html', html2)
  self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
  # SDL2_ttf text rendering with an embedded TrueType font.
  shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
  self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
             args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
             message='You should see colorful "hello" and "world" in the window',
             timeout=30)
def test_sdl2_custom_cursor(self):
  # Custom cursor from a preloaded BMP; the native test reports 1 on success.
  shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
  self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
  # Miscellaneous SDL2 APIs, built directly and then via an intermediate
  # object file to cover the two-step compile+link path.
  self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])

  print('also test building to object files first')
  src = open(path_from_root('tests', 'sdl2_misc.c')).read()
  create_test_file('test.c', self.with_report_result(src))
  run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
  run_process([PYTHON, EMCC, 'test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
  self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer(self):
  # SDL2_mixer with an OGG sample; the native test reports 1 on success.
  shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), 'sound.ogg')
  self.btest('sdl2_mixer.c', expected='1', args=['--preload-file', 'sound.ogg', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
  # SDL2_mixer with a WAV sample; the native test reports 1 on success.
  shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
  self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
  # Cocos2d port hello-world; preloads the sample's resource tree at the
  # filesystem root (the trailing '@' maps it there).
  cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
  preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
  self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
             args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
             message='You should see Cocos2d logo',
             timeout=30)
@no_wasm_backend('emterpretify')
def test_emterpreter_async(self):
  # Run the emterpreter-async test at every optimization level, -O0 .. -O3.
  for level in range(4):
    print(level)
    self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(level), '-g2'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_2(self):
  # Error.stackTraceLimit defaults to 10 in chrome but this test relies on more
  # than 40 stack frames being reported, so raise it via pre.js.
  create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
  self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3', '--pre-js', 'pre.js', ])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_virtual(self):
  # Async emterpreter calls through virtual dispatch, at -O0 .. -O3.
  for level in range(4):
    print(level)
    self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(level), '-profiling'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_virtual_2(self):
  # Second virtual-dispatch variant, with assertions and SAFE_HEAP enabled.
  for opts in [0, 1, 2, 3]:
    print(opts)
    self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_bad(self):
  # Async use from a blacklisted (non-emterpreted) function, at -O0 .. -O3.
  for opts in [0, 1, 2, 3]:
    print(opts)
    self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_bad_2(self):
  # Sweep optimization level x ASSERTIONS; the blacklisted _middle changes
  # the reported result depending on whether assertions are enabled.
  for level in range(4):
    for assertions in (0, 1):
      # without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
      # with assertions, we hit the emterpreter-async assertion on that, and report a clear error
      expected = str(assertions + 1)
      print(level, assertions, expected)
      self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(level), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_mainloop(self):
  # Async emterpreter interacting with the main loop, at -O0 .. -O3.
  for opts in [0, 1, 2, 3]:
    print(opts)
    self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
@no_wasm_backend('emterpretify')
def test_emterpreter_async_with_manual(self):
  # Async main loop with a manually blacklisted _acall, at -O0 .. -O3.
  for opts in [0, 1, 2, 3]:
    print(opts)
    self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
@no_wasm_backend('emterpretify')
def test_emterpreter_async_sleep2(self):
  # emscripten_sleep-style async under -Oz.
  self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_sleep2_safeheap(self):
  # check that safe-heap machinery does not cause errors in async operations
  self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@no_wasm_backend('emterpretify')
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
  # SDL audio combined with emterpreter-async sleeping; long timeout since
  # the test plays audio in real time.
  self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'], timeout=90)
@no_wasm_backend('emterpretify')
def test_mainloop_reschedule(self):
  # Rescheduling the main loop under emterpreter-async.
  self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
@no_wasm_backend('emterpretify')
def test_mainloop_infloop(self):
  # A main loop that never returns, under emterpreter-async.
  self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
@no_wasm_backend('emterpretify')
def test_emterpreter_async_iostream(self):
  # C++ iostream use from emterpreter-async code.
  self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
@requires_sync_compilation
def test_modularize(self):
  # MODULARIZE=1 output: for each optimization mode, instantiate the emitted
  # factory in several ways (default Module, custom EXPORT_NAME, Module
  # options object, then() promise-style API) and check each loads and runs.
  for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
    for args, code in [
      ([], 'Module();'), # defaults
      # use EXPORT_NAME
      (['-s', 'EXPORT_NAME="HelloWorld"'], '''
        if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
        HelloWorld.noInitialRun = true; // errorneous module capture will load this and cause timeout
        HelloWorld();
      '''),
      # pass in a Module option (which prevents main(), which we then invoke ourselves)
      (['-s', 'EXPORT_NAME="HelloWorld"'], '''
        var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
          setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
        } });
      '''),
      # similar, but without a mem init file, everything is sync and simple
      (['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
        var hello = HelloWorld({ noInitialRun: true});
        hello._main();
      '''),
      # use the then() API
      (['-s', 'EXPORT_NAME="HelloWorld"'], '''
        HelloWorld({ noInitialRun: true }).then(function(hello) {
          hello._main();
        });
      '''),
      # then() API, also note the returned value
      (['-s', 'EXPORT_NAME="HelloWorld"'], '''
        var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
          setTimeout(function() {
            hello._main();
            if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
          });
        });
      '''),
    ]:
      print('test on', opts, args, code)
      src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
      create_test_file('test.c', self.with_report_result(src))
      # this test is synchronous, so avoid async startup due to wasm features
      run_process([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'BINARYEN_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
      create_test_file('a.html', '''
        <script src="a.out.js"></script>
        <script>
          %s
        </script>
      ''' % code)
      self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
@no_wasm_backend('cannot customize TOTAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
  # MODULARIZE=1 combined with --preload-file while passing a custom
  # TOTAL_MEMORY to the module factory at instantiation time (asm.js only).
  # amount of memory different from the default one that will be allocated for the emscripten heap
  totalMemory = 33554432
  for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
    # the main function simply checks that the amount of allocated heap memory is correct
    src = r'''
      #include <stdio.h>
      #include <emscripten.h>
      int main() {
        EM_ASM({
          // use eval here in order for the test with closure compiler enabled to succeed
          var totalMemory = Module['TOTAL_MEMORY'];
          assert(totalMemory === %d, 'bad memory size');
        });
        REPORT_RESULT(0);
        return 0;
      }
    ''' % totalMemory
    create_test_file('test.c', self.with_report_result(src))
    # generate a dummy file
    create_test_file('dummy_file', 'dummy')
    # compile the code with the modularize feature and the preload-file option enabled
    # no wasm, since this tests customizing total memory at runtime
    run_process([PYTHON, EMCC, 'test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
    create_test_file('a.html', '''
      <script src="a.out.js"></script>
      <script>
        // instantiate the Foo module with custom TOTAL_MEMORY value
        var foo = Foo({ TOTAL_MEMORY: %d });
      </script>
    ''' % totalMemory)
    self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
  # see original in test_core.py
  # Generate the WebIDL glue, then build and run the browser variant.
  run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
               path_from_root('tests', 'webidl', 'test.idl'),
               'glue'])
  self.assertExists('glue.cpp')
  self.assertExists('glue.js')
  for opts in [[], ['-O1'], ['-O2']]:
    print(opts)
    self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@no_wasm_backend('dynamic linking')
@requires_sync_compilation
def test_dynamic_link(self):
  # MAIN_MODULE/SIDE_MODULE dynamic linking: main calls side(), checks the
  # returned string survives the side module's allocation, then re-runs the
  # same pair proxied to a worker and with wasm auto-preloading.
  create_test_file('pre.js', '''
    Module.dynamicLibraries = ['side.wasm'];
  ''')
  create_test_file('main.cpp', r'''
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <emscripten.h>
    char *side(const char *data);
    int main() {
      char *temp = side("hello through side\n");
      char *ret = (char*)malloc(strlen(temp)+1);
      strcpy(ret, temp);
      temp[1] = 'x';
      EM_ASM({
        Module.realPrint = out;
        out = function(x) {
          if (!Module.printed) Module.printed = x;
          Module.realPrint(x);
        };
      });
      puts(ret);
      EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
      REPORT_RESULT(2);
      return 0;
    }
  ''')
  create_test_file('side.cpp', r'''
    #include <stdlib.h>
    #include <string.h>
    char *side(const char *data);
    char *side(const char *data) {
      char *ret = (char*)malloc(strlen(data)+1);
      strcpy(ret, data);
      return ret;
    }
  ''')
  run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
  self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])

  print('wasm in worker (we can read binary data synchronously there)')

  create_test_file('pre.js', '''
    var Module = { dynamicLibraries: ['side.wasm'] };
  ''')
  run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
  self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])

  print('wasm (will auto-preload since no sync binary reading)')

  create_test_file('pre.js', '''
    Module.dynamicLibraries = ['side.wasm'];
  ''')
  # same wasm side module works
  self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed_wasm(self):
  # wasm build, run on the main thread.
  self._test_dylink_dso_needed(1, 0)
def test_dylink_dso_needed_wasm_inworker(self):
  # wasm build, proxied to a worker.
  self._test_dylink_dso_needed(1, 1)
def test_dylink_dso_needed_asmjs(self):
  # asm.js build, run on the main thread.
  self._test_dylink_dso_needed(0, 0)
def test_dylink_dso_needed_asmjs_inworker(self):
  # asm.js build, proxied to a worker.
  self._test_dylink_dso_needed(0, 1)
@requires_sync_compilation
def _test_dylink_dso_needed(self, wasm, inworker):
  # here we reuse runner._test_dylink_dso_needed, but the code is run via browser.
  # wasm: 0/1 selects asm.js vs wasm; inworker: 0/1 adds --proxy-to-worker.
  print('\n# wasm=%d inworker=%d' % (wasm, inworker))
  self.set_setting('WASM', wasm)
  self.emcc_args += ['-O2']

  def do_run(src, expected_output):
    # XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
    #     -> do the assert about expected output inside browser.
    #
    # we have to put the hook into post.js because in main it is too late
    # (in main we won't be able to catch what static constructors inside
    # linked dynlibs printed), and in pre.js it is too early (out is not yet
    # setup by the shell).
    create_test_file('post.js', r'''
      Module.realPrint = out;
      out = function(x) {
        if (!Module.printed) Module.printed = "";
        Module.printed += x + '\n'; // out is passed str without last \n
        Module.realPrint(x);
      };
    ''')
    src += r'''
      int main() {
        _main();
        EM_ASM({
          var expected = %r;
          assert(Module.printed === expected, ['stdout expected:', expected]);
        });
        REPORT_RESULT(0);
      }
    ''' % (expected_output,)
    # --proxy-to-worker only on main
    if inworker:
      self.emcc_args += ['--proxy-to-worker']
    self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])

  super(browser, self)._test_dylink_dso_needed(do_run)
@no_wasm_backend('dynamic linking')
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
  # Legacy GL emulation used from inside a dynamically-linked side module:
  # the side module initializes SDL video and returns the GL extension string,
  # which must include GL_EXT_texture_env_combine from the emulation layer.
  create_test_file('pre.js', '''
    Module.dynamicLibraries = ['side.wasm'];
  ''')
  create_test_file('main.cpp', r'''
    #include <stdio.h>
    #include <string.h>
    #include <assert.h>
    const char *side();
    int main() {
      const char *exts = side();
      puts(side());
      assert(strstr(exts, "GL_EXT_texture_env_combine"));
      REPORT_RESULT(1);
      return 0;
    }
  ''')
  create_test_file('side.cpp', r'''
    #include "SDL/SDL.h"
    #include "SDL/SDL_opengl.h"
    const char *side() {
      SDL_Init(SDL_INIT_VIDEO);
      SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
      return (const char *)glGetString(GL_EXTENSIONS);
    }
  ''')
  run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])

  self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
def test_memory_growth_during_startup(self):
  # Preloading a 30MB file into a 16MB heap forces memory growth during
  # startup, which must succeed with ALLOW_MEMORY_GROWTH=1.
  create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
  self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
  # Helper: write html.html, a copy of the minimal shell that deletes
  # SharedArrayBuffer and Atomics to simulate a browser without them.
  create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
    <script>
      SharedArrayBuffer = undefined;
      Atomics = undefined;
    </script>
  '''))
# Test that the emscripten_ atomics api functions work.
@requires_threads
def test_pthread_atomics(self):
  # emscripten_ atomics API, with an 8-thread pool.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
  # 64-bit atomic operations across threads.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
  # C++11 std::atomic 64-bit ops, with and without pthreads, -O0 and -O3.
  for opt in [['-O0'], ['-O3']]:
    for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
  # We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
  # Therefore perform very extensive testing of different codegen modes to catch any problems.
  for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
    for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
      for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
        print(opt, debug, f32)
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt + debug + f32 + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
  # 64-bit variant of __sync_fetch_and_op; also run as asm.js.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
  # GCC __sync_op_and_fetch builtins; also run as asm.js.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
  # 64-bit variant of __sync_op_and_fetch; also run as asm.js.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@requires_threads
def test_pthread_gcc_atomics(self):
  # Remaining GCC atomic builtins not covered by the two tests above.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
  # __sync_lock_test_and_set / __sync_lock_release, optionally using the
  # emscripten intrinsic variants; also run as asm.js.
  for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
  # Basic thread creation, at -O0 and -O3.
  for opt in [['-O0'], ['-O3']]:
    print(str(opt))
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
  # PROXY_TO_PTHREAD=1 runs main() on a dedicated pthread.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
  # A pthread spawning another pthread, with and without MODULARIZE output.
  for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
  # Pthreads spawning pthreads where callers immediately join the children.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
  # Main thread waiting on a worker via pthread_join().
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
  # pthread_cancel() operation.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
  # pthread_kill() operation (skipped on chrome — see the decorator).
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
  # pthread_cleanup_push/_pop cleanup-handler stack.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
  # pthread mutex API, normally and in spinlock mode (-DSPINLOCK_TEST).
  for arg in [[], ['-DSPINLOCK_TEST']]:
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
  # Thread-safe memory allocation.
  self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
    # Fix: the original command line passed TOTAL_MEMORY twice (64MB early,
    # 256MB last). The last -s flag wins in emcc, so 256MB was the effective
    # value; keep only that single authoritative setting.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])

# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])

# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])

# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
    # Build twice: default, and with -DUSE_C_VOLATILE defined.
    for arg in [[], ['-DUSE_C_VOLATILE']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)

# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])

# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])

# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])

# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])

# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
    # also_asmjs: repeat the test against the asm.js build as well.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
@requires_threads
def test_pthread_num_logical_cores(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_HINT_NUM_CORES=2'], also_asmjs=True)

# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])

# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
    # Build both without and with pthreads enabled; both must report success.
    for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test that --separate-asm works with -s USE_PTHREADS=1.
@no_wasm_backend('asm.js')
@requires_threads
def test_pthread_separate_asm_pthreads(self):
    # Build once plainly and once under MODULARIZE with a custom launcher shell.
    modularize_variants = [
        [],
        ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')],
    ]
    base_args = ['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling']
    for extra in modularize_variants:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=base_args + extra)
# Test the operation of Module.pthreadMainPrefixURL variable
@requires_threads
def test_pthread_custom_pthread_main_url(self):
    self.clear()
    os.makedirs('cdn')
    # Minimal program: a spawned thread stores 1 into `result` (or the main
    # thread sets it directly when threading is unsupported).
    create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))

    # Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
    create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
    run_process([PYTHON, EMCC, 'main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
    # Move the worker into cdn/ so it is only reachable via locateFile.
    shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
    self.run_browser('test.html', '', '/report_result?1')

    # Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
    create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
    run_process([PYTHON, EMCC, 'main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
    # Delete the default-location copy to prove the cdn/ one is what loads.
    try_delete('test.worker.js')
    self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), that it's not a deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
    # aborting_malloc=1: thread allocations are expected to succeed.
    # aborting_malloc=0: threads deliberately over-allocate so some malloc calls fail.
    for aborting_malloc in (0, 1):
        flag = str(aborting_malloc)
        print('aborting malloc=' + flag)
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + flag, '-DABORTING_MALLOC=' + flag, '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
    opt_levels = [[], ['-O2']]
    thread_modes = [[], ['-s', 'USE_PTHREADS=1']]
    for opts in opt_levels:
        for args in thread_modes:
            self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])

# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])

# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
    # Three builds: proxied to a pthread, pthreads without proxying, and a
    # plain single-threaded -Oz build.
    self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])

# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
    # Same three build configurations as the synchronous variant.
    self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
    self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
    # Cross every memory-init-file mode with both a MODULARIZE build and a plain -O3 build.
    for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
        for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
            self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])

# Same global-data check, but with synchronous (non-async) wasm compilation.
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
    for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
        args = ['-s', 'BINARYEN_ASYNC_COMPILATION=0']
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])

# Test UTF-8 related functionality in pthreads (see tests/pthread/test_pthread_utf8_funcs.cpp).
@requires_threads
def test_pthread_utf8_funcs(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
def test_main_thread_em_asm_signatures(self):
    self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])

# Same signature test, but run under pthreads with the calls proxied to the main thread.
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
    self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
    # TODO: enable this with wasm, currently pthreads/atomics have limitations
    # Compiles a raw LLVM IR (.ll) input directly, so invoke emcc and the
    # browser manually instead of going through btest().
    run_process([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
    self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
    self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
@no_wasm_backend('mem init file')
def test_meminit_pairs(self):
    # Build a C source embedding every possible two-byte pair (one row of 256
    # pairs per leading byte), then append the driver from meminit_pairs.c,
    # and verify the memory initializer round-trips at several closure settings.
    pair_rows = (''.join('\\x{:02x}\\x{:02x}'.format(i, j) for j in range(256)) for i in range(256))
    source = 'const char *data[] = {\n "' + '",\n "'.join(pair_rows)
    with open(path_from_root('tests', 'meminit_pairs.c')) as f:
        source += '"\n};\n' + f.read()
    common = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
    for closure in (["--closure", "0"], ["--closure", "0", "-g"], ["--closure", "1"]):
        self.btest(source, expected='0', args=common + closure)
@no_wasm_backend('mem init file')
def test_meminit_big(self):
    # Like test_meminit_pairs, but repeats the pair data 256x: the generated
    # source string exceeds 128MB of text, encoding a >32MB memory initializer.
    d = 'const char *data[] = {\n "'
    d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
                                for j in range(256)) for i in range(256)] * 256)
    with open(path_from_root('tests', 'meminit_pairs.c')) as f:
        d += '"\n};\n' + f.read()
    assert len(d) > (1 << 27) # more than 32M memory initializer
    args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
    self.btest(d, expected='0', args=args + ["--closure", "0"])
    self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
    self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
    # Tests canvas style handling under --proxy-to-worker with a custom shell and pre-js.
    self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])

def test_canvas_size_proxy(self):
    # Tests canvas size handling under --proxy-to-worker.
    self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])

def test_custom_messages_proxy(self):
    # Tests custom message passing between page and worker under --proxy-to-worker.
    self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
@no_wasm_backend('asm.js')
def test_separate_asm(self):
    for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
        print(opts)

        # Build plainly to test.html/test.js and check the page reports success.
        create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
        run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
        self.run_browser('test.html', None, '/report_result?0')

        # A minimal hand-written page loading the single test.js also works.
        print('run one')
        create_test_file('one.html', '<script src="test.js"></script>')
        self.run_browser('one.html', None, '/report_result?0')

        # Split test.js into asm.js + rest.js with tools/separate_asm.py and
        # load the two halves from a page in order.
        print('run two')
        run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
        create_test_file('two.html', '''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
        self.run_browser('two.html', None, '/report_result?0')

        # Build directly with --separate-asm and check test.asm.js is emitted.
        print('run hello world')
        self.clear()
        assert not os.path.exists('tests.asm.js')
        self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
        self.assertExists('test.asm.js')
        os.unlink('test.asm.js')

        # With test.asm.js removed, the page must not be able to run at all.
        print('see a fail')
        self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
@no_wasm_backend('emterpretify')
def test_emterpretify_file(self):
    # Custom shell: fetch the emterpreter bytecode file (code.dat) via XHR,
    # hand it to the runtime through Module.emterpreterFile, and only then
    # load test.js.
    create_test_file('shell.html', '''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
    try_delete('code.dat')
    self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
    self.assertExists('code.dat')
    # And again with the default shell (no --shell-file).
    try_delete('code.dat')
    self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
    self.assertExists('code.dat')
def test_vanilla_html_when_proxying(self):
    # At each optimization level, build with --proxy-to-worker and load the
    # resulting JS from a minimal hand-written HTML page.
    for opt_level in (0, 1, 2):
        print(opt_level)
        create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
        run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opt_level), '--proxy-to-worker'])
        create_test_file('test.html', '<script src="test.js"></script>')
        self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
    # test the XHR for an asm.js mem init file being in flight already
    for o in [0, 1, 2]:
        print(o)
        opts = ['-O' + str(o), '-s', 'WASM=0']

        print('plain html')
        create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
        run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts)
        create_test_file('test.html', '<script src="test.js"></script>')
        self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.

        print('default html')
        self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
    # notice when we use async compilation
    # The shim wraps WebAssembly.instantiate/instantiateStreaming to record in
    # Module.sawAsyncCompilation whether the async path was taken.
    script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
    shell_with_script('shell.html', 'shell.html', script)
    common_args = ['--shell-file', 'shell.html']
    # Each pair is (extra emcc flags, expected sawAsyncCompilation value).
    for opts, expect in [
        ([], 1),
        (['-O1'], 1),
        (['-O2'], 1),
        (['-O3'], 1),
        (['-s', 'BINARYEN_ASYNC_COMPILATION=1'], 1), # force it on
        (['-O1', '-s', 'BINARYEN_ASYNC_COMPILATION=0'], 0), # force it off
    ]:
        print(opts, expect)
        self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
    # Ensure that compilation still works and is async without instantiateStreaming available
    no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
    shell_with_script('shell.html', 'shell.html', no_streaming + script)
    self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
    # Fixes: dropped a no-op single-argument os.path.join() around the already
    # absolute path, and reuse the `src` variable instead of repeating the
    # 'src.cpp' literal in the compile command.
    src = 'src.cpp'
    create_test_file(src, self.with_report_result(open(path_from_root('tests', 'manual_wasm_instantiate.cpp')).read()))
    run_process([PYTHON, EMCC, src, '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
    # The hand-written page supplies Module.instantiateWasm() itself.
    shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
    self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
    # Run the shared worker test flow with wasm enabled.
    self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
    # Test that it is possible to define a "Module.locateFile(foo)" function
    # to locate where test.wasm will be loaded from. (The original comment
    # said "worker.js" — a copy-paste from the pthread variant of this test.)
    self.clear()
    os.makedirs('cdn')
    create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
    create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
    # Consistency fix: use run_process (the helper used throughout this file)
    # instead of a one-off subprocess.check_call.
    run_process([PYTHON, EMCC, 'src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
    # Move the wasm into cdn/ so it is only findable through locateFile().
    shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
    self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
    # Exercise UTF8ToString against an embedded UTF-8 corpus.
    self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])

def test_utf16_textdecoder(self):
    # Exercise the UTF-16 marshalling helpers against an embedded corpus.
    self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])

def test_TextDecoder(self):
    # Compare output sizes of the three TEXTDECODER builds and assert the
    # expected ordering: TEXTDECODER=2 (no fallback) < TEXTDECODER=0
    # (fallback only) < default (TextDecoder plus fallback).
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
    just_fallback = os.path.getsize('test.js')
    self.btest('browser_test_hello_world.c', '0')
    td_with_fallback = os.path.getsize('test.js')
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
    td_without_fallback = os.path.getsize('test.js')
    self.assertLess(td_without_fallback, just_fallback)
    self.assertLess(just_fallback, td_with_fallback)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see #7374')
@requires_threads
def test_webgl_offscreen_canvas_in_pthread(self):
    for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
        self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])

# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@no_chrome('see #7374')
@requires_threads
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
    for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
        self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])

# Tests WebGL rendering that happens solely in a pthread (gl_only_in_pthread.cpp).
@no_chrome('see #7374')
@requires_threads
def test_webgl_offscreen_canvas_only_in_pthread(self):
    self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
    self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])

# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
    self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])

# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
    for args in [
        # full state restoration path on WebGL 1.0
        ['-s', 'USE_WEBGL2=0', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
        # VAO path on WebGL 1.0
        ['-s', 'USE_WEBGL2=0'],
        # WebGL 2 library linked but a WebGL 1 context requested (-DTEST_WEBGL2=0)
        ['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=0'],
        # VAO path on WebGL 2.0
        ['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
        # full state restoration path on WebGL 2.0
        ['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
        # blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
        ['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
    ]:
        cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
        self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)

# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
    self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])

# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
    self.btest('webgl_array_of_structs_uniform.c', '0', args=['-lGL', '-s', 'USE_WEBGL2=1'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@no_chrome('see #7374')
# Consistency fix: this builds with USE_PTHREADS/PROXY_TO_PTHREAD but, unlike
# every sibling pthread test, lacked the @requires_threads guard.
@requires_threads
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
    for args in [[], ['-DTEST_OFFSCREEN_CANVAS=1'], ['-DTEST_OFFSCREEN_CANVAS=2']]:
        cmd = args + ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
        print(str(cmd))
        self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@no_chrome('see #7374')
def test_webgl_resize_offscreencanvas_from_main_thread(self):
    # Exercise all 8 combinations of proxying, sync-blocking-loop, and OffscreenCanvas support.
    for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
        for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
            for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1']]:
                cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
                print(str(cmd))
                self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
# Tests the feature that shell html page can preallocate the typed array and place it to Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
    self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
    # Test error reporting in the negative case when the file URL doesn't exist. (http 404)
    self.btest('fetch/to_memory.cpp',
               expected='1',
               args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
               also_asmjs=True)

    # Test the positive case when the file URL exists. (http 200)
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    # Run both with and without IndexedDB support compiled in.
    for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
        self.btest('fetch/to_memory.cpp',
                   expected='1',
                   args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
                   also_asmjs=True)

# Tests emscripten_fetch() writing the fetched result into IndexedDB (fetch/to_indexeddb.cpp).
def test_fetch_to_indexdb(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/to_indexeddb.cpp',
               expected='1',
               args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
               also_asmjs=True)

# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/cached_xhr.cpp',
               expected='1',
               args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
               also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)

# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
@no_chrome('depends on moz-chunked-arraybuffer')
def test_fetch_stream_file(self):
    # Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
    # won't fully fit in the heap. This verifies that streaming works properly.
    # NOTE(review): the build below passes TOTAL_MEMORY=536870912 (512MB), not
    # 16MB as the comment above claims — confirm which is intended.
    s = '12345678'
    for i in range(14):
        s = s[::-1] + s # length of str will be 2^17=128KB
    with open('largefile.txt', 'w') as f:
        # 1024 x 128KB writes = 128MB total.
        for i in range(1024):
            f.write(s)
    self.btest('fetch/stream_file.cpp',
               expected='1',
               args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
               also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])

# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])

# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@no_wasm_backend("fetch API uses an asm.js based web worker to run synchronous XHRs and IDB operations")
def test_fetch_sync_xhr_in_wasm(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])

# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_xhr.cpp',
               expected='1',
               args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
               also_asmjs=True)
# Waiting on an EMSCRIPTEN_FETCH_WAITABLE request issued from a worker thread.
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    flags = ['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']
    self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=flags)
@requires_threads
def test_fetch_idb_store(self):
    # Storing a fetched resource into IndexedDB.
    flags = ['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1']
    self.btest('fetch/idb_store.cpp', expected='0', args=flags)
@requires_threads
def test_fetch_idb_delete(self):
    # Deleting a resource previously stored in IndexedDB.
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    flags = ['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1']
    self.btest('fetch/idb_delete.cpp', expected='0', args=flags)
@requires_threads
def test_asmfs_hello_file(self):
    # Basic ASMFS file loading, with a filename exercising the full set of
    # characters that are valid in file names.
    os.mkdir('dirrey')
    dst = os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt')
    shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), dst)
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
    self.btest('asmfs/hello_file.cpp', expected='0', args=flags)
@requires_threads
def test_asmfs_read_file_twice(self):
    # Reading the same ASMFS-backed file twice must work.
    shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
    self.btest('asmfs/read_file_twice.cpp', expected='0', args=flags)
@requires_threads
def test_asmfs_fopen_write(self):
    # fopen() + write on ASMFS.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1']
    self.btest('asmfs/fopen_write.cpp', expected='0', args=flags)
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
    # mkdir/create/unlink/rmdir paths via the cstdio remove() test on ASMFS.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1']
    self.btest('cstdio/test_remove.cpp', expected='0', args=flags)
@requires_threads
def test_asmfs_dirent_test_readdir(self):
    # readdir() on ASMFS.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1']
    self.btest('dirent/test_readdir.c', expected='0', args=flags)
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
    # readdir() on an empty directory on ASMFS.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1']
    self.btest('dirent/test_readdir_empty.c', expected='0', args=flags)
@requires_threads
def test_asmfs_unistd_close(self):
    # close() on ASMFS.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1']
    self.btest('unistd/close.c', expected='0', args=flags)
@requires_threads
def test_asmfs_unistd_access(self):
    # access() on ASMFS.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1']
    self.btest('unistd/access.c', expected='0', args=flags)
@requires_threads
def test_asmfs_unistd_unlink(self):
    # unlink() on ASMFS.
    # TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1']
    self.btest('unistd/unlink.c', expected='0', args=flags)
@requires_threads
def test_asmfs_test_fcntl_open(self):
    # fcntl open() modes on ASMFS, proxied to a pthread.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
    self.btest('fcntl-open/src.c', expected='0', args=flags)
@requires_threads
def test_asmfs_relative_paths(self):
    # Relative path resolution on ASMFS.
    flags = ['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1']
    self.btest('asmfs/relative_paths.cpp', expected='0', args=flags)
# Runs the pthread locale test single-threaded and with a pthread pool.
@requires_threads
def test_pthread_locale(self):
    # NOTE: the original configuration list contained the pthread-pool entry
    # twice, running an identical build redundantly; the duplicate is dropped.
    for args in [
        [],
        ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
    ]:
        print("Testing with: ", args)
        self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# emscripten_set_canvas_element_size() / emscripten_get_canvas_element_size()
# in a single-threaded program.
def test_emscripten_set_canvas_element_size(self):
    src = 'emscripten_set_canvas_element_size.c'
    self.btest(src, expected='1')
# emscripten_get_device_pixel_ratio() must be callable from pthreads (it
# proxies to the main thread for the window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
    for config in ([], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']):
        self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=config)
# emscripten_run_script() variants must work inside pthreads.
@requires_threads
def test_pthread_run_script(self):
    src = path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp')
    for extra in ([], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']):
        self.btest(src, expected='1', args=['-O3', '--separate-asm'] + extra, timeout=30)
# emscripten_set_canvas_element_size() + OffscreenCanvas across build
# configurations: main-loop vs explicit context swap, proxied pthreads,
# and manual CSS element sizing.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
    configs = [
        ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
        ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
        ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
        ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
        ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
    ]
    base = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '--separate-asm', '-s', 'GL_DEBUG=1', '--threadprofiler']
    for extra in configs:
        cmd = base + extra
        print(' '.join(cmd))
        self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# The absolute minimum pthread-enabled application, with and without
# optimization and MODULARIZE.
@requires_threads
def test_pthread_hello_thread(self):
    modularize_args = ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]
    for opts in ([], ['-O3']):
        for modularize in ([], modularize_args):
            self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# The application's main .js can be loaded manually through a Blob URL while
# still using pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
    # TODO: enable this with wasm, currently pthreads/atomics have limitations
    create_test_file('src.c', self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
    run_process([PYTHON, EMCC, 'src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
    shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
    self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# The base64 utilities must fall back gracefully when the browser provides no
# native atob() (and no fetch()).
def test_base64_atob_fallback(self):
    src = r'''
      #include <stdio.h>
      #include <emscripten.h>
      int main() {
        REPORT_RESULT(0);
        return 0;
      }
    '''
    create_test_file('test.c', self.with_report_result(src))
    # generate a dummy file
    create_test_file('dummy_file', 'dummy')
    # compile with MODULARIZE, a preloaded file, and everything inlined into one file
    flags = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1']
    run_process([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + flags)
    create_test_file('a.html', '''
      <script>
        atob = undefined;
        fetch = undefined;
      </script>
      <script src="a.out.js"></script>
      <script>
        var foo = Foo();
      </script>
    ''')
    self.run_browser('a.html', '...', '/report_result?0')
# SINGLE_FILE must fold everything into the generated HTML (with and without
# a proxy Worker): no stray .js or .worker.js artifacts.
def test_single_file_html(self):
    self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1'], also_proxied=True)
    self.assertExists('test.html')
    for leftover in ('test.js', 'test.worker.js'):
        self.assertNotExists(leftover)
# SINGLE_FILE combined with -s ENVIRONMENT=web and Closure (#7933).
def test_single_file_in_web_environment_with_closure(self):
    flags = ['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']
    self.btest('minimal_hello.c', '0', args=flags)
# SINGLE_FILE must cooperate with Module.locateFile: no asset may be routed
# through locateFile as a data: URI.
def test_single_file_locate_file(self):
    create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
    for wasm_enabled in (True, False):
        cmd = [PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
        if wasm_enabled:
            cmd += ['-s', 'WASM=1']
        run_process(cmd)
        create_test_file('test.html', '''
          <script>
            var Module = {
              locateFile: function (path) {
                if (path.indexOf('data:') === 0) {
                  throw new Error('Unexpected data URI.');
                }
                return path;
              }
            };
          </script>
          <script src="test.js"></script>
        ''')
        self.run_browser('test.html', None, '/report_result?0')
# SINGLE_FILE in JS output used with --proxy-to-worker: the worker code must
# be folded into test.js rather than emitted separately.
def test_single_file_worker_js(self):
    hello = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
    create_test_file('src.cpp', self.with_report_result(hello))
    run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1'])
    create_test_file('test.html', '<script src="test.js"></script>')
    self.run_browser('test.html', None, '/report_result?0')
    self.assertExists('test.js')
    self.assertNotExists('test.worker.js')
def test_access_file_after_heap_resize(self):
    # A preloaded file must remain readable after the heap has grown.
    create_test_file('test.txt', 'hello from file')
    create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
    run_process([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
    self.run_browser('page.html', 'hello from file', '/report_result?15')

    # Also invoke the file packager separately, so heap copying (or the lack
    # thereof) can be controlled explicitly.
    for file_packager_args in ([], ['--no-heap-copy']):
        print(file_packager_args)
        run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
        run_process([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
        self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
    # The HTML shell file may contain non-ASCII text (here an emoji); the
    # build must still succeed and the page must run.
    create_test_file('main.cpp', self.with_report_result(r'''
      int main() {
        REPORT_RESULT(0);
        return 0;
      }
    '''))
    shell = open(path_from_root('src', 'shell.html')).read()
    create_test_file('shell.html', shell.replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
    subprocess.check_output([PYTHON, EMCC, 'main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
    self.run_browser('test.html', None, '/report_result?0')
# Functionality of emscripten_thread_sleep().
@requires_threads
def test_emscripten_thread_sleep(self):
    flags = ['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]']
    self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=flags)
# An Emscripten application must run when its JS/wasm are served from a
# different directory than the page itself.
def test_browser_run_from_different_directory(self):
    create_test_file('test.c', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
    run_process([PYTHON, EMCC, 'test.c', '-o', 'test.html', '-O3'])
    if not os.path.exists('subdir'):
        os.mkdir('subdir')
    for produced in ('test.js', 'test.wasm'):
        shutil.move(produced, os.path.join('subdir', produced))
    # Point the generated page at the relocated JS.
    page = open('test.html').read()
    create_test_file('test-subdir.html', page.replace('test.js', 'subdir/test.js'))
    self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
    # Compile once per modularization mode, relocate the output into ./subdir,
    # and verify the page still reports success for each instantiation style.
    src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
    create_test_file('test.c', self.with_report_result(src))
    for args, creations in [
        (['-s', 'MODULARIZE=1'], [
            'Module();',  # documented way for using modularize
            'new Module();'  # not documented as working, but we support it
        ]),
        (['-s', 'MODULARIZE_INSTANCE=1'], [''])  # instance: no need to create anything
    ]:
        print(args)
        # compile the code with the modularize feature and the preload-file option enabled
        run_process([PYTHON, EMCC, 'test.c', '-o', 'test.js', '-O3'] + args)
        if not os.path.exists('subdir'):
            os.mkdir('subdir')
        shutil.move('test.js', os.path.join('subdir', 'test.js'))
        shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
        for creation in creations:
            print(creation)
            # Make sure JS is loaded from subdirectory
            create_test_file('test-subdir.html', '''
              <script src="subdir/test.js"></script>
              <script>
                %s
              </script>
            ''' % creation)
            self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but additionally the
# initial code is eval()ed, so document.currentScript is not present. That
# prevents finding the file in a subdir, but here we at least check we do not
# regress compared to the normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
    src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
    create_test_file('test.c', self.with_report_result(src))
    # test both modularize (and creating an instance) and modularize-instance
    # (which creates by itself)
    for path, args, creation in [
        ([], ['-s', 'MODULARIZE=1'], 'Module();'),
        ([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
        (['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
        (['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
    ]:
        print(path, args, creation)
        filesystem_path = os.path.join('.', *path)
        if not os.path.exists(filesystem_path):
            os.makedirs(filesystem_path)
        # compile the code with the modularize feature and the preload-file option enabled
        run_process([PYTHON, EMCC, 'test.c', '-o', 'test.js'] + args)
        shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
        shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
        # Fix: write the page with a context manager so the file is flushed and
        # closed before the browser fetches it (the original used a bare
        # open().write(), leaking the handle and relying on GC to flush).
        with open(os.path.join(filesystem_path, 'test.html'), 'w') as f:
            f.write('''
              <script>
                setTimeout(function() {
                  var xhr = new XMLHttpRequest();
                  xhr.open('GET', 'test.js', false);
                  xhr.send(null);
                  eval(xhr.responseText);
                  %s
                }, 1);
              </script>
            ''' % creation)
        self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_modularize_Module_input(self):
    # MODULARIZE_INSTANCE with a Module input object supplied by a custom shell.
    shell = path_from_root('tests', 'browser', 'modularize_Module_input.html')
    self.btest(path_from_root('tests', 'browser', 'modularize_Module_input.cpp'), '0', args=['--shell-file', shell, '-s', 'MODULARIZE_INSTANCE=1'])
def test_emscripten_request_animation_frame(self):
    # One-shot emscripten_request_animation_frame() callback.
    src = path_from_root('tests', 'emscripten_request_animation_frame.c')
    self.btest(src, '0')
def test_emscripten_request_animation_frame_loop(self):
    # Repeating emscripten_request_animation_frame_loop() callback.
    src = path_from_root('tests', 'emscripten_request_animation_frame_loop.c')
    self.btest(src, '0')
@requires_threads
def test_emscripten_set_timeout(self):
    # emscripten_set_timeout() fired on a proxied pthread.
    src = path_from_root('tests', 'emscripten_set_timeout.c')
    self.btest(src, '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
    # Repeating emscripten_set_timeout_loop() on a proxied pthread.
    src = path_from_root('tests', 'emscripten_set_timeout_loop.c')
    self.btest(src, '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
    # One-shot emscripten_set_immediate() callback.
    src = path_from_root('tests', 'emscripten_set_immediate.c')
    self.btest(src, '0')
def test_emscripten_set_immediate_loop(self):
    # Repeating emscripten_set_immediate_loop() callback.
    src = path_from_root('tests', 'emscripten_set_immediate_loop.c')
    self.btest(src, '0')
@requires_threads
def test_emscripten_set_interval(self):
    # emscripten_set_interval() on a proxied pthread.
    src = path_from_root('tests', 'emscripten_set_interval.c')
    self.btest(src, '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# emscripten_performance_now() and emscripten_date_now().
@requires_threads
def test_emscripten_performance_now(self):
    src = path_from_root('tests', 'emscripten_performance_now.c')
    self.btest(src, '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# emscripten_console_log(), emscripten_console_warn() and
# emscripten_console_error(); output is checked by the pre-js.
def test_emscripten_console_log(self):
    pre_js = path_from_root('tests', 'emscripten_console_log_pre.js')
    self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', pre_js])
def test_emscripten_throw_number(self):
    # emscripten_throw_number(); the thrown value is checked by the pre-js.
    pre_js = path_from_root('tests', 'emscripten_throw_number_pre.js')
    self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', pre_js])
def test_emscripten_throw_string(self):
    # emscripten_throw_string(); the thrown value is checked by the pre-js.
    pre_js = path_from_root('tests', 'emscripten_throw_string_pre.js')
    self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', pre_js])
# Closure compiler combined with -s ENVIRONMENT=web must not break a minimal
# console.log() application.
def test_closure_in_web_only_target_environment_console_log(self):
    flags = ['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1']
    self.btest('minimal_hello.c', '0', args=flags)
# Closure compiler combined with -s ENVIRONMENT=web must work for a small
# WebGL application.
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
    flags = ['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1']
    self.btest('webgl_draw_triangle.c', '0', args=flags)
# Two asm.js programs built with --separate-asm + MODULARIZE=1 must coexist on
# one page when given distinct asm module names, so their XHRed asm files do
# not conflict.
@no_wasm_backend('this tests asm.js support')
def test_two_separate_asm_files_on_same_page(self):
    # Fix: write the HTML through a context manager so the handle is closed
    # even if write() raises (the original open/write/close triple leaked it
    # on error).
    with open('main.html', 'w') as html_file:
        html_file.write(open(path_from_root('tests', 'two_separate_asm_files.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
    for out_js, export_name, asm_name in (
            ('page1.js', 'Module1', 'ModuleForPage1["asm"]'),
            ('page2.js', 'Module2', 'ModuleForPage2["asm"]')):
        cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', out_js, '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=' + export_name, '-s', 'SEPARATE_ASM_MODULE_NAME=' + asm_name]
        print(cmd)
        subprocess.check_call(cmd)
    self.run_browser('main.html', None, '/report_result?1')
# An asm.js program can be fully encapsulated by using --separate-asm +
# MODULARIZE=1. See encapsulated_asmjs_page_load.html for the example.
@no_wasm_backend('this tests asm.js support')
def test_encapsulated_asmjs_page_load(self):
    # Fix: write the HTML through a context manager so the handle is closed
    # even if write() raises (the original open/write/close triple leaked it
    # on error).
    with open('main.html', 'w') as html_file:
        html_file.write(open(path_from_root('tests', 'encapsulated_asmjs_page_load.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
    cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'a.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=EmscriptenCode', '-s', 'SEPARATE_ASM_MODULE_NAME="var EmscriptenCode"']
    print(cmd)
    subprocess.check_call(cmd)
    self.run_browser('main.html', None, '/report_result?1')
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_asmjs(self):
    # DECLARE_ASM_MODULE_EXPORTS=0 in asm.js mode, with and without MINIMAL_RUNTIME.
    base = ['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0']
    for minimal_runtime in ([], ['-s', 'MINIMAL_RUNTIME=1']):
        self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=base + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
    # DECLARE_ASM_MODULE_EXPORTS=0 in wasm mode with MINIMAL_RUNTIME.
    flags = ['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1']
    self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=flags)
# All code paths in src/shell_minimal_runtime.html must work.
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_minimal_runtime_loader_shell(self):
    base = ['-s', 'MINIMAL_RUNTIME=2']
    wasm_modes = ([], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'])
    for wasm in wasm_modes:
        for modularize in ([], ['-s', 'MODULARIZE=1']):
            print(str(base + wasm + modularize))
            self.btest('minimal_hello.c', '0', args=base + wasm + modularize)
import json
import pytest
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
# Two-node ClickHouse cluster shared by every test in this module. Both nodes
# get identical tmpfs-backed storage — two 40MB JBOD disks and a 200MB
# "external" disk — plus ZooKeeper for the Replicated* table engines.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
            config_dir='configs',
            main_configs=['configs/logs_config.xml'],
            with_zookeeper=True,
            tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
            macros={"shard": 0, "replica": 1} )

node2 = cluster.add_instance('node2',
            config_dir='configs',
            main_configs=['configs/logs_config.xml'],
            with_zookeeper=True,
            tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
            macros={"shard": 0, "replica": 2} )
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped fixture: start the two-node cluster once for all tests and
    always shut it down afterwards, even if startup itself failed."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def get_random_string(length):
    """Return a random string of ``length`` uppercase letters and digits.

    Fix: the previous implementation was Python-2-only — under Python 3,
    ``bytes(str)`` raises TypeError and ``str(bytearray)`` yields the literal
    ``"bytearray(b'...')"`` repr instead of the payload. Building the string
    with ``str.join`` over ``random.choice`` works on both 2 and 3.
    """
    symbols = string.ascii_uppercase + string.digits
    return ''.join(random.choice(symbols) for _ in range(length))
def get_used_disks_for_table(node, table_name, partition=None):
    """Return the disk names holding active parts of *table_name* on *node*,
    ordered by part modification time; optionally restrict to one partition."""
    partition_filter = "" if partition is None else "and partition='{}'".format(partition)
    query = """
        SELECT disk_name
        FROM system.parts
        WHERE table == '{name}' AND active=1 {suffix}
        ORDER BY modification_time
        """.format(name=table_name, suffix=partition_filter)
    return node.query(query).strip().split('\n')
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,alter", [
    ("mt_test_rule_with_invalid_destination","MergeTree()",0),
    ("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",0),
    ("mt_test_rule_with_invalid_destination","MergeTree()",1),
    ("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",1),
])
def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
    """A TTL move rule naming an unknown disk/volume, or a disk/volume outside
    the table's storage policy, must be rejected — both at CREATE time
    (alter=0) and via ALTER TABLE ... MODIFY TTL (alter=1)."""
    try:
        def get_command(x, policy):
            # Build an ALTER when in alter mode and a TTL expression is given;
            # otherwise a CREATE TABLE statement carrying the TTL expression.
            x = x or ""
            if alter and x:
                return """
                    ALTER TABLE {name} MODIFY TTL {expression}
                """.format(expression=x, name=name)
            else:
                return """
                    CREATE TABLE {name} (
                        s1 String,
                        d1 DateTime
                    ) ENGINE = {engine}
                    ORDER BY tuple()
                    {expression}
                    SETTINGS storage_policy='{policy}'
                """.format(expression=x, name=name, engine=engine, policy=policy)

        # Case 1: completely unknown disk.
        if alter:
            node1.query(get_command(None, "small_jbod_with_external"))

        with pytest.raises(QueryRuntimeException):
            node1.query(get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        # Case 2: completely unknown volume.
        if alter:
            node1.query(get_command(None, "small_jbod_with_external"))

        with pytest.raises(QueryRuntimeException):
            node1.query(get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        # Case 3: disk exists but is not in the 'only_jbod2' policy.
        if alter:
            node1.query(get_command(None, "only_jbod2"))

        with pytest.raises(QueryRuntimeException):
            node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        # Case 4: volume exists but is not in the 'only_jbod2' policy.
        if alter:
            node1.query(get_command(None, "only_jbod2"))

        with pytest.raises(QueryRuntimeException):
            node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_inserts_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')",0),
    ("mt_test_inserts_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_inserts_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')",1),
])
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
    """INSERT-time TTL placement: rows already expired against
    `TTL d1 TO DISK 'external'` must land directly on the external disk
    (positive=1); a part containing an unexpired row must stay on jbod1
    (positive=0)."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            # Negative case: the first row gets a far-future TTL so the part
            # as a whole is not considered expired at insert time.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_moves_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')",0),
    ("mt_test_moves_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_moves_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')",1),
])
def test_moves_to_disk_work(started_cluster, name, engine, positive):
    """Background TTL moves: rows with a TTL `wait_expire_1` seconds ahead
    must start on jbod1 and be moved to the external disk once that TTL
    expires (positive=1); a part containing a later-expiring row must still be
    on jbod1 at check time (positive=0)."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 6
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        # Start the TTL countdown in a sleeper thread so it overlaps with the
        # (slow) data generation and insert below.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        data = []  # 10MB in total
        for i in range(10):
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        # Nothing has expired yet: everything must still sit on jbod1.
        assert set(used_disks) == {"jbod1"}

        wait_expire_1_thread.join()
        # Allow time for the background move, but less than wait_expire_2 so
        # the negative case's later TTL has not fired yet.
        time.sleep(wait_expire_2/2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_volume_work","MergeTree()"),
    ("replicated_mt_test_moves_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')"),
])
def test_moves_to_volume_work(started_cluster, name, engine):
    """`TTL ... TO VOLUME`: parts initially spread over both JBOD disks must
    all be moved to the 'external' volume after the TTL expires."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1
        # Run the TTL countdown concurrently with the inserts below.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        # Two 5MB partitions, so parts land on both jbod1 and jbod2.
        for p in range(2):
            data = []  # 10MB in total
            for i in range(5):
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row

            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {'jbod1', 'jbod2'}

        wait_expire_1_thread.join()
        # Brief grace period for the background move to run.
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_volume_do_not_work","MergeTree()",0),
    ("replicated_mt_test_inserts_to_volume_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')",0),
    ("mt_test_inserts_to_volume_work","MergeTree()",1),
    ("replicated_mt_test_inserts_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')",1),
])
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
    """INSERT-time TTL routing to a volume: with background moves stopped,
    already-expired rows must be written straight to the 'external' volume
    (positive=1), while parts containing an unexpired row stay on jbod1
    (positive=0)."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        # Stop background moves so only the insert-time placement is observed.
        node1.query("SYSTEM STOP MOVES {name}".format(name=name))

        for p in range(2):
            data = []  # 20MB in total
            for i in range(10):
                # Negative case: the first row gets a far-future TTL so the
                # part is not considered expired at insert time.
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300)))  # 1MB row

            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_disk_eventually_work","MergeTree()"),
    ("replicated_mt_test_moves_to_disk_eventually_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')"),
])
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
    """A TTL move whose destination disk is full must be retried and succeed
    once space frees up: jbod2 is first filled by a temp table, expired parts
    remain on jbod1, and after dropping the temp table they move to jbod2."""
    try:
        name_temp = name + "_temp"
        # Fill the 40MB jbod2 tmpfs almost completely so the TTL move below
        # has no room there.
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))

        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row

        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            # Already expired at insert time, so a move to jbod2 is wanted immediately.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        # jbod2 is full, so the expired parts could not be moved yet.
        assert set(used_disks) == {"jbod1"}

        # Free the space and give the background scheduler time to retry the move.
        node1.query("DROP TABLE {}".format(name_temp))
        time.sleep(2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod2"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
def test_replicated_download_ttl_info(started_cluster):
    """A replica that fetches a part (rather than inserting it itself) must
    also receive the part's TTL info and place the part on the TTL
    destination disk, even though its own background moves are stopped."""
    name = "test_replicated_ttl_info"
    engine = "ReplicatedMergeTree('/clickhouse/test_replicated_download_ttl_info', '{replica}')"
    try:
        for i, node in enumerate((node1, node2), start=1):
            node.query("""
                CREATE TABLE {name} (
                    s1 String,
                    d1 DateTime
                ) ENGINE = {engine}
                ORDER BY tuple()
                TTL d1 TO DISK 'external'
                SETTINGS storage_policy='small_jbod_with_external'
            """.format(name=name, engine=engine))
        node1.query("SYSTEM STOP MOVES {}".format(name))
        # Insert an already-expired row on node2; node1 must download it.
        node2.query("INSERT INTO {} (s1, d1) VALUES ('{}', toDateTime({}))".format(name, get_random_string(1024 * 1024), time.time()-100))
        assert set(get_used_disks_for_table(node2, name)) == {"external"}
        time.sleep(1)  # wait for the replication fetch
        assert node1.query("SELECT count() FROM {}".format(name)).splitlines() == ["1"]
        # Fetched part lands directly on 'external' despite STOP MOVES.
        assert set(get_used_disks_for_table(node1, name)) == {"external"}
    finally:
        for node in (node1, node2):
            try:
                node.query("DROP TABLE IF EXISTS {}".format(name))
            except:
                # best-effort cleanup; a node may be unavailable
                continue
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_merges_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_merges_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')",0),
    ("mt_test_merges_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_merges_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')",1),
])
def test_merges_to_disk_work(started_cluster, name, engine, positive):
    """A merge of fully-expired parts should write the merged part straight
    to the TTL destination ('external'); in the negative case one row per
    part expires later, so the merge result stays on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))
        # Freeze both merges and moves so two separate parts accumulate.
        node1.query("SYSTEM STOP MERGES {}".format(name))
        node1.query("SYSTEM STOP MOVES {}".format(name))
        wait_expire_1 = 10
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()
        for _ in range(2):
            data = []  # 16MB in total
            for i in range(8):
                # Negative case: first row of each part expires at time_2,
                # so the part is not fully expired at merge time.
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row
            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)  # midpoint between time_1 and time_2
        node1.query("SYSTEM START MERGES {}".format(name))
        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)  # let the merge finish
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
    ("mt_test_merges_with_full_disk_work","MergeTree()"),
    ("replicated_mt_test_merges_with_full_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')"),
])
def test_merges_with_full_disk_work(started_cluster, name, engine):
    """When the TTL destination disk is full, a merge of expired parts must
    still succeed, falling back to the source disk ('jbod1')."""
    try:
        name_temp = name + "_temp"
        # Fill jbod2 so the TTL destination has no free space.
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))
        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row
        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))
        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()
        for _ in range(2):
            data = []  # 12MB in total
            for i in range(6):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row
            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
        wait_expire_1_thread.join()  # rows are now expired
        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)  # let the merge finish
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}  # Merged to the same disk against the rule.
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_after_merges_do_not_work","MergeTree()",0),
    ("replicated_mt_test_moves_after_merges_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')",0),
    ("mt_test_moves_after_merges_work","MergeTree()",1),
    ("replicated_mt_test_moves_after_merges_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')",1),
])
def test_moves_after_merges_work(started_cluster, name, engine, positive):
    """After two parts are merged (before expiry), the merged part's
    recomputed TTL must trigger a background move to 'external' once it
    expires; in the negative case one row expires later, so no move."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))
        wait_expire_1 = 10
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()
        for _ in range(2):
            data = []  # 14MB in total
            for i in range(7):
                # Negative case: one row per part expires at the later time_2.
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row
            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        # Merge the two parts while nothing is expired yet.
        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)  # past time_1, before time_2
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}
        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive,bar", [
    ("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"DELETE"),
    ("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"DELETE"),
    ("mt_test_moves_after_alter_work","MergeTree()",1,"DELETE"),
    ("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"DELETE"),
    ("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"TO DISK 'external'"),
    ("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"TO DISK 'external'"),
    ("mt_test_moves_after_alter_work","MergeTree()",1,"TO DISK 'external'"),
    ("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"TO DISK 'external'"),
])
def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, bar):
    """MODIFY TTL fully replaces the old move-to-disk rule: after the ALTER
    (positive case) expired rows stay on 'jbod1'; without the ALTER they
    are moved to 'external' right at insert."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))
        if positive:
            node1.query("""
                ALTER TABLE {name}
                    MODIFY TTL
                    d1 + INTERVAL 15 MINUTE {bar}
            """.format(name=name, bar=bar))  # That shall disable TTL.
        data = []  # 10MB in total
        for i in range(10):
            # d1 in the past: expired under the ORIGINAL TTL rule.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row
        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1" if positive else "external"}
        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_materialize_ttl_in_partition","MergeTree()"),
    ("replicated_mt_test_materialize_ttl_in_partition","ReplicatedMergeTree('/clickhouse/test_materialize_ttl_in_partition', '1')"),
])
def test_materialize_ttl_in_partition(started_cluster, name, engine):
    """A TTL added by ALTER is not applied to existing parts until
    MATERIALIZE TTL; materializing only partitions 2 and 4 must move just
    those partitions to 'external'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int8,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY p1
            PARTITION BY p1
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))
        data = []  # 5MB in total
        for i in range(5):
            # One already-expired 1MB row per partition 0..4.
            data.append((str(i), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row
        node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        time.sleep(0.5)
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        node1.query("""
            ALTER TABLE {name}
                MODIFY TTL
                d1 TO DISK 'external'
        """.format(name=name))
        time.sleep(0.5)
        # New TTL alone must not touch the already existing parts.
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        node1.query("""
            ALTER TABLE {name}
                MATERIALIZE TTL IN PARTITION 2
        """.format(name=name))
        node1.query("""
            ALTER TABLE {name}
                MATERIALIZE TTL IN PARTITION 4
        """.format(name=name))
        time.sleep(0.5)
        # Only the materialized partitions (2 and 4) should have moved.
        used_disks_sets = []
        for i in range(len(data)):
            used_disks_sets.append(set(get_used_disks_for_table(node1, name, partition=i)))
        assert used_disks_sets == [{"jbod1"}, {"jbod1"}, {"external"}, {"jbod1"}, {"external"}]
        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == str(len(data))
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_alter_multiple_ttls_positive", "MergeTree()", True),
    ("mt_replicated_test_alter_multiple_ttls_positive", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", True),
    ("mt_test_alter_multiple_ttls_negative", "MergeTree()", False),
    ("mt_replicated_test_alter_multiple_ttls_negative", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", False),
])
def test_alter_multiple_ttls(started_cluster, name, engine, positive):
    """Copyright 2019, Altinity LTD
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

    Check that when multiple TTL expressions are set
    and before any parts are inserted the TTL expressions
    are changed with ALTER command then all old
    TTL expressions are removed and the
    the parts are moved to the specified disk or volume or
    deleted if the new TTL expression is triggered
    and are not moved or deleted when it is not.
    """
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 + INTERVAL 30 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 60 SECOND TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
        """.format(name=name, engine=engine))
        # Replace the original TTL rules before any part is inserted.
        node1.query("""
            ALTER TABLE {name} MODIFY
            TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 5 SECOND TO VOLUME 'external',
                d1 + INTERVAL 10 SECOND DELETE
        """.format(name=name))
        for p in range(3):
            data = []  # 6MB in total
            now = time.time()
            for i in range(2):
                p1 = p
                s1 = get_random_string(1024 * 1024)  # 1MB
                # Negative case: first row per partition expires far in the future.
                d1 = now - 1 if i > 0 or positive else now + 300
                data.append("({}, '{}', toDateTime({}))".format(p1, s1, d1))
            node1.query("INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(name=name, values=",".join(data)))
        used_disks = get_used_disks_for_table(node1, name)
        # BUG FIX: the original `assert x == a if positive else b` parsed as
        # `assert (x == a) if positive else b`; since `b` was a truthy
        # non-empty set/list, the negative cases could never fail.  The
        # conditional is now parenthesized so both branches really compare.
        assert set(used_disks) == ({"jbod2"} if positive else {"jbod1", "jbod2"})
        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
        time.sleep(5)  # second TTL rule (move to 'external') fires
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == ({"external"} if positive else {"jbod1", "jbod2"})
        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
        time.sleep(5)  # third TTL rule (DELETE) fires
        node1.query("OPTIMIZE TABLE {name} FINAL".format(name=name))
        # Negative case keeps the one not-yet-expired row per partition.
        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == (["0"] if positive else ["3"])
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
    ("concurrently_altering_ttl_mt","MergeTree()"),
    ("concurrently_altering_ttl_replicated_mt","ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')",),
])
def test_concurrent_alter_with_ttl_move(started_cluster, name, engine):
    """Stress test: run INSERTs, ALTER ... MOVE, ALTER UPDATE, MODIFY TTL
    and OPTIMIZE FINAL concurrently; the server must stay responsive and
    no inserted row may be lost."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name))
        # Pre-generate unique values so the final COUNT() is exactly 5*100.
        values = list({ random.randint(1, 1000000) for _ in range(0, 1000) })
        def insert(num):
            # Spread inserts over partitions 201903 and 201904.
            for i in range(num):
                day = random.randint(11, 30)
                value = values.pop()
                month = '0' + str(random.choice([3, 4]))
                node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))
        def alter_move(num):
            def produce_alter_move(node, name):
                move_type = random.choice(["PART", "PARTITION"])
                if move_type == "PART":
                    # system.parts may be briefly unreadable under load; retry.
                    for _ in range(10):
                        try:
                            parts = node1.query("SELECT name from system.parts where table = '{}' and active = 1".format(name)).strip().split('\n')
                            break
                        except QueryRuntimeException:
                            pass
                    else:
                        raise Exception("Cannot select from system.parts")
                    move_part = random.choice(["'" + part + "'" for part in parts])
                else:
                    move_part = random.choice([201903, 201904])
                move_disk = random.choice(["DISK", "VOLUME"])
                if move_disk == "DISK":
                    move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
                else:
                    move_volume = random.choice(["'main'", "'external'"])
                try:
                    node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
                        name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume))
                except QueryRuntimeException as ex:
                    # Moves racing with merges/mutations may legally fail.
                    pass
            for i in range(num):
                produce_alter_move(node1, name)
        def alter_update(num):
            for i in range(num):
                node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name))
        def alter_modify_ttl(num):
            # Replace the TTL with 1..10 random move rules each round.
            for i in range(num):
                ttls = []
                for j in range(random.randint(1, 10)):
                    what = random.choice(["TO VOLUME 'main'", "TO VOLUME 'external'", "TO DISK 'jbod1'", "TO DISK 'jbod2'", "TO DISK 'external'"])
                    when = "now()+{}".format(random.randint(-1, 5))
                    ttls.append("{} {}".format(when, what))
                node1.query("ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls)))
        def optimize_table(num):
            for i in range(num):
                node1.query("OPTIMIZE TABLE {} FINAL".format(name))
        p = Pool(15)
        tasks = []
        for i in range(5):
            tasks.append(p.apply_async(insert, (100,)))
            tasks.append(p.apply_async(alter_move, (100,)))
            tasks.append(p.apply_async(alter_update, (100,)))
            tasks.append(p.apply_async(alter_modify_ttl, (100,)))
            tasks.append(p.apply_async(optimize_table, (100,)))
        for task in tasks:
            task.get(timeout=120)
        # Server is alive and all 500 inserted rows survived.
        assert node1.query("SELECT 1") == "1\n"
        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,positive", [
    ("test_double_move_while_select_negative", 0),
    ("test_double_move_while_select_positive", 1),
])
def test_double_move_while_select(started_cluster, name, positive):
    """Move a part back and forth while a long SELECT may be holding it.
    In the positive case the running SELECT pins the part on 'jbod1', so
    the forced background move back to 'external' must not happen."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                n Int64,
                s String
            ) ENGINE = MergeTree
            ORDER BY tuple()
            PARTITION BY n
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name))
        node1.query("INSERT INTO {name} VALUES (1, '{string}')".format(name=name, string=get_random_string(10 * 1024 * 1024)))
        parts = node1.query("SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
        assert len(parts) == 1
        node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))
        def long_select():
            # The sleeps keep the SELECT (and its part snapshot) alive ~6s.
            if positive:
                node1.query("SELECT sleep(3), sleep(2), sleep(1), n FROM {name}".format(name=name))
        thread = threading.Thread(target=long_select)
        thread.start()
        node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'jbod1'".format(name=name, part=parts[0]))
        # Fill jbod1 to force ClickHouse to make move of partition 1 to external.
        node1.query("INSERT INTO {name} VALUES (2, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
        node1.query("INSERT INTO {name} VALUES (3, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
        node1.query("INSERT INTO {name} VALUES (4, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
        # If SELECT locked old part on external, move shall fail.
        assert node1.query("SELECT disk_name FROM system.parts WHERE table = '{name}' AND active = 1 AND name = '{part}'"
                           .format(name=name, part=parts[0])).splitlines() == ["jbod1" if positive else "external"]
        thread.join()
        assert node1.query("SELECT n FROM {name} ORDER BY n".format(name=name)).splitlines() == ["1", "2", "3", "4"]
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
DataTransformer_local_onlineExpert.py |
import csv
import os
import sys
import shutil
import time
import numpy as np
import scipy.io as sio
import yaml
from easydict import EasyDict
from os.path import dirname, realpath, pardir
from hashids import Hashids
import hashlib
sys.path.append(os.path.join(dirname(realpath(__file__)), pardir))
import utils.graphUtils.graphTools as graph
# from utils.graphUtils.graphTools import isConnected
# from dataloader.statetransformer import AgentState
# from dataloader.statetransformer_localGuidance import AgentState
# from dataloader.statetransformer_localGuidance_SDObs import AgentState
# from dataloader.statetransformer_localGuidance_SemiLocal import AgentState
# from dataloader.statetransformer_globalGuidance import AgentState
from dataloader.statetransformer_Guidance import AgentState
from scipy.spatial.distance import squareform, pdist
from multiprocessing import Queue, Process
class DataTransformer:
def __init__(self, config):
self.config = config
self.PROCESS_NUMBER = 4
self.num_agents = self.config.num_agents
self.size_map = [self.config.map_w, self.config.map_h]
self.AgentState = AgentState(self.config)
# self.communicationRadius = 5 # communicationRadius
# self.communicationRadius = 7 # communicationRadius
self.communicationRadius = self.config.commR # communicationRadius
self.zeroTolerance = 1e-9
self.delta = [[-1, 0], # go up
[0, -1], # go left
[1, 0], # go down
[0, 1], # go right
[0, 0]] # stop
self.num_actions = 5
self.root_path_save = self.config.failCases_dir
self.list_seqtrain_file = []
self.list_train_file = []
self.pathtransformer = self.pathtransformer_RelativeCoordinate
if self.config.dynamic_commR:
# comm radius that ensure initial graph connected
print("run on multirobotsim (radius dynamic) with collision shielding")
self.getAdjacencyMatrix = self.computeAdjacencyMatrix
else:
# comm radius fixed
print("run on multirobotsim (radius fixed) with collision shielding")
self.getAdjacencyMatrix = self.computeAdjacencyMatrix_fixedCommRadius
def set_up(self, epoch):
self.dir_input = os.path.join(self.config.failCases_dir, "input/")
self.dir_sol = os.path.join(self.config.failCases_dir, "output_ECBS/")
self.list_failureCases_solution = self.search_failureCases(self.dir_sol)
self.list_failureCases_input = self.search_failureCases(self.dir_input)
self.nameprefix_input = self.list_failureCases_input[0].split('input/')[-1].split('ID')[0]
self.list_failureCases_solution = sorted(self.list_failureCases_solution)
self.len_failureCases_solution = len(self.list_failureCases_solution)
self.current_epoch = epoch
self.task_queue = Queue()
self.path_save_solDATA = os.path.join(self.root_path_save, "Cache_data", "Epoch_{}".format(epoch))
try:
# Create target Directory
os.makedirs(self.path_save_solDATA)
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
def solutionTransformer(self):
for id_sol in range(self.len_failureCases_solution):
# for id_sol in range(21000):
self.task_queue.put(id_sol)
time.sleep(0.3)
processes = []
for i in range(self.PROCESS_NUMBER):
# Run Multiprocesses
p = Process(target=self.compute_thread, args=(str(i)))
processes.append(p)
[x.start() for x in processes]
[x.join() for x in processes]
def compute_thread(self,thread_id):
while True:
try:
id_sol = self.task_queue.get(block=False)
print('thread {} get task:{}'.format(thread_id, id_sol))
self.pipeline(id_sol)
except:
# print('thread {} no task, exit'.format(thread_id))
return
def pipeline(self,id_sol):
agents_schedule, agents_goal, makespan, map_data, id_case = self.load_ExpertSolution(id_sol)
log_str = 'Transform_failureCases_ID_#{:05d} from ID_MAP {:05d} in Epoch{}'.format(id_case[2],id_case[1],id_case[0])
print('############## {} ###############'.format(log_str))
self.pathtransformer(map_data, agents_schedule, agents_goal, makespan + 1, id_case)
def load_ExpertSolution(self, ID_case):
name_solution_file = self.list_failureCases_solution[ID_case]
map_setup = name_solution_file.split('output_')[-1].split('_IDMap')[0]
id_sol_map = name_solution_file.split('_IDMap')[-1].split('_IDCase')[0]
id_sol_case = name_solution_file.split('_IDCase')[-1].split('_')[0]
name_inputfile = os.path.join(self.dir_input,
'input_{}_IDMap{}_IDCase{}.yaml'.format(map_setup, id_sol_map, id_sol_case))
with open(name_inputfile, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
with open(name_solution_file, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_output = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
agentsConfig = data_config['agents']
num_agent = len(agentsConfig)
list_posObstacle = data_config['map']['obstacles']
if list_posObstacle == None:
map_data = np.zeros(self.size_map, dtype=np.int64)
else:
map_data = self.setup_map(list_posObstacle)
schedule = data_output['schedule']
makespan = data_output['statistics']['makespan']
# print(data_config)
# print(data_output)
goal_allagents = np.zeros([num_agent, 2])
schedule_agentsState = np.zeros([makespan + 1, num_agent, 2])
schedule_agentsActions = np.zeros([makespan + 1, num_agent, self.num_actions])
schedule_agents = [schedule_agentsState, schedule_agentsActions]
hash_ids = np.zeros(self.num_agents)
for id_agent in range(num_agent):
goalX = agentsConfig[id_agent]['goal'][0]
goalY = agentsConfig[id_agent]['goal'][1]
goal_allagents[id_agent][:] = [goalX, goalY]
schedule_agents = self.obtainSchedule(id_agent, schedule, schedule_agents, goal_allagents, makespan + 1)
str_id = '{}_{}_{}'.format(self.current_epoch,id_sol_case,id_agent)
int_id = int(hashlib.sha256(str_id.encode('utf-8')).hexdigest(), 16) % (10 ** 5)
# hash_ids[id_agent]=np.divide(int_id,10**5)
hash_ids[id_agent] = int_id
# print(id_sol_map, id_sol_case, hash_ids)
return schedule_agents, goal_allagents, makespan, map_data, (self.current_epoch, int(id_sol_map), int(id_sol_case), hash_ids)
def obtainSchedule(self, id_agent, agentplan, schedule_agents, goal_allagents, teamMakeSpan):
name_agent = "agent{}".format(id_agent)
[schedule_agentsState, schedule_agentsActions] = schedule_agents
planCurrentAgent = agentplan[name_agent]
pathLengthCurrentAgent = len(planCurrentAgent)
actionKeyListAgent = []
for step in range(teamMakeSpan):
if step < pathLengthCurrentAgent:
currentX = planCurrentAgent[step]['x']
currentY = planCurrentAgent[step]['y']
else:
currentX = goal_allagents[id_agent][0]
currentY = goal_allagents[id_agent][1]
schedule_agentsState[step][id_agent][:] = [currentX, currentY]
# up left down right stop
actionVectorTarget = [0, 0, 0, 0, 0]
# map action with respect to the change of position of agent
if step < (pathLengthCurrentAgent - 1):
nextX = planCurrentAgent[step + 1]['x']
nextY = planCurrentAgent[step + 1]['y']
# actionCurrent = [nextX - currentX, nextY - currentY]
elif step >= (pathLengthCurrentAgent - 1):
nextX = goal_allagents[id_agent][0]
nextY = goal_allagents[id_agent][1]
actionCurrent = [nextX - currentX, nextY - currentY]
actionKeyIndex = self.delta.index(actionCurrent)
actionKeyListAgent.append(actionKeyIndex)
actionVectorTarget[actionKeyIndex] = 1
schedule_agentsActions[step][id_agent][:] = actionVectorTarget
return [schedule_agentsState,schedule_agentsActions]
def setup_map(self, list_posObstacle):
num_obstacle = len(list_posObstacle)
map_data = np.zeros(self.size_map)
for ID_obs in range(num_obstacle):
obstacleIndexX = list_posObstacle[ID_obs][0]
obstacleIndexY = list_posObstacle[ID_obs][1]
map_data[obstacleIndexX][obstacleIndexY] = 1
return map_data
def pathtransformer_RelativeCoordinate(self, map_data, agents_schedule, agents_goal, makespan, ID_case):
# input: start and goal position,
# output: a set of file,
# each file consist of state (map. goal, state) and target (action for current state)
mode = 'train'
[schedule_agentsState, schedule_agentsActions] = agents_schedule
save_PairredData = {}
(current_epoch,id_sol_map, id_sol_case, hash_ids) = ID_case
# compute AdjacencyMatrix
GSO, communicationRadius = self.getAdjacencyMatrix(schedule_agentsState, self.communicationRadius)
# transform into relative Coordinate, loop "makespan" times
# print(map_data)
# print(agents_goal, schedule_agentsState, makespan)
self.AgentState.setmap(map_data)
input_seq_tensor = self.AgentState.toSeqInputTensor(agents_goal, schedule_agentsState, makespan)
# print(input_seq_tensor)
list_input = input_seq_tensor.cpu().detach().numpy()
save_PairredData.update({'map': map_data, 'goal': agents_goal, 'inputState': schedule_agentsState,
'inputTensor': list_input, 'target': schedule_agentsActions,
'GSO': GSO,'makespan':makespan, 'HashIDs':hash_ids, 'ID_Map':int(id_sol_map), 'ID_case':int(id_sol_case)})
self.save(mode, save_PairredData, ID_case, makespan)
print("Save as Relative Coordination - {}set_#{} at ID_Map{} from Epoch {}.".format(mode, id_sol_case, id_sol_map, current_epoch))
def save(self, mode, save_PairredData, ID_case, makespan):
(current_epoch, id_sol_map, id_sol_case, hash_ids) = ID_case
file_name = os.path.join(self.path_save_solDATA, '{}_IDMap{:05d}_IDCase{:05d}_MP{}.mat'.format(mode, int(id_sol_map), int(id_sol_case), makespan))
# print(file_name)
sio.savemat(file_name, save_PairredData)
def search_failureCases(self, dir):
# make a list of file name of input yaml
list_path = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if self.is_target_file(fname):
path = os.path.join(root, fname)
list_path.append(path)
return list_path
def is_target_file(self, filename):
DATA_EXTENSIONS = ['.yaml']
return any(filename.endswith(extension) for extension in DATA_EXTENSIONS)
def computeAdjacencyMatrix(self, pos, CommunicationRadius, connected=True):
# First, transpose the axis of pos so that the rest of the code follows
# through as legible as possible (i.e. convert the last two dimensions
# from 2 x nNodes to nNodes x 2)
# pos: TimeSteps x nAgents x 2 (X, Y)
# Get the appropriate dimensions
nSamples = pos.shape[0]
len_TimeSteps = pos.shape[0] # length of timesteps
nNodes = pos.shape[1] # Number of nodes
# Create the space to hold the adjacency matrices
W = np.zeros([len_TimeSteps, nNodes, nNodes])
threshold = CommunicationRadius # We compute a different
# threshold for each sample, because otherwise one bad trajectory
# will ruin all the adjacency matrices
for t in range(len_TimeSteps):
# Compute the distances
distances = squareform(pdist(pos[t])) # nNodes x nNodes
# Threshold them
W[t] = (distances < threshold).astype(pos.dtype)
# And get rid of the self-loops
W[t] = W[t] - np.diag(np.diag(W[t]))
# Now, check if it is connected, if not, let's make the
# threshold bigger
while (not graph.isConnected(W[t])) and (connected):
# while (not graph.isConnected(W[t])) and (connected):
# Increase threshold
threshold = threshold * 1.1 # Increase 10%
# Compute adjacency matrix
W[t] = (distances < threshold).astype(pos.dtype)
W[t] = W[t] - np.diag(np.diag(W[t]))
# And since the threshold has probably changed, and we want the same
# threshold for all nodes, we repeat:
W_norm = np.zeros([len_TimeSteps, nNodes, nNodes])
for t in range(len_TimeSteps):
# Initial matrix
allagentPos = pos[t]
distances = squareform(pdist(allagentPos, 'euclidean')) # nNodes x nNodes
W_t = (distances < threshold).astype(allagentPos.dtype)
W_t = W_t - np.diag(np.diag(W_t))
if np.any(W):
# if W is all non-zero matrix, do normalization
if self.config.symmetric_norm:
deg = np.sum(W_t, axis=0) # nNodes (degree vector)
zeroDeg = np.nonzero(np.abs(deg) < self.zeroTolerance)
deg[zeroDeg] = 1.
invSqrtDeg = np.sqrt(1. / deg)
invSqrtDeg[zeroDeg] = 0.
Deg = np.diag(invSqrtDeg)
W_t = Deg @ W_t @ Deg
maxEigenValue = self.get_maxEigenValue(W_t)
W_norm[t] = W_t/maxEigenValue
else:
# if W is all zero matrix, don't do any normalization
W_norm[t] = W
return W_norm, threshold
def get_maxEigenValue(self, matrix):
isSymmetric = np.allclose(matrix, np.transpose(matrix, axes=[1, 0]))
if isSymmetric:
W = np.linalg.eigvalsh(matrix)
else:
W = np.linalg.eigvals(matrix)
maxEigenvalue = np.max(np.real(W), axis=0)
return maxEigenvalue
# return np.max(np.abs(np.linalg.eig(matrix)[0]))
def computeAdjacencyMatrix_fixedCommRadius(self, pos, CommunicationRadius, connected=True):
len_TimeSteps = pos.shape[0] # length of timesteps
nNodes = pos.shape[1] # Number of nodes
# Create the space to hold the adjacency matrices
W_norm = np.zeros([len_TimeSteps, nNodes, nNodes])
for t in range(len_TimeSteps):
# Initial matrix
allagentPos = pos[t]
distances = squareform(pdist(allagentPos, 'euclidean')) # nNodes x nNodes
W = (distances < CommunicationRadius).astype(allagentPos.dtype)
W = W - np.diag(np.diag(W))
if np.any(W):
# if W is all non-zero matrix, do normalization
if self.config.symmetric_norm:
deg = np.sum(W, axis=0) # nNodes (degree vector)
zeroDeg = np.nonzero(np.abs(deg) < self.zeroTolerance)
deg[zeroDeg] = 1.
invSqrtDeg = np.sqrt(1. / deg)
invSqrtDeg[zeroDeg] = 0.
Deg = np.diag(invSqrtDeg)
W = Deg @ W @ Deg
maxEigenValue = self.get_maxEigenValue(W)
W_norm[t] = W/maxEigenValue
else:
# if W is all zero matrix, don't do any normalization
W_norm[t] = W
return W_norm, CommunicationRadius
def pathtransformer_GlobalCoordinate(self, map_data, agents_schedule, agents_goal, makespan, ID_case):
    """Package one solved case (map, goals, states, actions, makespan) and
    persist it through self.save under the 'train' split.

    agents_schedule is a pair [states, actions] for all agents.
    """
    mode = 'train'
    schedule_agentsState, schedule_agentsActions = agents_schedule
    save_PairredData = {
        'map': map_data,
        'goal': agents_goal,
        'inputState': schedule_agentsState,
        'target': schedule_agentsActions,
        'makespan': makespan,
    }
    self.save(mode, save_PairredData, ID_case)
    # print("Save as Global Coordination - {}set_#{}.".format(mode, ID_case))
# if __name__ == '__main__':
# config = {'num_agents': 12,
# 'map_w': 20,
# 'map_h': 20,
# 'failCases_dir': '/local/scratch/ql295/Data/MultiAgentDataset/test',
# 'exp_net': 'dcp'
# }
# config = {'num_agents': 12,
# 'map_w': 20,
# 'map_h': 20,
# 'failCases_dir': '/local/scratch/ql295/Data/MultiAgentDataset/experiments/dcpOEGAT_map20x20_rho1_10Agent/K3_HS0/1591839220/failure_cases/save',
# 'exp_net': 'dcp',
# 'FOV':9,
# 'guidance': 'Project_G'
# }
# config = {'num_agents': 20,
# 'map_w': 28,
# 'map_h': 28,
# 'failCases_dir': '/local/scratch/ql295/Data/MultiAgentDataset/experiments/dcpOEGAT_map20x20_rho1_10Agent/K3_HS0/1591839220/failure_cases',
# 'exp_net': 'dcp',
# 'FOV':9,
# 'guidance': 'Project_G'
# }
# config = {'num_agents': 10,
# 'map_w': 20,
# 'map_h': 20,
# 'failCases_dir': '/local/scratch/ql295/Data/Project_testbed/Quick_Test',
# 'exp_net': 'dcp',
# 'FOV':9,
# 'guidance': 'Project_G',
# 'commR': 7,
# 'dynamic_commR':False,
# 'symmetric_norm': False,
# }
# config_setup = EasyDict(config)
# DataTransformer = DataTransformer(config_setup)
# DataTransformer.set_up('1')
# DataTransformer.solutionTransformer()
|
error_handling.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""ErrorRendezvous handler for collecting errors from multiple threads."""
import contextlib
import threading
import time
import traceback
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
_UNINTERESTING_ERRORS = (errors.CancelledError,)
class ErrorRendezvous(object):
  """Resolve errors from multiple threads during TPU execution.

  TPU errors can occur on the infeed or outfeed threads as well as the main
  training thread.

  Depending on which thread "wins" and receives the session error first, we may
  end up showing users a confusing and non-actionable error message (session
  cancelled) instead of a root cause (e.g. a bad filename).

  The rendezvous object provides a location to capture these errors until all
  threads terminate. At that point we can choose the most informative error
  to report.
  """

  def __init__(self, num_sources):
    # string -> (message, traceback)
    self._errors = {}
    # Number of sources expected to report via record_error/record_done.
    self._num_sources = num_sources
    # Lazily-created daemon thread that closes the session after a delay;
    # created at most once (guarded in record_error).
    self._session_cancel_timer = None

  def record_error(self, source, exception, session=None):
    """Report an exception from the given source.

    If a session is passed, a timer will be registered to close it after a few
    seconds. This is necessary to ensure the main training loop does not hang
    if an infeed/oufeed error occurs. We sleep a few seconds to allow a more
    interesting error from another thread to propagate.

    Args:
      source: string, source of the error
      exception: Exception being thrown
      session: Session to close after delay.
    """
    logging.info('Error recorded from %s: %s', source, exception)
    # NOTE: format_exc() captures the traceback of the exception currently
    # being handled, so this is expected to be called from an except block.
    stack_trace = traceback.format_exc()
    self._errors[source] = (exception, stack_trace)

    if session is not None and self._session_cancel_timer is None:

      def _cancel_session():
        # Grace period before forcing the session closed, so a more
        # informative error from another thread can land first.
        time.sleep(5)
        try:
          session.close()
        except:  # pylint: disable=bare-except
          pass

      self._session_cancel_timer = threading.Thread(target=_cancel_session,)
      self._session_cancel_timer.daemon = True
      self._session_cancel_timer.start()

  def record_done(self, source):
    """Mark execution source `source` as done.

    If an error was originally reported from `source` it is left intact.

    Args:
      source: `str`, source being recorded
    """
    logging.info('%s marked as finished', source)
    if source not in self._errors:
      # None is a sentinel meaning "finished cleanly"; filtered out later
      # in raise_errors.
      self._errors[source] = None

  @contextlib.contextmanager
  def catch_errors(self, source, session=None):
    """Context manager to report any errors within a block."""
    try:
      yield
    except Exception as e:  # pylint: disable=broad-except
      self.record_error(source, e, session)

  def raise_errors(self, timeout_sec=5):
    """Wait for up to `timeout` seconds for all error sources to finish.

    Preferentially raise "interesting" errors (errors not in the
    _UNINTERESTING_ERRORS) set.

    Args:
      timeout_sec: Seconds to wait for other error sources.
    """
    # Poll once per second until every source has checked in or we time out.
    for _ in range(timeout_sec):
      if len(self._errors) == self._num_sources:
        break
      time.sleep(1)

    kept_errors = [(k, v) for (k, v) in self._errors.items() if v is not None]

    # First check for any interesting errors, then fall back on the session
    # cancelled errors etc.
    if not kept_errors:
      return

    for k, (exc, _) in kept_errors:
      if isinstance(exc, _UNINTERESTING_ERRORS):
        continue
      else:
        # NOTE(review): the stored stack_trace string is not re-attached
        # here; the exception is re-raised with its own traceback only.
        raise exc

    # Only uninteresting errors were recorded: raise the first of them.
    for k, (exc, _) in kept_errors:
      raise exc
|
pose_observer.py | import rospy
from std_msgs.msg import Header
from geometry_msgs.msg import Pose, PoseStamped, Point, Quaternion
import threading
import tf2_ros
import tf2_geometry_msgs
import time
from .observer import Observer
class PoseObserver(Observer):
    """Polls TF for the robot's pose in a fixed frame at a fixed rate and
    forwards each pose to registered observers via ``_call_event``.
    """

    # Identity pose; transforming it by the fixed->robot transform yields
    # the robot's pose expressed in the fixed frame.
    identical_pose = PoseStamped(
        header=Header(frame_id="map"),
        pose=Pose(
            position=Point(x=0, y=0, z=0),
            orientation=Quaternion(x=0, y=0, z=0, w=1)))

    def __init__(self, robot_frame_id, fixed_frame_id="map", rate=20.0, **kwargs):
        """Start the background pose-polling thread.

        Args:
            robot_frame_id: TF frame of the robot (e.g. "base_link").
            fixed_frame_id: reference frame the pose is expressed in.
            rate: polling frequency in Hz.
        """
        super(PoseObserver, self).__init__(**kwargs)
        self._robot_frame_id = robot_frame_id
        self._fixed_frame_id = fixed_frame_id
        self.tf_buffer = tf2_ros.Buffer(rospy.Duration(10))
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        # Bug fix: self._rate must exist BEFORE the worker thread starts,
        # otherwise _worker can race ahead and hit an AttributeError on
        # self._rate.sleep().
        self._rate = rospy.Rate(rate)
        self._pose_update_thread = threading.Thread(target=self._worker, name="pose_observer")
        self._pose_update_thread.start()

    def join(self):
        """Block until the pose-polling worker thread terminates."""
        self._pose_update_thread.join()

    def _worker(self):
        # Poll TF until ROS shuts down.
        # (Bug fix: removed the unused `start = time.clock()` line —
        # time.clock() was removed in Python 3.8 and the value was never read.)
        while not rospy.is_shutdown():
            pose = self.get_pose()
            if pose is not None:
                self._call_event(pose)
            try:
                self._rate.sleep()
            except rospy.ROSInterruptException as e:
                rospy.logdebug("PoseObserver: {}".format(e))

    def get_pose(self):
        """Return the robot pose in the fixed frame, or None if TF lookup fails."""
        try:
            transform = self.tf_buffer.lookup_transform(self._fixed_frame_id, self._robot_frame_id, rospy.Time(0), rospy.Duration(0.01))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException, rospy.ROSInterruptException) as e:
            rospy.logwarn_throttle(5, "PoseObserver get Exception: {}".format(e))
            return None
        if transform:
            pose = tf2_geometry_msgs.do_transform_pose(PoseObserver.identical_pose, transform)
        else:
            rospy.logwarn("PoseObserver get None")
            pose = None
        return pose
|
IsaacGym.py | from utils.config import set_seed, get_args, parse_sim_params, load_cfg
from utils.parse_task import parse_task
import numpy as np
import torch # import torch after isaacgym modules
import multiprocessing as mp
"""
run the following code in bash before running.
export LD_LIBRARY_PATH=/xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
can't use os.environ['LD_LIBRARY_PATH'] = /xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
"""
class PreprocessIsaacVecEnv:  # environment wrapper
    # Wraps an Isaac Gym vectorized task behind a minimal reset()/step()
    # interface, exposing the metadata RL trainers need (state_dim,
    # action_dim, action_max, max_step, env_num).

    def __init__(self, env_name, target_return=None, if_print=False, headless=True, data_type=torch.float32,
                 env_num=32, device_id=0, rl_device_id=-1):
        """Preprocess an Isaac Gym vec environment for RL training.
        [Isaac Gym](https://developer.nvidia.com/isaac-gym)"""
        # Override env_name if passed on the command line
        args = get_args(task_name=env_name, headless=headless)
        # set after `args = get_args()` # get_args() in .../utils/config.py
        args.device_id = device_id  # PhyX device
        # rl_device_id < 0 means "run the policy on CPU".
        args.rl_device = f"cuda:{rl_device_id}" if rl_device_id >= 0 else 'cpu'
        args.num_envs = env_num  # in `.../cfg/train/xxx.yaml`, `numEnvs`
        # set before load_cfg()
        cfg, cfg_train, logdir = load_cfg(args)
        sim_params = parse_sim_params(args, cfg, cfg_train)
        set_seed(cfg_train["seed"])
        task, env = parse_task(args, cfg, cfg_train, sim_params)

        self.env_name = env_name
        self.env = env
        self.data_type = data_type
        self.device = torch.device(env.rl_device)
        self.env_num = env.num_environments
        state = self.env.reset()
        # Override env_num with the actual batch size returned by reset();
        # presumably state is (env_num, state_dim) — TODO confirm.
        self.env_num = state.shape[0]
        self.target_return = target_return

        # Episode length: prefer the task's own limit, fall back to 1024.
        max_step = getattr(task, 'max_episode_length', None)
        max_step_default = getattr(task, '_max_episode_steps', None)
        if max_step is None:
            max_step = max_step_default
        if max_step is None:
            max_step = 2 ** 10

        import gym
        if_discrete = isinstance(env.act_space, gym.spaces.Discrete)
        self.state_dim = task.num_obs
        if if_discrete:
            self.action_dim = env.action_space.n
            raise RuntimeError("| Not support for discrete environment now. :(")
        elif isinstance(env.act_space, gym.spaces.Box):
            self.action_dim = task.num_actions
            action_max = float(env.action_space.high[0])
            # check: whether the action_max is correct, delete before uploading to github, vincent
            # (asserts the action space is symmetric: high == -low elementwise)
            assert not any(env.action_space.high + env.action_space.low)
        else:
            raise RuntimeError('| Please set these value manually: if_discrete=bool, action_dim=int, action_max=1.0')
        self.action_max, self.max_step = action_max, max_step
        self.if_discrete = if_discrete

        if if_print:
            print(f"\n| env_name: {self.env_name}, action space if_discrete: {self.if_discrete}"
                  f"\n| state_dim: {self.state_dim:4}, action_dim: {self.action_dim}, action_max: {self.action_max}"
                  f"\n| max_step: {self.max_step:4}, target_return: {self.target_return}")

    def reset(self) -> torch.Tensor:
        """Reset all environments; returns the batched initial state tensor."""
        return self.env.reset()

    def step(self, actions: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor, None):
        """Step all environments; returns (state, reward, done) — the info
        dict from the underlying env is dropped by the [:3] slice."""
        return self.env.step(actions)[:3]
class PreprocessIsaacOneEnv(PreprocessIsaacVecEnv):  # environment wrapper
    # Single-environment variant: strips the batch dimension and converts
    # tensors to numpy arrays for classic, non-vectorized RL loops.

    def __init__(self, env_name, target_return=None, if_print=False, headless=True, data_type=torch.float32,
                 env_num=1, device_id=0):
        assert env_num == 1
        super().__init__(env_name=env_name,
                         target_return=target_return,
                         if_print=if_print,
                         headless=headless,
                         data_type=data_type,
                         env_num=1,
                         device_id=device_id)

    def reset(self) -> torch.Tensor:
        """Reset and return the single environment's state as a numpy array."""
        state = self.env.reset()
        # NOTE(review): .detach().numpy() requires a CPU tensor; rl_device
        # defaults to 'cpu' here, but a CUDA tensor would need .cpu() first
        # — TODO confirm.
        return state[0].detach().numpy()

    def step(self, action: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, None):
        """Step the single environment with a numpy action; returns numpy
        (state, reward, done) plus the env's info dict."""
        # Add the batch dimension expected by the vectorized env.
        ten_action = torch.as_tensor(action, dtype=torch.float32).unsqueeze(0)
        ten_state, ten_reward, ten_done, info_dict = self.env.step(ten_action)
        state = ten_state[0].detach().numpy()
        reward = ten_reward[0].detach().numpy()
        done = ten_done[0].detach().numpy()
        return state, reward, done, info_dict
def build_isaac_gym_env(env, if_print=False, device_id=0):
    """Instantiate an Isaac Gym wrapper from an env name (or env-like object).

    Accepts either a string such as "IsaacOneEnvAnt" / "IsaacVecEnvHumanoid"
    or an object carrying that string in its `env_name` attribute.
    """
    env_name = getattr(env, 'env_name', env)
    assert isinstance(env_name, str)

    # The task name follows the 11-character "IsaacOneEnv"/"IsaacVecEnv" prefix.
    task_name = env_name[11:]
    assert task_name in {'Ant', 'Humanoid'}
    target_return = {'Ant': 4000, 'Humanoid': 7000}[task_name]

    if 'IsaacOneEnv' in env_name:
        return PreprocessIsaacOneEnv(task_name, target_return=target_return, if_print=if_print,
                                     env_num=1, device_id=device_id)
    if 'IsaacVecEnv' in env_name:
        return PreprocessIsaacVecEnv(task_name, target_return=target_return, if_print=if_print,
                                     env_num=32, device_id=device_id)
    raise ValueError(f'| build_env_from_env_name: need register: {env_name}')
def run_isaac_env(env_name, device_id):
    """Smoke-test an Isaac Gym env: step it with random actions for a few steps."""
    env = build_isaac_gym_env(env_name, if_print=True, device_id=device_id)

    # Single env gets a flat action vector; vectorized envs a batched one.
    if env.env_num == 1:
        def get_random_action():
            return torch.rand(env.action_dim, dtype=torch.float32) * 2 - 1
    else:
        def get_random_action():
            return torch.rand((env.env_num, env.action_dim), dtype=torch.float32) * 2 - 1

    total_step = 2 ** 4
    print("| total_step", total_step)
    for step_i in range(total_step):
        state, reward, done, info_dict = env.step(get_random_action())
        print('|', device_id, step_i, state.dtype)
    print('| env_num', env.env_num)
def run_multiple_process():
    """Run the one-env and vec-env demos in parallel subprocesses."""
    env_last_name = ['Ant', 'Humanoid'][0]
    one_env_name = f"IsaacOneEnv{env_last_name}"
    vec_env_name = f"IsaacVecEnv{env_last_name}"

    # Bug fix: the start method must be chosen BEFORE any Process objects
    # are created so they are bound to the 'spawn' context (required for
    # CUDA/IsaacGym workers); previously it was set after creation.
    mp.set_start_method(method='spawn')

    process_list = [
        mp.Process(target=run_isaac_env, args=(one_env_name, 4,)),
        mp.Process(target=run_isaac_env, args=(vec_env_name, 5,)),
    ]
    for p in process_list:
        p.start()
    for p in process_list:
        p.join()
# Script entry point: single-process demo on PhyX device 3; see
# run_multiple_process() for the parallel variant.
if __name__ == '__main__':
    run_isaac_env(env_name='IsaacVecEnvAnt', device_id=3)
    # run_multiple_process()
|
test_async.py | import os
import pytest
import threading
from smbclient import (
register_session,
open_file
)
def read_in_chunks(file_object, chunk_size=10 * 1024 * 1024):
    """Yield successive chunks (default 10 MiB) from file_object until EOF."""
    while True:
        chunk = file_object.read(chunk_size)
        if not chunk:
            return
        yield chunk
def write_to_file(local_dir, remote_dir):
    """Copy the remote SMB file at `remote_dir` to the local path `local_dir`
    in chunks, logging progress per chunk.

    Bug fix: the local file handle was opened with a bare open() and never
    closed; a context manager now guarantees flush/close even on error.
    """
    with open(local_dir, "wb") as f:
        with open_file(remote_dir, mode="rb") as fd:
            for chunk in read_in_chunks(fd):
                print(local_dir, "is being copied.")
                f.write(chunk)
def test_async_download_two_files():
    """Download two SMB files concurrently and wait for both to finish.

    Bug fix: the threads were started but never joined, so the test
    returned (and pytest could tear everything down) while the downloads
    were still in flight.
    """
    register_session("ZDHQ-HUB", username="reality", password="reality")
    thread1 = threading.Thread(target=write_to_file, args=("example1.dmp", r"\\ZDHQ-HUB\RealityWorkspace\example.dmp"))
    thread2 = threading.Thread(target=write_to_file, args=("example2.dmp", r"\\ZDHQ-HUB\RealityWorkspace\example2.dmp"))
    thread1.daemon = False
    thread2.daemon = False
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
def test_show_cur_dir():
    """Print the current working directory (debugging aid)."""
    cwd = os.getcwd()
    print(cwd)
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.storage.blob import BlockBlobService, BlobPermissions
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait
from azure.cli.core.commands.client_factory import UA_AGENT
from azure.cli.core.profiles import ResourceType
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, does_app_already_exist, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (RUNTIME_TO_DEFAULT_VERSION_FUNCTIONAPP, NODE_VERSION_DEFAULT_FUNCTIONAPP,
RUNTIME_TO_IMAGE_FUNCTIONAPP, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None,  # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None):
    """Create an App Service web app on an existing plan.

    Handles Linux (code, single container, multi-container), Windows
    container (xenon) and Windows code apps, then wires up the chosen
    deployment source (remote git / local git / container registry).
    `plan` may be a plan name in `resource_group_name` or a full resource id.
    Raises CLIError for conflicting or invalid option combinations and for a
    missing plan.
    """
    SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    client = web_client_factory(cmd.cli_ctx)
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    # `reserved` is the ARM flag for Linux plans.
    is_linux = plan_info.reserved
    node_default_version = NODE_VERSION_DEFAULT
    location = plan_info.location
    site_config = SiteConfig(app_settings=[])
    # always_on is not supported on Free/Shared/Basic tiers.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up)
    helper = _StackRuntimeHelper(cmd, client, linux=is_linux)

    if is_linux:
        # Exactly one of runtime / container image / multicontainer config
        # may be chosen for a Linux app.
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file

        if runtime:
            site_config.linux_fx_version = runtime
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               "Please invoke 'list-runtimes' to cross check".format(runtime))
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                          value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)

    elif plan_info.is_xenon:  # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)

    elif runtime:  # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime))  # pylint: disable=line-too-long
        match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
        if not match['displayName'].startswith('node'):
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))

    else:  # windows webapp without runtime specified
        site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                      value=node_default_version))

    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)

    if using_webapp_up:  # when the routine is invoked as a help method for webapp up
        logger.info("will set appsetting for enabling build")
        site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                      value='https://{}.scm.azurewebsites.net/detectors'.format(name)))

    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)

    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)

    if deployment_container_image_name:
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)

    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one container-configuration style is selected.

    The multicontainer type and file must be supplied together, and exactly
    one of runtime / container image / multicontainer config may be used.
    """
    # config type and config file are a pair: one without the other is invalid
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = [opt for opt in (runtime, deployment_container_image_name, multicontainer_config_type) if opt]
    return len(chosen) == 1
def parse_docker_image_name(deployment_container_image_name):
    """Extract the registry host from a container image name, or None.

    "myregistry.io:5000/repo/img" -> "myregistry.io:5000". Docker-Hub-style
    names ("library/img", "img") and falsy input yield None, since the
    prefix carries neither a dot nor a port.
    """
    if not deployment_container_image_name:
        return None
    registry, sep, _ = deployment_container_image_name.rpartition('/')
    # No slash at all, or a prefix without '.'/':' -> not a registry host.
    if not sep or ('.' not in registry and ':' not in registry):
        return None
    return registry
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Add or update app settings (and slot-sticky settings) on a web app.

    Each entry in `settings`/`slot_settings` is either a "KEY=VALUE" string
    or a JSON blob (including the list format produced by the `list`
    command). Slot settings win over regular settings on key collisions and
    are additionally registered as slot-sticky. Raises CLIError when neither
    argument is given.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')

    settings = settings or []
    slot_settings = slot_settings or []

    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest in [(settings, result), (slot_settings, slot_result)]:
        for s in src:
            try:
                # Try JSON first; fall back to KEY=VALUE parsing below.
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        # slotSetting defaults to True for list-format input.
                        if t.get('slotSetting', True):
                            slot_result[t['name']] = t['value']
                            # Mark each setting as the slot setting
                        else:
                            result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value

    # Slot settings take precedence over plain settings on key collisions.
    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value

    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        # Register the new names as slot-sticky configuration.
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure Storage mount (BYOS) to a web app.

    Raises CLIError if a configuration with `custom_id` already exists —
    use the `update` command for that. When `slot_setting` is True the
    configuration id is also registered as slot-sticky.
    Returns the updated azure-storage-accounts properties dict.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)

    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))

    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                                         share_name=share_name, access_key=access_key,
                                                                         mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)

    if slot_setting:
        # Make this storage-account configuration slot-sticky.
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
            client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure Storage mount on a web app.

    Only the fields passed as non-None are changed; the rest are carried
    over from the existing configuration. Raises CLIError if no
    configuration with `custom_id` exists. Returns the updated
    azure-storage-accounts properties dict.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')

    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)

    # pop() removes the old entry; it is re-added below under the same id.
    existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)

    if not existing_account_config:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))

    new_account_config = AzureStorageInfoValue(
        type=storage_type or existing_account_config.type,
        account_name=account_name or existing_account_config.account_name,
        share_name=share_name or existing_account_config.share_name,
        access_key=access_key or existing_account_config.access_key,
        mount_path=mount_path or existing_account_config.mount_path
    )

    azure_storage_accounts.properties[custom_id] = new_account_config

    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)

    if slot_setting:
        # Make this storage-account configuration slot-sticky.
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
            client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy a function app, optionally with a remote (Oryx) build.

    Linux Consumption apps without remote build deploy by uploading the zip
    to storage; everything else goes through the Kudu zipdeploy endpoint.
    Raises CLIError when the plan cannot be resolved or when remote build is
    requested on a non-Linux app.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)

    # Bug fix: previously a still-missing plan fell through to
    # is_plan_consumption(cmd, None) and crashed with an opaque
    # AttributeError instead of an actionable message.
    if plan_info is None:
        raise CLIError("Could not determine the app service plan for function app '{}'".format(name))

    if build_remote and not app.reserved:
        raise CLIError('Remote build is only available on Linux function apps')

    is_consumption = is_plan_consumption(cmd, plan_info)
    if (not build_remote) and is_consumption and app.reserved:
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    if build_remote:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    else:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)

    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Zip-deploy a web app; thin wrapper over enable_zip_deploy."""
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Push a local zip (`src`) to the app's Kudu zipdeploy endpoint.

    Runs the deployment asynchronously (isAsync=true) and then polls the
    Kudu deployment-status URL until completion or `timeout`. Raises
    CLIError when the scm url cannot be resolved or when Kudu reports a
    conflicting deployment (HTTP 409).
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    try:
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    except ValueError:
        raise CLIError('Failed to fetch scm url for function app')

    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'

    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers = authorization
    headers['content-type'] = 'application/octet-stream'
    headers['User-Agent'] = UA_AGENT

    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
        logger.warning("Starting zip deployment. This operation can take a while to complete ...")
        res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
        logger.warning("Deployment endpoint responded with status code %d", res.status_code)

    # check if there's an ongoing process
    if res.status_code == 409:
        raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
                       "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
                       "is removed.".format(deployment_status_url))

    # check the status of async deployment
    response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                            authorization, timeout)
    return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Ensure the app settings required for a remote (Oryx) build are set.

    Sets ENABLE_ORYX_BUILD and SCM_DO_BUILD_DURING_DEPLOYMENT to true if not
    already, and removes WEBSITE_RUN_FROM_PACKAGE which conflicts with
    remote build. Sleeps after each mutation to let the settings propagate.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    enable_oryx_build = None
    scm_do_build_during_deployment = None
    website_run_from_package = None

    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value in ('true', '1')
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value

    if not ((enable_oryx_build is True) and (scm_do_build_during_deployment is True)):
        logger.warning("Setting ENABLE_ORYX_BUILD to true")
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD=true",
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        # Allow the app-setting update to propagate before deploying.
        time.sleep(5)

    if website_run_from_package is not None:
        # WEBSITE_RUN_FROM_PACKAGE conflicts with remote build deployments.
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        time.sleep(5)
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Force-disable the remote (Oryx) build app settings.

    Sets ENABLE_ORYX_BUILD and SCM_DO_BUILD_DURING_DEPLOYMENT to false unless
    both are already explicitly false.
    """
    oryx_enabled = None
    scm_build_enabled = None
    for entry in get_app_settings(cmd, resource_group_name, name, slot):
        setting_value = entry['value'].lower()
        if entry['name'] == 'ENABLE_ORYX_BUILD':
            oryx_enabled = setting_value in ('true', '1')
        if entry['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_build_enabled = setting_value in ('true', '1')
    # De Morgan of: not (oryx is False and scm_build is False)
    if oryx_enabled is not False or scm_build_enabled is not False:
        logger.warning("Setting ENABLE_ORYX_BUILD to false")
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD=false",
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        # give the backend a moment to pick up the new settings
        time.sleep(5)
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Upload a zip package to the app's storage account and run it from there.

    Uses the app's 'AzureWebJobsStorage' connection string to upload *src* to
    a 'function-releases' blob container, generates a long-lived read SAS for
    the blob, points WEBSITE_RUN_FROM_PACKAGE at the SAS URL, then syncs the
    function triggers.

    :raises CLIError: when no 'AzureWebJobsStorage' app setting exists.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])
    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
    container_name = "function-releases"
    # timestamp + uuid makes each release blob name unique
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)
    # text progress bar adapted from:
    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        total_length = 30
        filled_length = int(round(total_length * current) / float(total))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)
    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)
    now = datetime.datetime.now()
    # SAS valid from 10 minutes ago (clock-skew tolerance) to ~10 years out
    blob_start = now - datetime.timedelta(minutes=10)
    blob_end = now + datetime.timedelta(weeks=520)
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)
    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)
    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ce:
        # This SDK function throws an error if Status Code is 200
        if ce.status_code != 200:
            raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    """Dispatch a settings update to the plain or the '_slot' SDK operation.

    Resolves `operation_name` (suffixed with '_slot' when a slot is given) on
    `client.web_apps` and forwards the setting properties to it.
    """
    web_client = client or web_client_factory(cli_ctx)
    suffix = '' if slot is None else '_slot'
    operation = getattr(web_client.web_apps, operation_name + suffix)
    # NOTE(review): the bare `str` below is passed where the SDK expects the
    # `kind` argument; preserved as-is from the original call signature.
    if slot is None:
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Return the webapp with display-friendly props, fetching it unless an
    instance was already supplied (when invoked as a helper, not a command)."""
    webapp = app_instance or _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Getter used by the generic update machinery: fetch the site resource."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
    """Setter used by the generic update machinery: create-or-update the site
    from kwargs['parameters'], targeting the slot variant when a slot is given."""
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    call_args = {
        'resource_group_name': resource_group_name,
        'name': name,
        'site_envelope': instance,
        'skip_dns_registration': skip_dns_registration,
        'skip_custom_domain_verification': skip_custom_domain_verification,
        'force_dns_registration': force_dns_registration,
        'ttl_in_seconds': ttl_in_seconds,
    }
    if slot:
        call_args['slot'] = slot
        return client.web_apps.create_or_update_slot(**call_args)
    return client.web_apps.create_or_update(**call_args)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Apply simple boolean updates to a webapp instance (rejects function apps).

    Flag values arrive as the strings 'true'/'false'; anything else sets False.
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    for attr, flag in (('client_affinity_enabled', client_affinity_enabled),
                       ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None):
    """Update a functionapp; optionally move it onto a different, compatible plan.

    The plan may be a full resource id or a plan name in the app's own group.
    """
    client = web_client_factory(cmd.cli_ctx)
    if plan is None:
        return instance
    if is_valid_resource_id(plan):
        parsed_plan = parse_resource_id(plan)
        dest_plan_info = client.app_service_plans.get(parsed_plan['resource_group'], parsed_plan['name'])
    else:
        dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
    if dest_plan_info is None:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
    instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
    """Raise CLIError unless both source and destination plans are Consumption
    or Elastic Premium — the only plan families a functionapp may move between."""
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'

    def _is_allowed(plan):
        return is_plan_consumption(cmd, plan) or is_plan_elastic_premium(cmd, plan)

    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise CLIError('Could not determine the current plan of the functionapp')
    if not _is_allowed(src_plan_info):
        raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
    if not _is_allowed(dest_plan_instance):
        raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Setter used by the generic update machinery for function apps only."""
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise CLIError('Not a function app to update')
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
    """List web apps (function apps are filtered out by their kind)."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' not in app.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps, ordered by their deleted-site id."""
    deleted = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    deleted.sort(key=lambda site: site.deleted_site_id)
    return deleted
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a previously deleted app into an existing app (or slot).

    When restore_content_only is falsy, the old configuration is recovered too.
    """
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List function apps only (selected by their kind)."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' in app.kind]
def _list_app(cli_ctx, resource_group_name=None):
    """Fetch all apps (optionally scoped to one resource group), renaming the
    server-farm property on each for display."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        pager = client.web_apps.list_by_resource_group(resource_group_name)
    else:
        pager = client.web_apps.list()
    # _rename_server_farm_props mutates and returns each webapp
    return [_rename_server_farm_props(app) for app in pager]
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted apps across all supported locations, then apply the
    optional resource-group / name / slot filters (names case-insensitive)."""
    client = web_client_factory(cli_ctx)
    deleted = []
    for location in _get_deleted_apps_locations(cli_ctx):
        deleted.extend(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        deleted = [d for d in deleted if d.resource_group == resource_group_name]
    if name:
        wanted_name = name.lower()
        deleted = [d for d in deleted if d.deleted_site_name.lower() == wanted_name]
    if slot:
        wanted_slot = slot.lower()
        deleted = [d for d in deleted if d.slot.lower() == wanted_slot]
    return deleted
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
    """Enable the system-assigned managed identity on an app and return it,
    optionally granting it *role* over *scope*."""
    ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
    from azure.cli.core.commands.arm import assign_identity as _assign_identity

    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    updated = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return updated.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Return the app's managed identity (None when no identity is assigned)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
    """Disable the system-assigned managed identity on an app and return the
    (now cleared) identity."""
    ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
    from azure.cli.core.commands.arm import assign_identity as _assign_identity

    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        # identity type 'None' clears the system-assigned identity
        webapp.identity = ManagedServiceIdentity(type='None')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    updated = _assign_identity(cmd.cli_ctx, getter, setter)
    return updated.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Fetch the site's authentication/authorization settings."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update the site's authentication/authorization settings.

    The parameters are copied onto the fetched settings object by reflection
    over this function's own arguments (see below), so parameter names must
    stay in sync with the settings model's attribute names.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action means redirect-to-login with that provider as default
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    import inspect
    frame = inspect.currentframe()
    # these flags arrive as 'true'/'false' strings and must become booleans
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # copy every truthy argument (skipping cmd and resource_group_name) onto
    # the settings object.
    # NOTE(review): args[2:] also includes 'name' and 'slot' — confirm that
    # setting those attributes on the model is intentional.
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
    """Return display names of the supported runtime stacks (windows or linux)."""
    client = web_client_factory(cmd.cli_ctx)
    helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
    return [stack['displayName'] for stack in helper.stacks]
def _rename_server_farm_props(webapp):
    """Expose server_farm_id as app_service_plan_id for display.

    # Should be renamed in SDK in a future release
    """
    webapp.app_service_plan_id = webapp.server_farm_id
    del webapp.server_farm_id
    return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete a webapp (or one slot); each 'keep_*' flag suppresses the matching
    cleanup by passing False for it, otherwise the SDK default (None) applies."""
    client = web_client_factory(cmd.cli_ctx)
    keep_flags = {
        'delete_metrics': False if keep_metrics else None,
        'delete_empty_server_farm': False if keep_empty_plan else None,
        'skip_dns_registration': False if keep_dns_registration else None,
    }
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot, **keep_flags)
    else:
        client.web_apps.delete(resource_group_name, name, **keep_flags)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop the app (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start the app (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart the app (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Fetch the site's configuration object."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """List app settings as [{'name', 'value', 'slotSetting'}], marking which
    names are slot-sticky."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(app_settings.properties, sticky_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """List connection strings as [{'name', 'value', 'slotSetting'}], marking
    which names are slot-sticky."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': conn_strings.properties[key],
             'slotSetting': key in sticky_names} for key in conn_strings.properties]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """List the app's Azure Storage mounts as [{'name', 'value', 'slotSetting'}]."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': key,
             'value': accounts.properties[key],
             'slotSetting': key in sticky_names} for key in accounts.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL from the publish profiles onto the webapp."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    ftp_profile = next(p for p in profiles if p['publishMethod'] == 'FTP')
    webapp.ftpPublishingUrl = ftp_profile['publishUrl']
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
    """Build a site fx-version string such as 'DOCKER|image' from an image name.

    A whitespace-only image name collapses to the single-space sentinel used
    to mean "unset". A container_config_type (e.g. COMPOSE) takes precedence
    as the prefix; an image already prefixed with 'docker|' is kept as-is
    (stripped).
    """
    stripped = custom_image_name.strip()
    if not stripped:
        # handles case of only spaces
        return ' '
    if container_config_type:
        return '{}|{}'.format(container_config_type, custom_image_name)
    if stripped.lower().startswith('docker|'):
        return stripped
    return '{}|{}'.format('DOCKER', custom_image_name)
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Apply a custom container image to the correct fx-version field:
    linux (reserved) apps get linux_fx_version, xenon apps windows_fx_version."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=fx_version if web_app.reserved else None,
                               windows_fx_version=fx_version if web_app.is_xenon else None,
                               slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Clear linux_fx_version by writing the single-space sentinel value."""
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return the site's linux or windows fx version; '' when neither is set."""
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    for candidate in (site_config.linux_fx_version, site_config.windows_fx_version):
        if candidate:
            return candidate
    return ''
def url_validator(url):
    """Return True when *url* parses with a scheme, network location AND path."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return bool(parts.scheme and parts.netloc and parts.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Base64-decode the multicontainer config embedded after '|' in the
    site's fx version; only valid for the known multicontainer type prefixes."""
    from base64 import b64decode
    linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not linux_fx_version.startswith(tuple(MULTI_CONTAINER_TYPES)):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config (local path or URL) and return it as a
    base64-encoded string."""
    from base64 import b64encode
    if url_validator(file_name):
        config_file_bytes = urlopen(file_name, context=_ssl_context()).read()
    else:
        with open(file_name, 'rb') as f:
            config_file_bytes = f.read()
    # encode the raw bytes and return as text
    return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
                        windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
                        python_version=None, net_framework_version=None,
                        java_version=None, java_container=None, java_container_version=None,
                        remote_debugging_enabled=None, web_sockets_enabled=None,
                        always_on=None, auto_heal_enabled=None,
                        use32_bit_worker_process=None,
                        min_tls_version=None,
                        http20_enabled=None,
                        app_command_line=None,
                        ftps_state=None,
                        generic_configurations=None):
    """Update the site configuration.

    Named parameters are copied onto the fetched config object by reflection
    over this function's own arguments, so the parameter names must match the
    SiteConfig model's attribute names. `generic_configurations` entries are
    either JSON objects or 'key=value' strings applied last.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
    if linux_fx_version:
        # custom containers must not mount the app-service storage volume
        if linux_fx_version.strip().lower().startswith('docker|'):
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if pre_warmed_instance_count is not None:
        pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
                                                               min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # these arrive as 'true'/'false' strings and must become booleans
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # copy every truthy argument (skipping cmd, resource_group_name, name) onto
    # the config object. NOTE(review): args[3:] starts at 'slot' — confirm that
    # setting a 'slot' attribute on the model is intentional.
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    result = {}
    for s in generic_configurations:
        try:
            # JSON object form, e.g. '{"alwaysOn": true}'
            result.update(get_json_object(s))
        except CLIError:
            # fall back to the 'key=value' form
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        setattr(configs, config_name, value)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given app settings, also un-marking any of them that were
    recorded as slot-sticky."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky = slot_cfg_names.app_setting_names
    slot_cfg_dirty = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            slot_cfg_dirty = True
    if slot_cfg_dirty:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove one Azure Storage mount by id, also un-marking it as slot-sticky
    when necessary."""
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    accounts.properties.pop(custom_id, None)
    sticky = slot_cfg_names.azure_storage_config_names
    if sticky and custom_id in sticky:
        sticky.remove(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Build the SSL context used for urlopen calls.

    Returns a permissive (non-verifying) SSLContext on Python < 3.4 or when
    running in Cloud Shell on Windows; otherwise the stdlib default verifying
    context.
    """
    # BUG FIX: the original called sys.platform.system() — sys.platform is a
    # plain str, so that raised AttributeError whenever in_cloud_console() was
    # true. platform.system() is the correct call.
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Convert an app-settings dict into the CLI's list-of-dicts output,
    masking credential-bearing values and flagging slot-sticky names."""
    sticky = slot_cfg_names or []
    return [{'name': key,
             'value': app_settings[key],
             'slotSetting': key in sticky} for key in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add or update connection strings (optionally marking them slot-sticky).

    Each entry in *settings* / *slot_settings* is a 'NAME=value' pair; a value
    wrapped in single or double quotes has the quotes stripped. Entries from
    *slot_settings* are additionally recorded in the slot configuration names.

    :raises CLIError: when neither settings nor slot_settings is given.
    """
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        # strip away the quotes used as separators; guard against an empty
        # value, which previously raised IndexError on value[0]
        if value and value[0] in ("'", '"'):
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given connection strings, also un-marking any that were
    recorded as slot-sticky."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky = slot_cfg_names.connection_string_names
    slot_cfg_dirty = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            slot_cfg_dirty = True
    if slot_cfg_dirty:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that make up the custom-container configuration surface.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values must be masked (nulled) before being displayed.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update the app's custom-container settings.

    Builds the docker-related app settings, auto-discovering ACR credentials
    when the registry is *.azurecr.io and none were supplied, applies the
    custom image as the site's fx version, and optionally applies a
    base64-encoded multicontainer config. Returns the resulting container
    settings with credentials masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)

    # try ACR credential lookup only when no credentials were supplied
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first label of the host (path when no scheme given)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed

    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)

    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read so the returned values reflect what was actually stored
    settings = get_app_settings(cmd, resource_group_name, name, slot)

    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')

    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Functionapp variant of update_container_settings — no app-service
    storage toggle and no multicontainer support."""
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for an Azure Container Registry by name.

    Finds the registry across the subscription, then returns
    (username, password) from its admin credentials.

    :raises CLIError: when zero or multiple registries match, or when the
        registry's admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    # NOTE(review): item.name is lowercased but registry_name is compared as
    # given — callers appear to pass an already-lowercased host label; confirm.
    result = [item for item in result if item.name.lower() == registry_name]
    if not result or len(result) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(result[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Remove custom-container configuration: clear the linux fx version and
    delete the docker-related app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show the container-related app settings with credentials masked."""
    app_settings = get_app_settings(cmd, resource_group_name, name, slot)
    filtered = _filter_for_container_settings(cmd, resource_group_name, name, app_settings,
                                              show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(filtered)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Functionapp variant of show_container_settings (no multicontainer config)."""
    return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related settings, appending the current fx version
    (and, on request, its base64-decoded multicontainer form)."""
    container_settings = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        container_settings.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                                   'value': fx_version})
        if show_multicontainer_config:
            decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            container_settings.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                                       'value': decoded_value})
    return container_settings
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Null out credential-bearing values (APPSETTINGS_TO_MASK) before display.

    NOTE(review): masking only takes effect when *settings* is a dict keyed by
    setting name; when a list of {'name': ..., 'value': ...} dicts is passed
    (as some callers do), no element compares equal to a masked name and the
    list passes through unchanged — confirm whether that is intended.
    """
    for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
        settings[x] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to the app (or to one of its slots)."""
    HostNameBinding = cmd.get_models('HostNameBinding')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
    if slot is not None:
        return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname,
                                                                       binding, slot)
    return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Remove a hostname binding from a web app or a specific slot."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
    return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming each resource name to its leaf segment."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        binding.name = binding.name.split('/')[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Resolve the inbound IP address of a web app.

    For apps hosted in an App Service Environment the VIPs are read from the
    ASE; otherwise the app's default hostname is resolved through DNS.
    Returns a dict of the form {'ip': <address>}.
    """
    SslState = cmd.get_models('SslState')
    # logics here are ported from portal
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if webapp.hosting_environment_profile:
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            # internal (ILB) ASE: use the internal address directly
            ip_address = address.internal_ip_address
        else:
            # external ASE: prefer an IP-SSL virtual IP when one is configured
            vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a web app, optionally cloning configuration
    from production or another slot."""
    Site, SiteConfig = cmd.get_models('Site', 'SiteConfig')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    slot_def.site_config = SiteConfig()
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    created = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
    # strip the 'app/slot' prefix so only the slot name is shown
    created.name = created.name.split('/')[-1]
    return created
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning configuration."""
    Site = cmd.get_models('Site')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
    created = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
    # strip the 'app/slot' prefix so only the slot name is shown
    created.name = created.name.split('/')[-1]
    return created
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Clone site config, app settings and connection strings from a source slot
    (or from production when *configuration_source* equals the app name) into
    *slot*, excluding settings that are marked slot-sticky.
    """
    clone_from_prod = configuration_source.lower() == webapp.lower()
    site_config = get_site_configs(cmd, resource_group_name, webapp,
                                   None if clone_from_prod else configuration_source)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    src_slot = None if clone_from_prod else configuration_source
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    # drop slot-sticky app settings so they stay with their original slot
    for a in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(a, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    # drop slot-sticky connection strings likewise
    for a in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(a, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings.properties, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Configure deployment-from-source-control for an app (or slot).

    Two distinct paths:
    - when --cd-project-url is given, continuous delivery is set up through
      VSTS via VstsContinuousDeliveryProvider;
    - otherwise a SiteSourceControl record is written directly, retrying on
      50x responses because a preceding command may have restarted the SCM site.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    if cd_project_url:
        # VSTS continuous-delivery path
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        try:
            status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
                                                             branch, git_token, slot_swap, cd_app_type_details,
                                                             cd_project_url, cd_account_create, location, test,
                                                             private_repo_username, private_repo_password, webapp_list)
        except RuntimeError as ex:
            raise CLIError(ex)
        logger.warning(status.status_message)
        return status
    # direct (non-VSTS) path: the VSTS-only options are meaningless here
    non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                       python_version, cd_account_create, test, slot_swap]
    if any(non_vsts_params):
        raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                       'python_version, cd_account_create, test, slot_swap')
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        # cache the GitHub token subscription-wide before wiring up the repo
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)
    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'))
    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)   # retry in a moment
def update_git_token(cmd, git_token=None):
    """Update the GitHub source-control token cached by App Service.

    Passing no token clears the existing one; the token is shared across
    the whole subscription.
    """
    client = web_client_factory(cmd.cli_ctx)
    from azure.mgmt.web.models import SourceControl
    token_value = git_token or ''
    sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=token_value)
    return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of an app (or slot)."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
    return result
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Delete the source-control configuration of an app (or slot)."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
    return result
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch deployment to local git and return the authenticated repo URL."""
    SiteConfigResource = cmd.get_models('SiteConfigResource')
    client = web_client_factory(cmd.cli_ctx)
    site_config = SiteConfigResource(
        location=_get_location_from_webapp(client, resource_group_name, name))
    site_config.scm_type = 'LocalGit'
    if slot is not None:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    else:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a source-control sync for the app (or slot)."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        # swallow only the spurious "error" raised for successful (200/204) responses
        if ex.status_code not in [200, 204]:
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List app service plans, subscription-wide or scoped to one resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        plans = list(client.app_service_plans.list(detailed=True))  # enables querying "numberOfSites"
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
                            app_service_environment=None, sku='B1', number_of_workers=None, location=None,
                            tags=None, no_wait=False):
    """Create an App Service plan.

    For an ASE-hosted plan the location is taken from the ASE itself (the
    explicit --location value is ignored); otherwise a missing location falls
    back to the resource group's location. Linux and Hyper-V (Windows
    container) are mutually exclusive.
    """
    HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
        'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    _validate_asp_sku(app_service_environment, sku)
    if is_linux and hyper_v:
        raise CLIError('usage error: --is-linux | --hyper-v')
    client = web_client_factory(cmd.cli_ctx)
    if app_service_environment:
        if hyper_v:
            raise CLIError('Windows containers is not yet supported in app service environment')
        ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
        ase_def = HostingEnvironmentProfile(id=ase_id)
        ase_list = client.app_service_environments.list()
        ase_found = False
        # the plan must live in the same region as the hosting ASE
        for ase in ase_list:
            if ase.id.lower() == ase_id.lower():
                location = ase.location
                ase_found = True
                break
        if not ase_found:
            raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
    else:  # Non-ASE
        ase_def = None
        if location is None:
            location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    # reserved/hyper_v are tri-state in the API: only pass True, never False
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
                              per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
    return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
                       resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Patch an AppServicePlan instance's SKU tier/name and/or worker count in place."""
    if sku is None and number_of_workers is None:
        logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.name = normalized
        sku_def.tier = get_sku_name(normalized)
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function-app plan's SKU/workers and, for Elastic Premium plans,
    the maximum burst (elastic worker) count.

    Fix: the original called update_app_service_plan twice — once with the
    unvalidated worker count before validation ran, duplicating the "no
    update" warning and mutating the plan before --max-burst was checked.
    Validate everything first, then apply once.
    """
    if max_burst is not None:
        if not is_plan_elastic_premium(cmd, instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Show the app's backup schedule; raise a friendly error when none exists."""
    try:
        config = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                         'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
    return config
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List existing backups of the app (or slot)."""
    backups = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                      'list_backups', slot)
    return backups
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger a one-off backup of the app (or slot) into the given storage container."""
    BackupRequest = cmd.get_models('BackupRequest')
    client = web_client_factory(cmd.cli_ctx)
    # normalize: drop a trailing '.zip' from a user-supplied backup name
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = BackupRequest(backup_request_name=backup_name,
                            storage_account_url=storage_account_url, databases=databases)
    if not slot:
        return client.web_apps.backup(resource_group_name, webapp_name, request)
    return client.web_apps.backup_slot(resource_group_name, webapp_name, request, slot)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the app's scheduled-backup configuration.

    Arguments left unspecified are filled from the current configuration; if
    no configuration exists yet, all schedule arguments must be provided.
    """
    DefaultErrorResponseException, BackupSchedule, BackupRequest = cmd.get_models(
        'DefaultErrorResponseException', 'BackupSchedule', 'BackupRequest')
    configuration = None
    # normalize: drop a trailing '.zip' from a user-supplied backup name
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        # default name: "<app>_<UTC timestamp>"
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except DefaultErrorResponseException:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # CLI passes the flag as a string; coerce to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    # carry over database settings from the existing configuration as defaults
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore an app (or slot) from a backup blob in the given storage container."""
    RestoreRequest = cmd.get_models('RestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    # the backup blob always carries a '.zip' extension
    blob_name = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = RestoreRequest(storage_account_url=storage_account_url,
                             blob_name=blob_name, overwrite=overwrite,
                             site_name=target_name, databases=databases,
                             ignore_conflicting_host_names=ignore_hostname_conflict)
    if not slot:
        return client.web_apps.restore(resource_group_name, webapp_name, 0, request)
    return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, request, slot)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of the app (or slot)."""
    snapshots = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                        'list_snapshots', slot)
    return snapshots
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore an app from a snapshot.

    When both --source-resource-group and --source-name are given, the
    snapshot of that source app (or slot) is restored into the target without
    overwriting; otherwise the app is overwritten with its own snapshot.
    Configuration is restored too unless --restore-content-only is set.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    if all([source_resource_group, source_name]):
        # Restore from source app to target app
        sub_id = get_subscription_id(cmd.cli_ctx)
        source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
            "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
        if slot:
            return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
        return client.web_apps.restore_snapshot(resource_group_name, name, request)
    if any([source_resource_group, source_name]):
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    # Overwrite app with its own snapshot
    request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Look up the Azure location of a resource group."""
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    return resource_client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the Microsoft.Web 'deletedSites' resource type is available."""
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    web_provider = resource_client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the authenticated local-git clone URL for the app's SCM site."""
    creds = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    repo = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(repo.scheme, creds.publishing_user_name, repo.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https URL of the app's Kudu/SCM host."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    scm_host = next((h for h in webapp.host_name_ssl_states or []
                     if h.host_type == HostType.repository), None)
    if scm_host is None:
        # this should not happen, but throw anyway
        raise ValueError('Failed to retrieve Scm Uri')
    return "https://{}".format(scm_host.name)
def get_publishing_user(cmd):
    """Show the subscription-wide deployment (publishing) user."""
    return web_client_factory(cmd.cli_ctx).get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    if password is None:
        # prompt interactively when possible; otherwise demand both values up front
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user = User(publishing_user_name=user_name)
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Fetch the app's publishing credentials (resolves the long-running operation)."""
    poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'list_publishing_credentials', slot)
    return poller.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
    """Fetch the app's publish profiles and return them as plain dicts.

    Fixes: when the XML contains a single <publishProfile> element, xmltodict
    returns a bare dict instead of a list, and the original loop then iterated
    over the dict's keys producing garbage; also joins the streamed chunks
    instead of concatenating strings in a loop.
    """
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    full_xml = ''.join(chunk.decode() for chunk in content)
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    if not isinstance(profiles, list):
        # a single profile element parses to a dict rather than a one-element list
        profiles = [profiles]
    converted = []
    for profile in profiles:
        # strip the leading '@' xmltodict put in for attributes
        converted.append({key.lstrip('@'): value for key, value in profile.items()})
    return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle Docker continuous deployment and report the CI webhook URL."""
    update_app_settings(cmd, resource_group_name, name,
                        ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Report whether Docker CI is enabled and, if so, the SCM webhook URL."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        if credentials:
            cd_settings['CI_CD_URL'] = credentials.scm_uri + '/docker/hook'
        else:
            cd_settings['CI_CD_URL'] = ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the app's URL in a browser; optionally start tailing the log stream."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Build the browsable URL of the app, preferring https when any SSL binding is enabled."""
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    host = site.enabled_host_names[0]  # picks the custom domain URL incase a domain is assigned
    ssl_on = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    scheme = 'https' if ssl_on else 'http'
    return scheme + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure the app's diagnostic logs (application, web-server/container,
    detailed errors, failed-request tracing).

    Options left as None leave the corresponding config section untouched
    (the SiteLogsConfig field is sent as None).
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging is not None:
        # explicit off -> level 'Off'; on without a level -> default 'Error'
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level=level)
        application_logs = ApplicationLogsConfig(file_system=fs_log)
    http_logs = None
    # web-server and docker-container logging share the same http-logs config
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Show the app's diagnostic-logs configuration."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'get_diagnostic_logs_configuration', slot)
    return result
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable auto-swap on a slot (default target 'production') or disable it."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List deployment slots, flattening names and exposing the plan name."""
    client = web_client_factory(cmd.cli_ctx)
    slots = list(client.web_apps.list_slots(resource_group_name, webapp))
    for item in slots:
        item.name = item.name.split('/')[-1]
        setattr(item, 'app_service_plan', parse_resource_id(item.server_farm_id)['name'])
        del item.server_farm_id
    return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap a slot with its target, preview-apply the swap, or reset a preview."""
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        return client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                              slot, (target_slot or 'production'), True)
    if action == 'preview':
        if target_slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                   webapp, slot, True)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                             slot, target_slot, True)
    # we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Set ramp-up (traffic routing) rules from 'slot=percentage' pairs."""
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # the slot's hostname is "<app>-<slot>.<rest of default hostname>"
    host_prefix, _, host_rest = site.default_host_name.partition('.')
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(
            action_host_name='{}-{}.{}'.format(host_prefix, slot, host_rest),
            reroute_percentage=float(percentage),
            name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Show the app's current ramp-up (traffic routing) rules."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all ramp-up rules, routing 100% of traffic to production."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the app's CORS allow-list."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    updated = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return updated.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove specific origins from the CORS allow-list, or clear it when none are given."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if not allowed_origins:
            configs.cors.allowed_origins = []
        else:
            remaining = [origin for origin in (configs.cors.allowed_origins or [])
                         if origin not in allowed_origins]
            configs.cors.allowed_origins = remaining
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Show the app's CORS settings."""
    return get_site_configs(cmd, resource_group_name, name, slot).cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Tail the live log stream from the app's SCM (Kudu) site until Ctrl+C.

    The HTTP streaming runs on a daemon thread; the main thread sleeps in a
    loop so a KeyboardInterrupt can terminate the command.
    """
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    t.daemon = True  # let the process exit even though the stream thread never returns
    t.start()
    while True:
        time.sleep(100)  # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the zipped historical logs from the app's SCM site."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(scm_url.rstrip('/') + '/dump', user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return the (user, password) publishing credentials for the app (or slot)."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name,
                                     'list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """Fetch logs from a Kudu URL with basic auth.

    With *log_file* set, the response body is downloaded to that path in
    binary chunks; otherwise the response is streamed line-by-line to stdout
    (used by the live log-tail command).
    """
    import certifi
    import urllib3
    try:
        # prefer pyopenssl-backed TLS when available
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # preload_content=False keeps the body streaming instead of buffering it all
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file:  # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else:  # streaming
        std_encoding = sys.stdout.encoding
        for chunk in r.stream():
            if chunk:
                # Extra encode() and decode for stdout which does not surpport 'utf-8'
                print(chunk.decode(encoding='utf-8', errors='replace')
                      .encode(std_encoding, errors='replace')
                      .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    """Upload a .pfx certificate for the app and return the created Certificate.

    :param certificate_password: password protecting the .pfx file.
    :param certificate_file: path to the .pfx file on disk.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    # Fix: read the pfx inside a context manager so the file handle is not leaked
    # (the original opened the file and never closed it).
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    # Certificate resource name is deterministic so repeated uploads update in place.
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    """Decrypt the .pfx file and return the certificate's SHA-1 thumbprint (colons stripped)."""
    # Fix: read the file via a context manager — the original leaked the handle
    # (open(...).read() with no close()).
    with open(certificate_file, 'rb') as f:
        pfx_data = f.read()
    p12 = OpenSSL.crypto.load_pkcs12(pfx_data, certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    # digest() returns e.g. b'AB:CD:...' — decode and drop the separators.
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List all App Service certificates in the given resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the certificate whose thumbprint matches; raise CLIError when absent."""
    client = web_client_factory(cmd.cli_ctx)
    all_certs = client.certificates.list_by_resource_group(resource_group_name)
    match = next((c for c in all_certs if c.thumbprint == certificate_thumbprint), None)
    if match is None:
        raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
    return client.certificates.delete(resource_group_name, match.name)
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    """Import a Key Vault certificate into App Service for use by the web app.

    Creates an App Service Certificate resource that references the Key Vault
    secret; App Service syncs the certificate material itself, so the
    Microsoft.Azure.WebSites service principal needs Secret:Get on the vault.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    # --key-vault may be a vault name or a full ARM resource id; normalize it.
    kv_id = _format_key_vault_id(cmd.cli_ctx, key_vault, resource_group_name)
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    # Deterministic certificate resource name: <app rg>-<vault>-<secret>.
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
    lnk_msg = 'Find more details here: {}'.format(lnk)
    # Best-effort permission check: warn (do not fail) when we cannot confirm
    # the App Service principal can read secrets from the vault.
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name):
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning(lnk_msg)
    kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
    return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    """Create a free App Service managed certificate for *hostname*.

    Preconditions enforced here: the app exists, its plan is not Free/Shared,
    and the hostname is already bound to the app as a custom domain.
    """
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
    # Managed certs are not offered on Free/Shared plans; look the plan up to check.
    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
    # The hostname must already be registered on the app before a cert can be issued.
    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name, slot_text))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    # canonical_name marks this as a managed (App Service issued) certificate.
    easy_cert_def = Certificate(location=location, canonical_name=hostname,
                                server_farm_id=server_farm_id, password='')
    return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
                                                certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name):
    """Best-effort check that the App Service principal can Get secrets from the vault.

    Returns True only when an access policy for the Microsoft.Azure.WebSites
    app (public or government cloud id) grants the Secret "Get" permission;
    returns False otherwise, including when directory lookups fail.
    """
    from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
    vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # Well-known app ids of the Microsoft.Azure.WebSites first-party app.
    AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
    AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            # Resolve the policy's object id to a service principal to read its app id.
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
                for perm in policy.permissions.secrets:
                    if perm == "Get":
                        return True
        except GraphErrorException:
            pass  # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
                                host_name, ssl_state, thumbprint, slot=None):
    """Submit a site update that flips the SSL binding state of one host name."""
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    ssl_binding = HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)
    # A minimal Site payload: only the binding change plus required location/tags.
    site_patch = Site(host_name_ssl_states=[ssl_binding],
                      location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                   'create_or_update', slot, site_patch)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Apply *ssl_type* (bind or disable) for the cert matching *certificate_thumbprint*.

    Certificates live in the plan's resource group, which may differ from the
    app's resource group, so the lookup goes through the server farm id.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
    for webapp_cert in webapp_certs:
        if webapp_cert.thumbprint == certificate_thumbprint:
            # Single non-wildcard SAN: bind directly to that host name.
            if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
                return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                                   webapp_cert.host_names[0], ssl_type,
                                                   certificate_thumbprint, slot)
            # Otherwise match the cert's host names (including wildcards)
            # against the host names actually configured on the app.
            query_result = list_hostnames(cmd, resource_group_name, name, slot)
            hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
            to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
            for h in to_update:
                _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                            h, ssl_type, certificate_thumbprint, slot)
            return show_webapp(cmd, resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind an uploaded certificate to the app, SNI-based or IP-based."""
    SslState = cmd.get_models('SslState')
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Disable the SSL binding for the certificate matching the thumbprint."""
    SslState = cmd.get_models('SslState')
    return _update_ssl_binding(cmd, resource_group_name, name,
                               certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
    """Lazily loads the available runtime stacks from ARM and resolves display names.

    Each resolved stack dict carries a 'displayName'; Windows stacks also get a
    'configs' mapping and a 'setter' callable that applies the stack either to
    site_config fields or to app settings.
    """

    def __init__(self, cmd, client, linux=False):
        # client: WebSiteManagement client used to query available stacks.
        # linux: whether to query Linux (True) or Windows (False) stacks.
        self._cmd = cmd
        self._client = client
        self._linux = linux
        self._stacks = []

    def resolve(self, display_name):
        """Return the stack dict whose displayName matches (case-insensitive), or None."""
        self._load_stacks()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)

    @property
    def stacks(self):
        """All available stacks (loaded on first access)."""
        self._load_stacks()
        return self._stacks

    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        """Apply the stack's config key/values directly onto site_config fields."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config

    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        """Apply the stack's config key/values as app settings (used for node)."""
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
        return site_config

    def _load_stacks(self):
        """Fetch and cache the available stacks for the configured OS; no-op if cached."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        # raw=True so the response body can be decoded manually below.
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            # Linux entries only need a display name (the full runtime version).
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            # Maps stack name to the site_config field (or app setting) it controls.
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
            # node settings go through app settings; everything else through site_config
            for r in result:
                r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                               r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component."""
    ai_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = ai_client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create (or update) an App Service plan suitable for function apps.

    --max-burst is only valid on Elastic Premium; worker counts are clamped
    to the 0..20 range accepted by the service.
    """
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    if max_burst is not None:
        # burst scaling only exists on the Elastic Premium tier
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    client = web_client_factory(cmd.cli_ctx)
    if location is None:
        # default to the resource group's location
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    plan_def = AppServicePlan(location=location, tags=tags,
                              sku=SkuDescription(tier=tier, name=sku, capacity=number_of_workers),
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
    """Return True when *plan_info* is an AppServicePlan on the consumption (Dynamic) tier."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if not isinstance(plan_info, AppServicePlan):
        return False
    if not isinstance(plan_info.sku, SkuDescription):
        return False
    return plan_info.sku.tier.lower() == 'dynamic'
def is_plan_elastic_premium(cmd, plan_info):
    """Return True when *plan_info* is an AppServicePlan on the Elastic Premium tier."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            # Fix: compare case-insensitively, consistent with is_plan_consumption
            # (the old exact match on 'ElasticPremium' silently missed other casings).
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    """Convert *val* to int, raising a user-facing CLIError naming *flag* on failure."""
    try:
        return int(val)
    except ValueError as ex:
        # Fix: chain the original ValueError so the real parse failure is kept
        # in the traceback instead of an unrelated "during handling" context.
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag)) from ex
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Parse *value* as an int and require it to lie within [min_val, max_val]."""
    value = validate_and_convert_to_int(flag_name, value)
    if not min_val <= value <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, functions_version=None, runtime=None, runtime_version=None,
                    consumption_plan_location=None, app_insights=None, app_insights_key=None,
                    disable_app_insights=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    docker_registry_server_password=None, docker_registry_server_user=None,
                    deployment_container_image_name=None, tags=None):
    """Create a function app on either a consumption plan or an existing App Service plan.

    Validates the mutually exclusive plan options, builds the Site/SiteConfig
    payloads (runtime, storage, App Insights app settings), creates the app,
    then wires up git deployment and container settings as requested.
    """
    # pylint: disable=too-many-statements, too-many-branches
    if functions_version is None:
        logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
                       "be required. To create a 2.x function you would pass in the flag `--functions_version 2`")
        functions_version = '2'
    # --deployment-source-url and --deployment-local-git are mutually exclusive.
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    # Exactly one of --plan / --consumption-plan-location must be given.
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if runtime is not None:
        runtime = runtime.lower()
    if consumption_plan_location:
        # Consumption plan: validate the region supports the Dynamic SKU.
        locations = list_consumption_locations(cmd)
        location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        # --plan may be a bare name or a full ARM resource id.
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        # reserved == Linux plan in the App Service resource model.
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    # Linux apps need an explicit runtime unless a custom container image supplies one.
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    if runtime:
        if is_linux and runtime not in LINUX_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
                           .format(', '.join(LINUX_RUNTIMES)))
        if not is_linux and runtime not in WINDOWS_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
                           .format(', '.join(WINDOWS_RUNTIMES)))
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
    if runtime_version is not None:
        if runtime is None:
            raise CLIError('Must specify --runtime to use --runtime-version')
        allowed_versions = RUNTIME_TO_IMAGE_FUNCTIONAPP[functions_version][runtime].keys()
        if runtime_version not in allowed_versions:
            raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
                           '--functions_version {}. Supported versions are: {}'
                           .format(runtime_version, runtime, functions_version, ', '.join(allowed_versions)))
    # Validates the storage account and yields its connection string.
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                # Custom container: point the app at the image and disable shared storage.
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
                if runtime not in RUNTIME_TO_IMAGE_FUNCTIONAPP[functions_version].keys():
                    raise CLIError("An appropriate linux image for runtime:'{}' was not found".format(runtime))
        if deployment_container_image_name is None:
            site_config.linux_fx_version = _get_linux_fx_functionapp(is_consumption,
                                                                     functions_version,
                                                                     runtime,
                                                                     runtime_version)
    else:
        functionapp_def.kind = 'functionapp'
    # adding appsetting to site to make it a function
    site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
                                                  value=_get_extension_version_functionapp(functions_version)))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION',
                                                  value=_get_website_node_version_functionapp(functions_version,
                                                                                              runtime,
                                                                                              runtime_version)))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
        site_config.always_on = True
    # If plan is elastic premium or windows consumption, we need these app settings
    is_windows_consumption = consumption_plan_location is not None and not is_linux
    if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
    # App Insights: explicit key wins, then a named component, else auto-create
    # unless the user disabled it.
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif not disable_app_insights:
        create_app_insights = True
    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully"
                       "created but is not active until content is published using"
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
    if deployment_container_image_name:
        update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
                                              deployment_container_image_name, docker_registry_server_user,
                                              docker_registry_server_password)
    return functionapp
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_linux_fx_functionapp(is_consumption, functions_version, runtime, runtime_version):
    """Compute linux_fx_version: 'RUNTIME|version' on consumption, a container image otherwise."""
    if runtime_version is None:
        # fall back to the default version registered for this runtime/major version
        runtime_version = RUNTIME_TO_DEFAULT_VERSION_FUNCTIONAPP[functions_version][runtime]
    if is_consumption:
        return '{}|{}'.format(runtime.upper(), runtime_version)
    # App Service or Elastic Premium plans run a prebuilt container image.
    image = RUNTIME_TO_IMAGE_FUNCTIONAPP[functions_version][runtime][runtime_version]
    return _format_fx_version(image)
def _get_website_node_version_functionapp(functions_version, runtime, runtime_version):
    """Pick the WEBSITE_NODE_DEFAULT_VERSION value for a new function app."""
    # An explicit version only applies when the runtime actually is node;
    # everything else gets the default for the chosen functions version.
    if runtime == 'node' and runtime_version is not None:
        return '~{}'.format(runtime_version)
    return NODE_VERSION_DEFAULT_FUNCTIONAPP[functions_version]
def try_create_application_insights(cmd, functionapp):
    """Create an Application Insights component for *functionapp* and wire up its key.

    Named after the function app, placed in its resource group and region.
    Failures only warn — App Insights is best-effort during app creation.
    """
    creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
                           'Please use the Azure Portal to manually create and configure the Application Insights, ' \
                           'if needed.'
    ai_resource_group_name = functionapp.resource_group
    ai_name = functionapp.name
    ai_location = functionapp.location
    app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    ai_properties = {
        "name": ai_name,
        "location": ai_location,
        "kind": "web",
        "properties": {
            "Application_Type": "web"
        }
    }
    appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
    if appinsights is None or appinsights.instrumentation_key is None:
        logger.warning(creation_failed_warn)
        return
    # We make this success message a warning so it does not interfere with regular JSON output on stdout
    logger.warning('Application Insights \"%s\" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', appinsights.name, appinsights.id)
    # Attach the new component's key to the function app.
    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Configure git-based deployment for a newly created app.

    Either links an external git repository (manual integration) or enables the
    app's local git endpoint, attaching the endpoint URL onto *webapp*.
    Remote-link failures are downgraded to warnings so app creation still succeeds.
    """
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            # Normalize the SDK exception into a user-facing error without raising.
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)
    if deployment_local_git:
        local_git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
        # Surface the local git URL on the returned webapp object for the caller.
        setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate the storage account for function app use and return its connection string.

    *storage_account* may be a bare name (assumed in *resource_group_name*) or a
    full ARM resource id. The account must expose blob, queue and table
    endpoints and use one of the allowed general-purpose SKUs.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    # Fix: collect all problems instead of letting later endpoint checks
    # overwrite earlier messages (previously only the last missing endpoint was
    # reported, and the sku message was appended without a separator).
    problems = []
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            problems.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        problems.append('Storage type {} is not allowed'.format(sku))
    if problems:
        raise CLIError('. '.join(problems))
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """List regions offering the consumption (Dynamic) SKU as lowercase, space-free names."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
    """List regions where the given SKU (optionally requiring Linux workers) is available."""
    full_sku = get_sku_name(sku)
    return web_client_factory(cmd.cli_ctx).list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment-status endpoint until success (4), failure (3) or timeout.

    :param timeout: overall timeout in seconds; polling happens every 2 seconds.
    :returns: the last status payload received from the endpoint.
    """
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    # One trial every 2 seconds; default cap of 450 trials ~= 15 minutes.
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization,
                                verify=not should_disable_connection_verify())
        # Fix: removed a second time.sleep(2) here — each trial took ~4s while
        # total_trials assumes 2s per trial, doubling the effective timeout.
        try:
            res_dict = response.json()
        except json.decoder.JSONDecodeError:
            logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
            res_dict = {}
        finally:
            num_trials = num_trials + 1
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
                           -n {} -g {}""".format(res_dict, name, rg_name))
        if res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
                       is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List the continuous webjobs of a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous webjob and return its refreshed state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
        return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous webjob and return its refreshed state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
        return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous webjob from the app or the given slot."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        return web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List the triggered webjobs of a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Run a triggered webjob once and return its refreshed state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
        return web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
    web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob from the app or the given slot."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        return web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
    return web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List the hybrid connections configured on a Windows webapp.

    For a Linux app (plan has ``reserved`` set) a warning is logged and
    nothing is returned.  The raw ARM payload is reshaped so only the
    commonly useful fields remain.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if webapp.reserved:  # 'reserved' marks a Linux plan
        return logger.warning("hybrid connections not supported on a linux app.")
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        raw = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        raw = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)

    def _prune(entry):
        # keep only the commonly useful fields of one hybrid connection
        props = entry["properties"]
        return {
            "id": entry["id"],
            "location": entry["location"],
            "name": entry["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            # the resource group is the 5th segment of the ARM resource id
            "resourceGroup": entry["id"].split("/")[4],
            "type": entry["type"]
        }

    return [_prune(x) for x in raw.additional_properties["value"]]
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Attach a Service Bus Relay hybrid connection to a Windows webapp/slot.

    Resolves the namespace id, ensures a "defaultSender" send-authorization
    rule exists on the relay, then creates/updates the hybrid connection on
    the app and returns a pruned summary dict.  For Linux apps a warning is
    logged and nothing is returned.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved  # 'reserved' marks a Linux plan
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")
    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # find the ARM id of the requested relay namespace by name
    hy_co_id = ''
    for n in namespace_client.list():
        if n.name == namespace:
            hy_co_id = n.id
    # the namespace's resource group is the id segment after "resourceGroups"
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1
    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # user_metadata is a Python-literal string; assumed to be a list of
    # {"key": ..., "value": ...} dicts with an "endpoint" entry -- TODO confirm
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    # endpoint is "host:port"
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")
    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                                hybrid_connection, hc, slot)
    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Rotate the send key every app on *plan* uses for a hybrid connection.

    Ensures the relay's "defaultSender" send-authorization rule exists,
    selects the primary or secondary key per *key_type* (case-insensitive),
    and pushes the updated HybridConnection to each web app bound to it.
    Returns the refreshed list of bound apps, or None on an invalid
    *key_type*.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)

    # extract the hybrid connection resource group from the relay ARM uri
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    # BUG FIX: this sentinel was the int 0, which has no .split(); use the
    # empty string, consistent with add_hc.
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    # endpoint is "host:port"
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return
    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        app_resource_group = id_split[4]
        hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                              relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                              send_key_value=key)
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)
    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List the virtual-network connections of an App Service plan."""
    plans = web_client_factory(cmd.cli_ctx).app_service_plans
    return plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from a Windows webapp (or slot).

    For a Linux app a warning is logged and nothing is returned.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if webapp.reserved:  # 'reserved' marks a Linux plan
        return logger.warning("hybrid connections not supported on a linux app.")
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
    return apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                              hybrid_connection, slot)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List virtual-network integrations of a webapp (or of *slot*).

    Each entry is reshaped: a GUID prefix (everything up to the first
    underscore) is stripped from the name and from the id's last segment,
    and only the fields of interest are kept.
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        vnets = list(client.web_apps.list_vnet_connections(resource_group_name, name))
    else:
        vnets = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))

    pruned = []
    for vnet in vnets:
        # drop the leading "<GUID>_" from the name, when present
        full_name = vnet.name
        short_name = full_name.split('_', 1)[1] if '_' in full_name else full_name
        # rebuild the id with the shortened name as its final segment
        vnet_id = vnet.id
        short_id = vnet_id[:vnet_id.rindex('/')] + '/' + short_name
        pruned.append({"certThumbprint": vnet.cert_thumbprint,
                       "id": short_id,
                       "location": vnet.additional_properties["location"],
                       "name": short_name,
                       # resource group is the 5th segment of the ARM id
                       "resourceGroup": vnet_id.split('/')[4],
                       "routes": vnet.routes,
                       "type": vnet.type,
                       "vnetResourceId": vnet.vnet_resource_id})
    return pruned
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
    """Integrate a webapp (or slot) with a subnet via Swift VNet integration.

    Resolves *vnet* by name across the subscription, verifies the app's plan
    supports Swift integration, delegates the subnet to
    Microsoft.Web/serverFarms when not already delegated, then creates or
    updates the Swift connection and returns a pruned summary dict.
    """
    SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)
    # resolve the vnet's ARM id by scanning every vnet in the subscription
    list_all_vnets = vnet_client.virtual_networks.list_all()
    vnet_id = ''
    for v in list_all_vnets:
        if v.name == vnet:
            vnet_id = v.id
    # parsing the arm uri in order to extract vnet_name and vnet_resource_group
    vnet_id_strings = vnet_id.split('/')
    vnet_resource_group = ''
    i = 0
    for z in vnet_id_strings:
        if z.lower() == "resourcegroups":
            vnet_resource_group = vnet_id_strings[i + 1]
        i = i + 1
    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)
    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is
              capable of scaling up to Premium v2\nLearn more:
              https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
    subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    delegations = subnetObj.delegations
    delegated = False
    for d in delegations:
        if d.service_name.lower() == "microsoft.web/serverfarms".lower():
            delegated = True
    if not delegated:
        # delegate the subnet to App Service before wiring up the connection
        subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
        vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
                                             subnet_parameters=subnetObj)
    # re-read the subnet to pick up its (possibly updated) resource id
    id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    subnet_resource_id = id_subnet.id
    swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                    swift_supported=True)
    if slot is None:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
                                                                                        swiftVnet)
    else:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
                                                                                             swiftVnet, slot)
    # reformats the vnet entry, removing unecessary information
    id_strings = return_vnet.id.split('/')
    resourceGroup = id_strings[4]
    mod_vnet = {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        "resourceGroup": resourceGroup,
        "subnetResourceId": return_vnet.subnet_resource_id
    }
    return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Remove the Swift virtual-network integration from a webapp or slot."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return apps.delete_swift_virtual_network(resource_group_name, name)
    return apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Return the run history of a triggered webjob (slot-aware)."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
    return apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False,  # pylint: disable=too-many-statements,
              launch_browser=False, html=False):
    """Create (if needed) and zip-deploy the current directory to a webapp.

    Detects OS/language/runtime from the sources in the working directory.
    When *name* already exists, the existing app/plan/resource group are
    reused (and validated against any explicit parameters); otherwise new
    ones are created.  With *dryrun* only the computed configuration is
    returned.
    """
    import os
    AppServicePlan = cmd.get_models('AppServicePlan')
    src_dir = os.getcwd()
    _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
    client = web_client_factory(cmd.cli_ctx)
    user = get_profile_username()
    _create_new_rg = False
    # NOTE(review): despite its name, does_app_already_exist() appears to
    # report whether the name is free for a NEW app (the branches below treat
    # falsy as "app exists") -- confirm against its definition.
    _create_new_app = does_app_already_exist(cmd, name)
    os_name = detect_os_form_src(src_dir, html)
    lang_details = get_lang_from_content(src_dir, html)
    language = lang_details.get('language')
    # detect the version
    data = get_runtime_version_details(lang_details.get('file_loc'), language)
    version_used_create = data.get('to_create')
    detected_version = data.get('detected')
    runtime_version = "{}|{}".format(language, version_used_create) if \
        version_used_create != "-" else version_used_create
    site_config = None
    if not _create_new_app:  # App exists
        # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
        logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app {}. Please check that the app is a part of "
                           "the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please "
                           "re-run command with the correct parameters.". format(name, current_rg, resource_group_name))
        rg_name = resource_group_name or current_rg
        if location is None:
            loc = app_details.location.replace(" ", "").lower()
        else:
            loc = location.replace(" ", "").lower()
        plan_details = parse_resource_id(app_details.server_farm_id)
        current_plan = plan_details['name']
        if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}."
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
        plan = plan or plan_details['name']
        plan_info = client.app_service_plans.get(rg_name, plan)
        sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
        current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise error if current OS of the app is different from the current one
        if current_os.lower() != os_name.lower():
            # BUG FIX: the last format argument was the `os` module (which
            # rendered as "<module 'os' ...>"); it must be the detected OS name.
            raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to "
                           "'{}'. "
                           "Please create a new app to continue this operation.".format(name, current_os, src_dir,
                                                                                       os_name))
        _is_linux = plan_info.reserved
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create new app, check if we need to use default RG or use user entered values
        logger.warning("webapp %s doesn't exist", name)
        sku = get_sku_to_use(src_dir, html, sku)
        loc = set_location(cmd, sku, location)
        rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
        _is_linux = os_name.lower() == 'linux'
        _create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
        plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan)
    dry_run_str = r""" {
            "name" : "%s",
            "appserviceplan" : "%s",
            "resourcegroup" : "%s",
            "sku": "%s",
            "os": "%s",
            "location" : "%s",
            "src_path" : "%s",
            "runtime_version_detected": "%s",
            "runtime_version": "%s"
            }
            """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
                   runtime_version)
    create_json = json.loads(dry_run_str)

    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json

    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        # NOTE(review): this passes the raw `location` argument (possibly
        # None) rather than the normalized `loc` computed above -- confirm.
        create_resource_group(cmd, rg_name, location)
        logger.warning("Resource group creation complete")
    # create ASP
    logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
    # updated we update those
    create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
                            number_of_workers=1 if _is_linux else None, location=location)
    if _create_new_app:
        logger.warning("Creating webapp '%s' ...", name)
        create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None, tags={"cli": 'webapp_up'},
                      using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.linux_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
        elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.windows_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    # Remove the file after deployment, handling exception if user removed the file manually
    try:
        os.remove(zip_file_path)
    except OSError:
        pass

    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    # persist the chosen values as CLI defaults for subsequent commands
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
        cmd.cli_ctx.config.set_value('defaults', 'location', loc)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    return create_json
def _ping_scm_site(cmd, resource_group, name):
    """Wake up the Kudu (SCM) site with one authenticated GET.

    Work-around until the timeout limits issue for Linux is investigated
    and fixed.
    """
    import requests
    import urllib3
    from azure.cli.core.util import should_disable_connection_verify
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    scm_url = _get_scm_url(cmd, resource_group, name)
    requests.get(scm_url + '/api/settings', headers=headers,
                 verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
    # Thin wrapper: ask the tunnel server whether the remote webapp responds.
    return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
    """Build a TunnelServer to the app's SCM site and wait until it is up.

    Only Linux apps are supported.  When *port* is None the OS picks a
    free ephemeral port.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:  # 'reserved' marks a Linux plan
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")

    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    publish_user = next(p['userName'] for p in profiles)
    publish_password = next(p['userPWD'] for p in profiles)

    local_port = port
    if local_port is None:
        local_port = 0  # 0 => auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')

    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', local_port, scm_url, publish_user, publish_password)
    _ping_scm_site(cmd, resource_group_name, name)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a local TCP tunnel to the app and keep it alive.

    Blocks for *timeout* seconds when given, otherwise until the tunnel
    thread exits (Ctrl+C to stop).
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        ssh_user_name = 'root'
        ssh_user_password = 'Docker!'
        logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
    logger.warning('Ctrl + C to close')
    if timeout:
        time.sleep(int(timeout))
    else:
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
        # the supported spelling on every current version.
        while t.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a tunnel to the app and attach an interactive SSH session over it.

    Blocks for *timeout* seconds when given, otherwise while both the
    tunnel and SSH threads are alive.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()

    # fixed credentials of the App Service Linux container's SSH endpoint
    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'

    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()

    if timeout:
        time.sleep(int(timeout))
    else:
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
        # the supported spelling on every current version.
        while s.is_alive() and t.is_alive():
            time.sleep(5)
def _wait_for_webapp(tunnel_server):
    """Block until the webapp behind *tunnel_server* answers.

    Warns once on the first failed probe, retries every second, and raises
    a CLIError after 60 unsuccessful attempts.
    """
    attempts = 0
    while not is_webapp_up(tunnel_server):
        if attempts == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempts == 60:
            raise CLIError("Timeout Error, Unable to establish a connection")
        attempts += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
    # Thread entry point: runs the tunnel server's blocking serve loop.
    tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    # Open an interactive SSH session over the local tunnel endpoint.
    # The connection is retried once a second (the tunnel/app may not be
    # ready immediately); a CLIError is raised after 60 failed attempts.
    tries = 0
    while True:
        try:
            c = Connection(host=hostname,
                           port=port,
                           user=username,
                           # connect_timeout=60*10,
                           connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if tries == 0:
                logger.warning('Connection is not ready yet, please wait')
            if tries == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            tries = tries + 1
            logger.warning('.')
            time.sleep(1)
    try:
        # show the container banner, then hand the user an interactive login shell
        c.run('cat /etc/motd', pty=True)
        c.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None):  # pylint: disable=too-many-statements
    """Open an interactive SSH session into a Linux webapp's container.

    Refused on Windows clients and while remote debugging is enabled.
    """
    import platform
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        raise CLIError('remote debugging is enabled, please disable')
    create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Interactively create an Azure DevOps build pipeline for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    wizard = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
                                         organization_name, project_name, repository_name,
                                         overwrite_yaml, allow_force_push,
                                         github_pat, github_repository)
    return wizard.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    # Enable application, web-server (filesystem) and docker-container
    # logging for the app via the shared diagnostics helper.
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(cmd, rg_name, name,
                              application_logging=True, web_server_logging='filesystem',
                              docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Return a full ARM resource id for an App Service Environment.

    *ase* may already be a resource id, in which case it is returned as-is;
    otherwise it is treated as an ASE name inside *resource_group_name*.
    """
    if is_valid_resource_id(ase):
        return ase

    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(subscription=get_subscription_id(cli_ctx),
                       resource_group=resource_group_name,
                       namespace='Microsoft.Web',
                       type='hostingEnvironments',
                       name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Return a full ARM resource id for a Key Vault.

    *key_vault* may already be a resource id, in which case it is returned
    as-is; otherwise it is treated as a vault name in *resource_group_name*.
    """
    if is_valid_resource_id(key_vault):
        return key_vault

    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(subscription=get_subscription_id(cli_ctx),
                       resource_group=resource_group_name,
                       namespace='Microsoft.KeyVault',
                       type='vaults',
                       name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True iff *hostname* has a 'Verified' binding on the app.

    NOTE(review): each binding name is lower-cased before comparison while
    *hostname* is compared as given -- callers appear to pass it already
    normalized; confirm before relying on mixed-case input.
    """
    bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_host_name_bindings', slot)
    found = False
    for binding in bindings:
        leaf = binding.name.split('/')[-1]
        if leaf.lower() == hostname and binding.host_name_type == 'Verified':
            found = True
    return found
|
server.py | """
rpyc plug-in server (threaded or forking)
"""
import sys
import os
import socket
import time
import threading
import errno
import logging
try:
import Queue
except ImportError:
import queue as Queue
from rpyc.core import SocketStream, Channel, Connection
from rpyc.utils.registry import UDPRegistryClient
from rpyc.utils.authenticators import AuthenticationError
from rpyc.lib import safe_import
from rpyc.lib.compat import poll, get_exc_errno
signal = safe_import("signal")
class Server(object):
"""Base server implementation
:param service: the :class:`service <service.Service>` to expose
:param hostname: the host to bind to. Default is IPADDR_ANY, but you may
want to restrict it only to ``localhost`` in some setups
:param ipv6: whether to create an IPv6 or IPv4 socket. The default is IPv4
:param port: the TCP port to bind to
:param backlog: the socket's backlog (passed to ``listen()``)
:param reuse_addr: whether or not to create the socket with the ``SO_REUSEADDR`` option set.
:param socket_path: for Unix domain sockets - specifies the socket's path (filename);
requires platform support for ``AF_UNIX``. This option is mutually
exclusive with ``hostname``, ``ipv6`` and ``port``
:param authenticator: the :ref:`api-authenticators` to use. If ``None``, no authentication
is performed.
:param registrar: the :class:`registrar <rpyc.utils.registry.RegistryClient>` to use.
If ``None``, a default :class:`rpyc.utils.registry.UDPRegistryClient`
will be used
:param auto_register: whether or not to register using the *registrar*. By default, the
server will attempt to register only if a registrar was explicitly given.
:param protocol_config: the :data:`configuration dictionary <rpyc.core.protocol.DEFAULT_CONFIG>`
that is passed to the RPyC connection
:param logger: the ``logger`` to use (of the built-in ``logging`` module). If ``None``, a
default logger will be created.
"""
def __init__(self, service, hostname = "", ipv6 = False, port = 0,
backlog = 10, reuse_addr = True, authenticator = None, registrar = None,
auto_register = None, protocol_config = {}, logger = None, socket_path = None):
self.active = False
self._closed = False
self.service = service
self.authenticator = authenticator
self.backlog = backlog
if auto_register is None:
self.auto_register = bool(registrar)
else:
self.auto_register = auto_register
self.protocol_config = protocol_config
self.clients = set()
if socket_path is not None:
if hostname or port or ipv6:
raise ValueError("`socket_path` is mutually exclusive with: hostname, port, ipv6")
self.listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.listener.bind(socket_path)
# set the self.port to the path as it's used for the registry and logging
self.host, self.port = "", socket_path
else:
if ipv6:
if hostname == "localhost" and sys.platform != "win32":
# on windows, you should bind to localhost even for ipv6
hostname = "localhost6"
self.listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if reuse_addr and sys.platform != "win32":
# warning: reuseaddr is not what you'd expect on windows!
# it allows you to bind an already bound port, resulting in "unexpected behavior"
# (quoting MSDN)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.bind((hostname, port))
if sys.platform == "win32":
# hack so we can receive Ctrl+C on windows
self.listener.settimeout(0.5)
# hack for IPv6 (the tuple can be longer than 2)
sockname = self.listener.getsockname()
self.host, self.port = sockname[0], sockname[1]
if logger is None:
logger = logging.getLogger("%s/%s" % (self.service.get_service_name(), self.port))
self.logger = logger
if "logger" not in self.protocol_config:
self.protocol_config["logger"] = self.logger
if registrar is None:
registrar = UDPRegistryClient(logger = self.logger)
self.registrar = registrar
def close(self):
"""Closes (terminates) the server and all of its clients. If applicable,
also unregisters from the registry server"""
if self._closed:
return
self._closed = True
self.active = False
if self.auto_register:
try:
self.registrar.unregister(self.port)
except Exception:
self.logger.exception("error unregistering services")
try:
self.listener.shutdown(socket.SHUT_RDWR)
except (EnvironmentError, socket.error):
pass
self.listener.close()
self.logger.info("listener closed")
for c in set(self.clients):
try:
c.shutdown(socket.SHUT_RDWR)
except Exception:
pass
c.close()
self.clients.clear()
def fileno(self):
"""returns the listener socket's file descriptor"""
return self.listener.fileno()
def accept(self):
"""accepts an incoming socket connection (blocking)"""
while True:
try:
sock, addrinfo = self.listener.accept()
except socket.timeout:
pass
except socket.error:
ex = sys.exc_info()[1]
if get_exc_errno(ex) == errno.EINTR:
pass
else:
raise EOFError()
else:
break
sock.setblocking(True)
self.logger.info("accepted %s", addrinfo)
self.clients.add(sock)
self._accept_method(sock)
def _accept_method(self, sock):
"""this method should start a thread, fork a child process, or
anything else in order to serve the client. once the mechanism has
been created, it should invoke _authenticate_and_serve_client with
`sock` as the argument"""
raise NotImplementedError
def _authenticate_and_serve_client(self, sock):
try:
if self.authenticator:
addrinfo = sock.getpeername()
try:
sock2, credentials = self.authenticator(sock)
except AuthenticationError:
self.logger.info("%s failed to authenticate, rejecting connection", addrinfo)
return
else:
self.logger.info("%s authenticated successfully", addrinfo)
else:
credentials = None
sock2 = sock
try:
self._serve_client(sock2, credentials)
except Exception:
self.logger.exception("client connection terminated abruptly")
raise
finally:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
sock.close()
self.clients.discard(sock)
def _serve_client(self, sock, credentials):
addrinfo = sock.getpeername()
if credentials:
self.logger.info("welcome %s (%r)", addrinfo, credentials)
else:
self.logger.info("welcome %s", addrinfo)
try:
config = dict(self.protocol_config, credentials = credentials,
endpoints = (sock.getsockname(), addrinfo))
conn = Connection(self.service, Channel(SocketStream(sock)),
config = config, _lazy = True)
conn._init_service()
conn.serve_all()
finally:
self.logger.info("goodbye %s", addrinfo)
    def _bg_register(self):
        """Background loop: re-register this server's service aliases with the
        registry every REREGISTER_INTERVAL seconds while the server is active."""
        interval = self.registrar.REREGISTER_INTERVAL
        self.logger.info("started background auto-register thread "
            "(interval = %s)", interval)
        tnext = 0
        try:
            while self.active:
                t = time.time()
                if t >= tnext:
                    # due for (re-)registration; schedule the next one first
                    tnext = t + interval
                    try:
                        self.registrar.register(self.service.get_service_aliases(),
                            self.port)
                    except Exception:
                        self.logger.exception("error registering services")
                # wake once a second so shutdown (self.active = False) is
                # noticed promptly rather than after a full interval
                time.sleep(1)
        finally:
            if not self._closed:
                self.logger.info("background auto-register thread finished")
    def start(self):
        """Starts the server (blocking). Use :meth:`close` to stop.

        Loops accepting connections until another thread closes the server
        (surfacing as EOFError from accept()) or a keyboard interrupt
        arrives; the server is always closed on the way out.
        """
        self.listener.listen(self.backlog)
        self.logger.info("server started on [%s]:%s", self.host, self.port)
        self.active = True
        if self.auto_register:
            # daemon thread so it never keeps the process alive on exit
            t = threading.Thread(target = self._bg_register)
            t.setDaemon(True)
            t.start()
        try:
            try:
                while True:
                    self.accept()
            except EOFError:
                pass # server closed by another thread
            except KeyboardInterrupt:
                print("")
                self.logger.warn("keyboard interrupt!")
        finally:
            self.logger.info("server has terminated")
            self.close()
class OneShotServer(Server):
    """
    A server that handles a single connection (blockingly), and terminates after that

    Parameters: see :class:`Server`
    """
    def _accept_method(self, sock):
        # Serve the client inline; whatever the outcome, close the whole
        # server afterwards so only one connection is ever handled.
        try:
            self._authenticate_and_serve_client(sock)
        finally:
            self.close()
class ThreadedServer(Server):
    """
    A server that spawns a thread for each connection. Works on any platform
    that supports threads.

    Parameters: see :class:`Server`
    """
    def _accept_method(self, sock):
        """Serve `sock` on a daemon thread so accept() can keep running."""
        t = threading.Thread(
            target=self._authenticate_and_serve_client, args=(sock,))
        # Fix: use the `daemon` attribute instead of the deprecated
        # setDaemon() call (consistent with the rest of this module).
        t.daemon = True
        t.start()
class ThreadPoolServer(Server):
    """This server is threaded like the ThreadedServer but reuses threads so that
    recreation is not necessary for each request. The pool of threads has a fixed
    size that can be set with the 'nbThreads' argument. The default size is 20.
    The server dispatches request to threads by batch, that is a given thread may process
    up to request_batch_size requests from the same connection in one go, before it goes to
    the next connection with pending requests. By default, self.request_batch_size
    is set to 10 and it can be overwritten in the constructor arguments.

    Contributed by *@sponce*

    Parameters: see :class:`Server`
    """

    def __init__(self, *args, **kwargs):
        '''Initializes a ThreadPoolServer. In particular, instantiate the thread pool.'''
        # pop our extra keyword arguments so the parent class never sees them
        nbthreads = kwargs.pop('nbThreads', 20)
        self.request_batch_size = kwargs.pop('requestBatchSize', 10)
        # init the parent
        Server.__init__(self, *args, **kwargs)
        # a queue of connections having something to process
        self._active_connection_queue = Queue.Queue()
        # declare the pool as already active
        self.active = True
        # setup the thread pool for handling requests
        self.workers = []
        for _ in range(nbthreads):
            t = threading.Thread(target=self._serve_clients)
            # `name`/`daemon` attributes instead of the deprecated
            # setName()/setDaemon() calls
            t.name = 'ThreadPoolWorker'
            t.daemon = True
            t.start()
            self.workers.append(t)
        # a polling object to be used by the polling thread
        self.poll_object = poll()
        # a dictionary fd -> connection
        self.fd_to_conn = {}
        # setup a thread for polling inactive connections
        self.polling_thread = threading.Thread(target=self._poll_inactive_clients)
        self.polling_thread.name = 'PollingThread'
        self.polling_thread.daemon = True
        self.polling_thread.start()

    def close(self):
        '''closes a ThreadPoolServer. In particular, joins the thread pool.'''
        # close parent server
        Server.close(self)
        # stop producer thread
        self.polling_thread.join()
        # cleanup thread pool : first fill the pool with None fds so that all threads exit
        # the blocking get on the queue of active connections. Then join the threads
        for _ in range(len(self.workers)):
            self._active_connection_queue.put(None)
        for w in self.workers:
            w.join()

    def _remove_from_inactive_connection(self, fd):
        '''removes a connection from the set of inactive ones'''
        # unregister the connection in the polling object
        try:
            self.poll_object.unregister(fd)
        except KeyError:
            # the connection has already been unregistered
            pass

    def _drop_connection(self, fd):
        '''removes a connection by closing it and removing it from internal structs'''
        # Atomically remove the connection from the fd map; bail out if it is
        # already gone.  (Bug fix: the original fell through to conn.close()
        # after a KeyError, raising NameError on the unbound `conn`.)
        try:
            conn = self.fd_to_conn.pop(fd)
        except KeyError:
            # the active connection has already been removed
            return
        # close connection
        conn.close()

    def _add_inactive_connection(self, fd):
        '''adds a connection to the set of inactive ones'''
        self.poll_object.register(fd, "rw")

    def _handle_poll_result(self, connlist):
        '''dispatches poll results: drops dead connections, queues readable ones'''
        for fd, evt in connlist:
            try:
                # remove connection from the inactive ones
                self._remove_from_inactive_connection(fd)
                # Is it an error ?
                if "e" in evt or "n" in evt or "h" in evt:
                    # it was an error, connection was closed. Do the same on our side
                    self._drop_connection(fd)
                else:
                    # connection has data, let's add it to the active queue
                    self._active_connection_queue.put(fd)
            except KeyError:
                # the connection has already been dropped. Give up
                pass

    def _poll_inactive_clients(self):
        '''Main method run by the polling thread of the thread pool.
        Check whether inactive clients have become active'''
        while self.active:
            try:
                # the actual poll, with a timeout of 1s so that we can exit in case
                # we are not active anymore
                active_clients = self.poll_object.poll(1)
                # for each client that became active, put them in the active queue
                self._handle_poll_result(active_clients)
            except Exception:
                ex = sys.exc_info()[1]
                # "Caught exception in Worker thread" message
                self.logger.warning("failed to poll clients, caught exception : %s", str(ex))
                # wait a bit so that we do not loop too fast in case of error
                time.sleep(0.2)

    def _serve_requests(self, fd):
        '''Serves requests from the given connection and puts it back to the appropriate queue'''
        # serve a maximum of RequestBatchSize requests for this connection
        for _ in range(self.request_batch_size):
            try:
                if not self.fd_to_conn[fd].poll():  # note that poll serves the request
                    # we could not find a request, so we put this connection back to the inactive set
                    self._add_inactive_connection(fd)
                    return
            except EOFError:
                # the connection has been closed by the remote end. Close it on our side and return
                self._drop_connection(fd)
                return
            except Exception:
                # put back the connection to active queue in doubt and raise the exception to the upper level
                self._active_connection_queue.put(fd)
                raise
        # we've processed the maximum number of requests. Put back the connection in the active queue
        self._active_connection_queue.put(fd)

    def _serve_clients(self):
        '''Main method run by the processing threads of the thread pool.
        Loops forever, handling requests read from the connections present in the active_queue'''
        while self.active:
            try:
                # note that we do not use a timeout here. This is because the implementation of
                # the timeout version performs badly. So we block forever, and exit by filling
                # the queue with None fds
                fd = self._active_connection_queue.get(True)
                # fd may be None (case where we want to exit the blocking get to close the service)
                if fd:
                    # serve the requests of this connection
                    self._serve_requests(fd)
            except Queue.Empty:
                # we've timed out, let's just retry. We only use the timeout so that this
                # thread can stop even if there is nothing in the queue
                pass
            except Exception:
                ex = sys.exc_info()[1]
                # "Caught exception in Worker thread" message
                self.logger.warning("failed to serve client, caught exception : %s", str(ex))
                # wait a bit so that we do not loop too fast in case of error
                time.sleep(0.2)

    def _authenticate_and_build_connection(self, sock):
        '''Authenticate a client and if it succeeds, wraps the socket in a connection object.
        Note that this code is cut and paste from the rpyc internals and may have to be
        changed if rpyc evolves'''
        # authenticate
        if self.authenticator:
            h, p = sock.getpeername()
            try:
                sock, credentials = self.authenticator(sock)
            except AuthenticationError:
                self.logger.info("%s:%s failed to authenticate, rejecting connection", h, p)
                return None
        else:
            credentials = None
        # build a connection
        h, p = sock.getpeername()
        config = dict(self.protocol_config, credentials=credentials, connid="%s:%d"%(h, p))
        return Connection(self.service, Channel(SocketStream(sock)), config=config)

    def _accept_method(self, sock):
        '''Implementation of the accept method : only pushes the work to the internal queue.
        In case the queue is full, raises an AsynResultTimeout error'''
        try:
            # authenticate and build connection object
            conn = self._authenticate_and_build_connection(sock)
            # put the connection in the active queue
            if conn:
                fd = conn.fileno()
                self.fd_to_conn[fd] = conn
                self._add_inactive_connection(fd)
                # NOTE(review): this clears the base class's `clients` set on
                # every accept; this class tracks connections in fd_to_conn
                # instead — confirm against upstream rpyc behaviour.
                self.clients.clear()
        except Exception:
            ex = sys.exc_info()[1]
            self.logger.warning("failed to serve client, caught exception : %s", str(ex))
class ForkingServer(Server):
    """
    A server that forks a child process for each connection. Available on
    POSIX compatible systems only.

    Parameters: see :class:`Server`
    """
    def __init__(self, *args, **kwargs):
        if not signal:
            raise OSError("ForkingServer not supported on this platform")
        Server.__init__(self, *args, **kwargs)
        # setup sigchld handler; keep the previous handler so it can be
        # restored on close() and inside forked children
        self._prevhandler = signal.signal(signal.SIGCHLD, self._handle_sigchld)
    def close(self):
        Server.close(self)
        # restore whatever SIGCHLD handler was installed before us
        signal.signal(signal.SIGCHLD, self._prevhandler)
    @classmethod
    def _handle_sigchld(cls, signum, unused):
        """SIGCHLD handler: reap all exited children without blocking."""
        try:
            while True:
                pid, dummy = os.waitpid(-1, os.WNOHANG)
                if pid <= 0:
                    break
        except OSError:
            pass
        # re-register signal handler (see man signal(2), under Portability)
        signal.signal(signal.SIGCHLD, cls._handle_sigchld)
    def _accept_method(self, sock):
        pid = os.fork()
        if pid == 0:
            # child
            try:
                try:
                    self.logger.debug("child process created")
                    # the child must not run the parent's child-reaping handler
                    signal.signal(signal.SIGCHLD, self._prevhandler)
                    #76: call signal.siginterrupt(False) in forked child
                    signal.siginterrupt(signal.SIGCHLD, False)
                    # the child serves exactly one socket: drop the inherited
                    # listener and the parent's client bookkeeping
                    self.listener.close()
                    self.clients.clear()
                    self._authenticate_and_serve_client(sock)
                except:
                    # deliberately broad: the child must never propagate an
                    # exception past os._exit() below
                    self.logger.exception("child process terminated abnormally")
                else:
                    self.logger.debug("child process terminated")
            finally:
                self.logger.debug("child terminated")
                # exit without running atexit/cleanup inherited from the parent
                os._exit(0)
        else:
            # parent
            sock.close()
|
test_all.py | #! /usr/bin/env python3
"""Brute-force test script: test libpqxx against many compilers etc.
This script makes no changes in the source tree; all builds happen in
temporary directories.
To make this possible, you may need to run "make distclean" in the
source tree. The configure script will refuse to configure otherwise.
"""
# Without this, pocketlint does not yet understand the print function.
from __future__ import print_function
from abc import (
ABCMeta,
abstractmethod,
)
from argparse import ArgumentParser
from contextlib import contextmanager
from datetime import datetime
from functools import partial
import json
from multiprocessing import (
JoinableQueue,
Process,
Queue,
)
from multiprocessing.pool import (
Pool,
)
from os import (
cpu_count,
getcwd,
)
import os.path
from queue import Empty
from shutil import rmtree
from subprocess import (
CalledProcessError,
check_call,
check_output,
DEVNULL,
)
from sys import (
stderr,
stdout,
)
from tempfile import mkdtemp
from textwrap import dedent
# Number of CPU cores; used to parallelise "make".
CPUS = cpu_count()
# Compiler binaries to try: a range of g++ and clang++ versions.
GCC_VERSIONS = list(range(8, 14))
GCC = ['g++-%d' % ver for ver in GCC_VERSIONS]
CLANG_VERSIONS = list(range(7, 15))
CLANG = ['clang++-6.0'] + ['clang++-%d' % ver for ver in CLANG_VERSIONS]
CXX = GCC + CLANG
# Standard-library choices: compiler default, or LLVM's libc++.
STDLIB = (
    '',
    '-stdlib=libc++',
)
# Optimisation levels to exercise.
OPT = ('-O0', '-O3')
# "configure" flags for static vs. dynamic linking.
LINK = {
    'static': ['--enable-static', '--disable-shared'],
    'dynamic': ['--disable-static', '--enable-shared'],
}
# "configure" flags for the debug/audit option mixes.
DEBUG = {
    'plain': [],
    'audit': ['--enable-audit'],
    'maintainer': ['--enable-maintainer-mode'],
    'full': ['--enable-audit', '--enable-maintainer-mode'],
}
# CMake "generators." Maps a value for cmake's -G option to a command line to
# run.
#
# I prefer Ninja if available, because it's fast. But hey, the default will
# work.
#
# Maps the name of the generator (as used with cmake's -G option) to the
# actual command line needed to do the build.
CMAKE_GENERATORS = {
    'Ninja': ['ninja'],
    'Unix Makefiles': ['make', '-j%d' % CPUS],
}
class Fail(Exception):
    """A known, well-handled exception.  Doesn't need a traceback."""
class Skip(Exception):
    """We're not doing this build.  It's not an error though."""
def run(cmd, output, cwd=None):
    """Run `cmd`, echoing the command line and all of its output to `output`.

    :param cmd: command and arguments, as a list of strings.
    :param output: writable file object receiving the combined output.
    :param cwd: optional working directory for the command.
    :raises CalledProcessError: if the command exits nonzero.
    """
    output.write("%s\n\n" % ' '.join(cmd))
    check_call(cmd, stdout=output, stderr=output, cwd=cwd)
def report(output, message):
    """Print `message` to standard output and append it to `output`.

    The file copy is padded with a blank line before and a newline after.
    """
    print(message, flush=True)
    for chunk in ('\n\n', message, '\n'):
        output.write(chunk)
def file_contains(path, text):
    """Does any line of the file at `path` contain `text`?"""
    with open(path) as stream:
        return any(text in line for line in stream)
@contextmanager
def tmp_dir():
    """Context manager: yield a fresh temporary directory, then delete it.

    The directory is removed even if the body raises.
    """
    location = mkdtemp()
    try:
        yield location
    finally:
        rmtree(location)
def write_check_code(work_dir):
    """Write a simple C++ program so we can test whether we can compile it.

    Returns the file's full path.
    """
    program = dedent("""\
        #include <iostream>
        int main()
        {
            std::cout << "Hello world." << std::endl;
        }
        """)
    path = os.path.join(work_dir, "check.cxx")
    with open(path, 'w') as source:
        source.write(program)
    return path
def check_compiler(work_dir, cxx, stdlib, check, verbose=False):
    """Is the given compiler/stdlib combo available?

    Tries to compile (run) `check` with compiler `cxx`.  When `verbose`,
    the compiler's stderr is captured to a log file and echoed on failure;
    otherwise stderr is discarded.

    Fix: the original opened the stderr log file without ever closing it,
    leaking a file handle on every verbose call.
    """
    err_file = os.path.join(work_dir, 'stderr.log')
    command = [cxx, check]
    if stdlib != '':
        command.append(stdlib)
    try:
        if verbose:
            with open(err_file, 'w') as err_output:
                check_call(command, cwd=work_dir, stderr=err_output)
        else:
            check_call(command, cwd=work_dir, stderr=DEVNULL)
    except (OSError, CalledProcessError):
        # OSError: compiler binary not found; CalledProcessError: it failed.
        if verbose:
            with open(err_file) as errors:
                stdout.write(errors.read())
        print("Can't build with '%s %s'. Skipping." % (cxx, stdlib))
        return False
    return True
# TODO: Use Pool.
def check_compilers(compilers, stdlibs, verbose=False):
    """Check which compiler configurations are viable.

    Returns (cxx, stdlib) pairs that can compile a trivial program.
    """
    with tmp_dir() as work_dir:
        check = write_check_code(work_dir)
        viable = []
        for stdlib in stdlibs:
            for cxx in compilers:
                if check_compiler(work_dir, cxx, stdlib, check=check,
                                  verbose=verbose):
                    viable.append((cxx, stdlib))
        return viable
def find_cmake_command():
    """Figure out a CMake generator we can use, or None.

    Queries "cmake -E capabilities" and picks the first generator we know
    how to drive (see CMAKE_GENERATORS).
    """
    try:
        caps = check_output(['cmake', '-E', 'capabilities'])
    except FileNotFoundError:
        # No cmake binary on this system.
        return None
    available = {
        generator['name'] for generator in json.loads(caps)['generators']}
    return next(
        (gen for gen in CMAKE_GENERATORS.keys() if gen in available), None)
class Config(metaclass=ABCMeta):
    """Configuration for a build.

    These classes must be suitable for pickling, so we can send its objects to
    worker processes.
    """
    # Fix: the original set the Python-2-style `__metaclass__ = ABCMeta`
    # attribute, which has no effect on Python 3 (this script is Python 3:
    # see the shebang and the `queue` import), so `name` was never actually
    # enforced as abstract.

    @abstractmethod
    def name(self):
        """Return an identifier for this build configuration."""

    def make_log_name(self):
        """Compose log file name for this build."""
        return "build-%s.out" % self.name()
class Build:
    """A pending or ongoing build, in its own directory.

    Each step returns True for Success, or False for failure.

    These classes must be suitable for pickling, so we can send its objects to
    worker processes.

    NOTE(review): `configure` and `build` carry @abstractmethod but the class
    has no ABC metaclass, so nothing enforces them — confirm whether
    `class Build(metaclass=ABCMeta)` was intended.
    """
    def __init__(self, logs_dir, config=None):
        # the Config whose make_log_name() names this build's log file
        self.config = config
        self.log = os.path.join(logs_dir, config.make_log_name())
        # Start a fresh log file.
        with open(self.log, 'w') as log:
            log.write("Starting %s.\n" % datetime.utcnow())
        # private scratch directory where the build actually runs
        self.work_dir = mkdtemp()
    def clean_up(self):
        """Delete the build tree."""
        rmtree(self.work_dir)
    @abstractmethod
    def configure(self, log):
        """Prepare for a build."""
    @abstractmethod
    def build(self, log):
        """Build the code, including the tests. Don't run tests though."""
    def test(self, log):
        """Run tests (the runner binary built in the work dir)."""
        run(
            [os.path.join(os.path.curdir, 'test', 'runner')], log,
            cwd=self.work_dir)
    def logging(self, function):
        """Call function, pass open write handle for `self.log`."""
        # TODO: Should probably be a decorator.
        with open(self.log, 'a') as log:
            try:
                function(log)
            except Exception as error:
                # record the failure in the log, then let it propagate
                log.write("%s\n" % error)
                raise
    def do_configure(self):
        """Call `configure`, writing output to `self.log`."""
        self.logging(self.configure)
    def do_build(self):
        """Call `build`, writing output to `self.log`."""
        self.logging(self.build)
    def do_test(self):
        """Call `test`, writing output to `self.log`."""
        self.logging(self.test)
class AutotoolsConfig(Config):
    """A combination of build options for the "configure" script."""

    def __init__(self, cxx, opt, stdlib, link, link_opts, debug, debug_opts):
        """Capture one combination of autotools build options."""
        self.cxx = cxx
        self.opt = opt
        self.stdlib = stdlib
        self.link = link
        self.link_opts = link_opts
        self.debug = debug
        self.debug_opts = debug_opts

    def name(self):
        """Identifier built from the distinguishing options."""
        parts = (self.cxx, self.opt, self.stdlib, self.link, self.debug)
        return '_'.join(parts)
class AutotoolsBuild(Build):
    """Build using the "configure" script."""
    __metaclass__ = ABCMeta

    def configure(self, log):
        """Run ./configure with flags derived from `self.config`."""
        cfg = self.config
        command = [
            os.path.join(getcwd(), "configure"),
            "CXX=%s" % cfg.cxx,
        ]
        if cfg.stdlib == '':
            command.append("CXXFLAGS=%s" % cfg.opt)
        else:
            # Non-default standard library: needed at compile and link time.
            command.append("CXXFLAGS=%s %s" % (cfg.opt, cfg.stdlib))
            command.append("LDFLAGS=%s" % cfg.stdlib)
        command.append("--disable-documentation")
        command.extend(cfg.link_opts)
        command.extend(cfg.debug_opts)
        run(command, log, cwd=self.work_dir)

    def build(self, log):
        """Compile the code and the tests, without running the tests."""
        make = ['make', '-j%d' % CPUS]
        run(make, log, cwd=self.work_dir)
        # Passing "TESTS=" like this will suppress the actual running of
        # the tests. We run them in the "test" stage.
        run(make + ['check', 'TESTS='], log, cwd=self.work_dir)
class CMakeConfig(Config):
    """Configuration for a CMake build."""

    def __init__(self, generator):
        """Remember the generator name and the command line that drives it."""
        self.generator = generator
        self.builder = CMAKE_GENERATORS[generator]

    def name(self):
        """All CMake builds share a single identifier."""
        return "cmake"
class CMakeBuild(Build):
    """Build using CMake.

    Ignores the config for now.
    """
    __metaclass__ = ABCMeta

    def configure(self, log):
        """Generate build files for the configured generator."""
        run(
            ['cmake', '-G', self.config.generator, getcwd()], output=log,
            cwd=self.work_dir)

    def build(self, log):
        """Run the generator's native build command."""
        run(self.config.builder, log, cwd=self.work_dir)
def parse_args():
    """Parse command-line arguments into a Namespace."""
    parser = ArgumentParser(description=__doc__)
    add = parser.add_argument
    add('--verbose', '-v', action='store_true')
    add('--compilers', '-c', default=','.join(CXX),
        help="Compilers, separated by commas. Default is %(default)s.")
    add('--optimize', '-O', default=','.join(OPT),
        help=(
            "Alternative optimisation options, separated by commas. "
            "Default is %(default)s."))
    add('--stdlibs', '-L', default=','.join(STDLIB),
        help=(
            "Comma-separated options for choosing standard library. "
            "Defaults to %(default)s."))
    add('--logs', '-l', default='.', metavar='DIRECTORY',
        help="Write build logs to DIRECTORY.")
    add('--jobs', '-j', default=CPUS, metavar='CPUS',
        help=(
            "When running 'make', run up to CPUS concurrent processes. "
            "Defaults to %(default)s."))
    add('--minimal', '-m', action='store_true',
        help="Make it as short a run as possible. For testing this script.")
    return parser.parse_args()
def soft_get(queue, block=True):
    """Return the next item off `queue`, or `None` when it is empty.

    :param block: wait for an item if the queue is currently empty.
    """
    try:
        item = queue.get(block)
    except Empty:
        return None
    return item
def read_queue(queue, block=True):
    """Yield entries off `queue`, terminating when it gets a `None`.

    Also terminates when the queue is empty (with block=False).
    """
    while True:
        entry = soft_get(queue, block)
        if entry is None:
            return
        yield entry
def service_builds(in_queue, fail_queue, out_queue):
    """Worker process for "build" stage: process one job at a time.

    Sends successful builds to `out_queue`, and failed builds to `fail_queue`.

    Terminates when it receives a `None`, at which point it will send a `None`
    into `out_queue` in turn.
    """
    for build in read_queue(in_queue):
        try:
            build.do_build()
        except Exception as error:
            destination, item = fail_queue, (build, "%s" % error)
        else:
            destination, item = out_queue, build
        destination.put(item)
        in_queue.task_done()
    # Mark the end of the queue.
    out_queue.put(None)
def service_tests(in_queue, fail_queue, out_queue):
    """Worker process for "test" stage: test one build at a time.

    Sends successful builds to `out_queue`, and failed builds to `fail_queue`.

    Terminates when it receives a final `None`. Does not send out a final
    `None` of its own.
    """
    for build in read_queue(in_queue):
        try:
            build.do_test()
        except Exception as error:
            destination, item = fail_queue, (build, "%s" % error)
        else:
            destination, item = out_queue, build
        destination.put(item)
        in_queue.task_done()
def report_failures(queue, message):
    """Report failures from a failure queue. Return total number."""
    failures = list(read_queue(queue, block=False))
    for build, error in failures:
        print("%s: %s - %s" % (message, build.config.name(), error))
    return len(failures)
def count_entries(queue):
    """Get and discard all entries from `queue`, return the total count."""
    return sum(1 for _ in read_queue(queue, block=False))
def gather_builds(args):
    """Produce the list of builds we want to perform.

    One AutotoolsBuild per viable combination of compiler, optimisation,
    link mode, and debug mix; plus one CMakeBuild if cmake is available.
    """
    if args.verbose:
        print("\nChecking available compilers.")
    candidates = args.compilers.split(',')
    compilers = check_compilers(
        candidates, args.stdlibs.split(','),
        verbose=args.verbose)
    if not compilers:
        raise Fail(
            "Did not find any viable compilers. Tried: %s."
            % ', '.join(candidates))
    opt_levels = args.optimize.split(',')
    link_types = LINK.items()
    debug_mixes = DEBUG.items()
    if args.minimal:
        # Shrink every axis of the matrix to a single entry.
        compilers = compilers[:1]
        opt_levels = opt_levels[:1]
        link_types = list(link_types)[:1]
        debug_mixes = list(debug_mixes)[:1]
    builds = []
    for opt in sorted(opt_levels):
        for link, link_opts in sorted(link_types):
            for debug, debug_opts in sorted(debug_mixes):
                for cxx, stdlib in compilers:
                    builds.append(AutotoolsBuild(
                        args.logs,
                        AutotoolsConfig(
                            opt=opt, link=link, link_opts=link_opts,
                            debug=debug, debug_opts=debug_opts, cxx=cxx,
                            stdlib=stdlib)))
    cmake = find_cmake_command()
    if cmake is not None:
        builds.append(CMakeBuild(args.logs, CMakeConfig(cmake)))
    return builds
def enqueue(queue, build, *args):
    """Put `build` on `queue`.

    Ignores additional arguments, so that it can be used as a callback for
    `Pool`.

    We do this instead of a lambda in order to get the closure right. We want
    the build for the current iteration, not the last one that was executed
    before the lambda runs.
    """
    del args  # accepted only for callback-signature compatibility
    queue.put(build)
def enqueue_error(queue, build, error):
    """Put the pair of `build` and `error` on `queue`."""
    pair = (build, error)
    queue.put(pair)
def main(args):
    """Do it all.

    Pipeline: a multiprocess "configure" pool feeds a single "build" worker
    process, which feeds a single "test" worker process.  Failures from each
    stage pile up in their own (never-blocking) queues and are reported at
    the end.
    """
    if not os.path.isdir(args.logs):
        raise Fail("Logs location '%s' is not a directory." % args.logs)
    builds = gather_builds(args)
    if args.verbose:
        print("Lined up %d builds." % len(builds))
    # The "configure" step is single-threaded.  We can run many at the same
    # time, even when we're also running a "build" step at the same time.
    # This means we may run a lot more processes than we have CPUs, but there's
    # no law against that.  There's also I/O time to be covered.
    configure_pool = Pool()
    # Builds which have failed the "configure" stage, with their errors.  This
    # queue must never stall, so that we can let results pile up here while the
    # work continues.
    configure_fails = Queue(len(builds))
    # Waiting list for the "build" stage.  It contains Build objects,
    # terminated by a final None to signify that there are no more builds to be
    # done.
    build_queue = JoinableQueue(10)
    # Builds that have failed the "build" stage.
    build_fails = Queue(len(builds))
    # Waiting list for the "test" stage.  It contains Build objects, terminated
    # by a final None.
    test_queue = JoinableQueue(10)
    # The "build" step tries to utilise all CPUs, and it may use a fair bit of
    # memory.  Run only one of these at a time, in a single worker process.
    build_worker = Process(
        target=service_builds, args=(build_queue, build_fails, test_queue))
    build_worker.start()
    # Builds that have failed the "test" stage.
    test_fails = Queue(len(builds))
    # Completed builds.  This must never stall.
    done_queue = JoinableQueue(len(builds))
    # The "test" step can not run concurrently (yet).  So, run tests serially
    # in a single worker process.  It takes its jobs directly from the "build"
    # worker.
    test_worker = Process(
        target=service_tests, args=(test_queue, test_fails, done_queue))
    test_worker.start()
    # Feed all builds into the "configure" pool.  Each build which passes this
    # stage goes into the "build" queue.
    for build in builds:
        configure_pool.apply_async(
            build.do_configure, callback=partial(enqueue, build_queue, build),
            error_callback=partial(enqueue_error, configure_fails, build))
    if args.verbose:
        print("All jobs are underway.")
    configure_pool.close()
    configure_pool.join()
# TODO: Async reporting for faster feedback.
    configure_fail_count = report_failures(configure_fails, "CONFIGURE FAIL")
    if args.verbose:
        print("Configure stage done.")
    # Mark the end of the build queue for the build worker.
    build_queue.put(None)
    build_worker.join()
# TODO: Async reporting for faster feedback.
    build_fail_count = report_failures(build_fails, "BUILD FAIL")
    if args.verbose:
        print("Build step done.")
    # Mark the end of the test queue for the test worker.
    test_queue.put(None)
    test_worker.join()
# TODO: Async reporting for faster feedback.
# TODO: Collate failures into meaningful output, e.g. "shared library fails."
    test_fail_count = report_failures(test_fails, "TEST FAIL")
    if args.verbose:
        print("Test step done.")
    # All done.  Clean up.
    for build in builds:
        build.clean_up()
    ok_count = count_entries(done_queue)
    if ok_count == len(builds):
        print("All tests OK.")
    else:
        print(
            "Failures during configure: %d - build: %d - test: %d. OK: %d."
            % (
                configure_fail_count,
                build_fail_count,
                test_fail_count,
                ok_count,
            ))
# Script entry point.  A Fail (a known, well-handled error) is reported
# without a traceback and signalled with exit code 2.
if __name__ == '__main__':
    try:
        exit(main(parse_args()))
    except Fail as failure:
        stderr.write("%s\n" % failure)
        exit(2)
|
server.py | #########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import traceback
import tempfile
import re
import collections
import json
import threading
import socket
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from wsgiref.simple_server import make_server as make_wsgi_server
import bottle
from cloudify._compat import text_type, queue, StringIO
from cloudify.proxy.client import ScriptException
from cloudify.state import current_ctx
class CtxProxy(object):
    """Base proxy that evaluates serialized `ctx` requests.

    Subclasses provide the transport; this class implements the JSON
    request/response protocol around process_ctx_request.
    """

    def __init__(self, ctx, socket_url):
        self.ctx = ctx
        self.socket_url = socket_url

    def process(self, request):
        """Handle one JSON-encoded request string; return a JSON response.

        Successful results come back as type 'result' (or 'stop_operation'
        for ScriptException payloads); any error becomes a type 'error'
        response carrying the exception details and traceback.
        """
        try:
            args = json.loads(request)['args']
            payload = process_ctx_request(self.ctx, args)
            result_type = 'result'
            if isinstance(payload, ScriptException):
                payload = dict(message=str(payload))
                result_type = 'stop_operation'
            return json.dumps({
                'type': result_type,
                'payload': payload
            })
        except Exception as e:
            tb = StringIO()
            traceback.print_exc(file=tb)
            return json.dumps({
                'type': 'error',
                'payload': {
                    'type': type(e).__name__,
                    'message': str(e),
                    'traceback': tb.getvalue()
                }
            })

    def close(self):
        """Nothing to release in the base class."""
        pass
class HTTPCtxProxy(CtxProxy):
    """CtxProxy served over HTTP by a bottle app on localhost."""
    def __init__(self, ctx, port=None):
        port = port or get_unused_port()
        socket_url = 'http://localhost:{0}'.format(port)
        super(HTTPCtxProxy, self).__init__(ctx, socket_url)
        self.port = port
        # size-1 queue used purely as a "server is listening" signal
        self._started = queue.Queue(1)
        self.thread = self._start_server()
        # block until the server thread reports its socket is bound
        self._started.get(timeout=5)
    def _start_server(self):
        """Start the bottle server on a daemon thread; return the thread."""
        proxy = self
        class BottleServerAdapter(bottle.ServerAdapter):
            def run(self, app):
                class Server(WSGIServer):
                    allow_reuse_address = True
                    def handle_error(self, request, client_address):
                        # swallow per-request errors; don't kill the server
                        pass
                class Handler(WSGIRequestHandler):
                    def address_string(self):
                        # skip reverse-DNS lookup in log lines
                        return self.client_address[0]
                    def log_request(*args, **kwargs):
                        # NB: no explicit `self` parameter here, so `self`
                        # below resolves through the closure to the enclosing
                        # run()'s BottleServerAdapter — `self.quiet` is the
                        # adapter's quiet flag, not the handler's.
                        if not self.quiet:
                            return WSGIRequestHandler.log_request(
                                *args, **kwargs)
                self.srv = make_wsgi_server(
                    self.host,
                    self.port,
                    app,
                    Server,
                    Handler)
                proxy.server = self.srv
                self.port = self.srv.server_port
                # tell __init__ the server socket is bound and serving
                proxy._started.put(True)
                self.srv.serve_forever(poll_interval=0.1)
        bottle.post('/', callback=self._request_handler)
        def serve():
            bottle.run(
                host='localhost',
                port=self.port,
                quiet=True,
                server=BottleServerAdapter)
        thread = threading.Thread(target=serve)
        thread.daemon = True
        thread.start()
        return thread
    def close(self):
        self.server.shutdown()
        self.server.server_close()
    def _request_handler(self):
        """Bottle POST / handler: evaluate the request with `ctx` pushed."""
        request = bottle.request.body.read().decode('utf-8')
        with current_ctx.push(self.ctx):
            response = self.process(request)
        return bottle.LocalResponse(
            body=response,
            status=200,
            headers={'content-type': 'application/json'})
class ZMQCtxProxy(CtxProxy):
    """CtxProxy served over a ZeroMQ REP socket (polled, not threaded)."""
    def __init__(self, ctx, socket_url):
        super(ZMQCtxProxy, self).__init__(ctx, socket_url)
        # local import: zmq is only required when this transport is used
        import zmq
        self.z_context = zmq.Context(io_threads=1)
        self.sock = self.z_context.socket(zmq.REP)
        self.sock.bind(self.socket_url)
        # poller lets poll_and_process wait with a timeout
        self.poller = zmq.Poller()
        self.poller.register(self.sock, zmq.POLLIN)
    def poll_and_process(self, timeout=1):
        """Wait up to `timeout` seconds for one request and serve it.

        Returns True if a request was processed, False on timeout.
        """
        import zmq
        # poll() takes milliseconds
        state = dict(self.poller.poll(1000 * timeout)).get(self.sock)
        if not state == zmq.POLLIN:
            return False
        request = self.sock.recv()
        response = self.process(request)
        self.sock.send_string(response)
        return True
    def close(self):
        self.sock.close()
        self.z_context.term()
class UnixCtxProxy(ZMQCtxProxy):
    """CtxProxy over a ZeroMQ ipc:// (Unix domain) socket."""
    def __init__(self, ctx, socket_path=None):
        if not socket_path:
            # NOTE(review): mktemp() only reserves a name and is race-prone;
            # presumably kept because the zmq ipc bind needs a path that does
            # not yet exist — confirm before replacing with mkstemp().
            socket_path = tempfile.mktemp(prefix='ctx-', suffix='.socket')
        socket_url = 'ipc://{0}'.format(socket_path)
        super(UnixCtxProxy, self).__init__(ctx, socket_url)
class TCPCtxProxy(ZMQCtxProxy):
    """CtxProxy over a ZeroMQ tcp:// socket."""
    def __init__(self, ctx, ip='127.0.0.1', port=None):
        # fall back to an OS-assigned free port when none is given
        port = port or get_unused_port()
        socket_url = 'tcp://{0}:{1}'.format(ip, port)
        super(TCPCtxProxy, self).__init__(ctx, socket_url)
class StubCtxProxy(object):
    """No-op stand-in used where no real ctx proxy is wanted."""
    socket_url = ''

    def close(self):
        """Nothing to clean up."""
def process_ctx_request(ctx, args):
    """Walk `args` against `ctx` and return the resulting value.

    Each element of `args` is interpreted in turn as: an attribute name on
    the current object (via _desugar_attr), a dict path handled through
    PathDictAccess (read with one trailing arg, write with two), or the
    positional/keyword arguments of a callable.  A callable final value is
    invoked with no arguments.

    Raises RuntimeError for arguments that fit none of these cases.
    """
    # Fix: bare `collections.MutableMapping` was removed in Python 3.10;
    # import from collections.abc, falling back for Python 2 where the
    # abc submodule does not exist.
    try:
        from collections.abc import MutableMapping
    except ImportError:  # Python 2
        from collections import MutableMapping
    current = ctx
    num_args = len(args)
    index = 0
    while index < num_args:
        arg = args[index]
        desugared_attr = _desugar_attr(current, arg)
        if desugared_attr:
            current = getattr(current, desugared_attr)
        elif isinstance(current, MutableMapping):
            key = arg
            path_dict = PathDictAccess(current)
            if index + 1 == num_args:
                # read dict prop by path
                current = path_dict.get(key)
            elif index + 2 == num_args:
                # set dict prop by path (set() returns None, as before)
                value = args[index + 1]
                current = path_dict.set(key, value)
            else:
                raise RuntimeError('Illegal argument while accessing dict')
            break
        elif callable(current):
            kwargs = {}
            remaining_args = args[index:]
            # a trailing mapping supplies keyword arguments
            if isinstance(remaining_args[-1], MutableMapping):
                kwargs = remaining_args[-1]
                remaining_args = remaining_args[:-1]
            current = current(*remaining_args, **kwargs)
            break
        else:
            raise RuntimeError('{0} cannot be processed in {1}'
                               .format(arg, args))
        index += 1
    if callable(current):
        current = current()
    return current
def _desugar_attr(obj, attr):
    """Map `attr` to an actual attribute name on `obj`, or None.

    Tries the name verbatim first, then with dashes turned into
    underscores.  Non-text arguments never match.
    """
    if not isinstance(attr, text_type):
        return None
    try:
        if hasattr(obj, attr):
            return attr
    except UnicodeError:
        return None
    underscored = attr.replace('-', '_')
    if hasattr(obj, underscored):
        return underscored
    return None
class PathDictAccess(object):
    """Read/write nested dict values via dotted paths like 'a.b[0].c'."""

    # matches a "name[index]" segment: group 1 = name, group 2 = index
    pattern = re.compile(r"(.+)\[(\d+)\]")

    def __init__(self, obj):
        self.obj = obj

    def set(self, prop_path, value):
        """Set the value at `prop_path`, creating intermediate dicts."""
        parent, leaf = self._get_parent_obj_prop_name_by_path(prop_path)
        parent[leaf] = value

    def get(self, prop_path):
        """Return the value at `prop_path`; raise RuntimeError if missing."""
        return self._get_object_by_path(prop_path)

    def _get_object_by_path(self, prop_path, fail_on_missing=True):
        # when setting a nested object, make sure to also set all the
        # intermediate path objects
        current = self.obj
        for segment in prop_path.split('.'):
            match = self.pattern.match(segment)
            if match is None:
                # plain key segment
                if segment not in current:
                    if fail_on_missing:
                        self._raise_illegal(prop_path)
                    current[segment] = {}
                current = current[segment]
                continue
            # indexed segment: name must exist and hold a list
            name = match.group(1)
            index = int(match.group(2))
            if name not in current or not isinstance(current[name], list):
                self._raise_illegal(prop_path)
            current = current[name][index]
        return current

    def _get_parent_obj_prop_name_by_path(self, prop_path):
        """Resolve the parent container and the final key for `prop_path`."""
        segments = prop_path.split('.')
        if len(segments) == 1:
            return self.obj, prop_path
        parent = self._get_object_by_path('.'.join(segments[:-1]),
                                          fail_on_missing=False)
        return parent, segments[-1]

    @staticmethod
    def _raise_illegal(prop_path):
        raise RuntimeError('illegal path: {0}'.format(prop_path))
def get_unused_port():
    """Ask the OS for a currently-free TCP port on localhost and return it."""
    probe = socket.socket()
    probe.bind(('127.0.0.1', 0))
    port = probe.getsockname()[1]
    probe.close()
    return port
|
controller_server.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import socket
import time
from .log_helper import get_logger
from threading import Thread
from .lock import lock, unlock
# Public API of this module.
__all__ = ['ControllerServer']
# Module-wide logger at INFO level.
_logger = get_logger(__name__, level=logging.INFO)
class ControllerServer(object):
    """The controller wrapper with a socket server to handle the request of search agent.
    Args:
        controller(slim.searcher.Controller): The controller used to generate tokens.
        address(tuple): The address of current server binding with format (ip, port). Default: ('', 0).
                        which means setting ip automatically
        max_client_num(int): The maximum number of clients connecting to current server simultaneously. Default: 100.
        search_steps(int|None): The total steps of searching. None means never stopping. Default: None
        key(str|None): Config information. Default: None.
    """

    def __init__(self,
                 controller=None,
                 address=('', 0),
                 max_client_num=100,
                 search_steps=None,
                 key=None):
        self._controller = controller
        self._address = address
        self._max_client_num = max_client_num
        self._search_steps = search_steps
        self._closed = False
        self._port = address[1]
        self._ip = address[0]
        self._key = key
        self._client_num = 0
        # client name -> timestamp of that client's latest request
        self._client = dict()
        self._compare_time = 172800  ### 48 hours

    def start(self):
        """Bind and listen, then serve requests on a daemon thread."""
        self._socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket_server.bind(self._address)
        self._socket_server.listen(self._max_client_num)
        # re-read the bound address: the OS picks the port when address[1] == 0
        self._port = self._socket_server.getsockname()[1]
        self._ip = self._socket_server.getsockname()[0]
        _logger.info("ControllerServer Start!!!")
        _logger.debug("ControllerServer - listen on: [{}:{}]".format(
            self._ip, self._port))
        thread = Thread(target=self.run)
        thread.daemon = True  # setDaemon() is deprecated since Python 3.10
        thread.start()
        return str(thread)

    def close(self):
        """Close the server."""
        self._closed = True
        _logger.info("server closed!")

    def port(self):
        """Get the port."""
        return self._port

    def ip(self):
        """Get the ip."""
        return self._ip

    def run(self):
        """Serve until closed or the controller finishes all search steps.

        Protocol (newline-stripped, tab-separated):
          - "next_tokens"  -> reply with comma-joined tokens
          - "current_info" -> reply with a str()-ed info dict
          - otherwise: "key\ttokens\treward\tstep\tclient_name" updates the
            controller and replies "ok"; wrong key / short messages are noise.
        """
        _logger.info("Controller Server run...")
        try:
            while ((self._search_steps is None) or
                   (self._controller._iter <
                    (self._search_steps))) and not self._closed:
                conn, addr = self._socket_server.accept()
                message = conn.recv(1024).decode()
                _logger.debug(message)
                if message.strip("\n") == "next_tokens":
                    tokens = self._controller.next_tokens()
                    tokens = ",".join([str(token) for token in tokens])
                    conn.send(tokens.encode())
                elif message.strip("\n") == "current_info":
                    current_info = dict()
                    current_info['best_tokens'] = self._controller.best_tokens
                    current_info['best_reward'] = self._controller.max_reward
                    current_info[
                        'current_tokens'] = self._controller.current_tokens
                    conn.send(str(current_info).encode())
                else:
                    _logger.debug("recv message from {}: [{}]".format(addr,
                                                                      message))
                    messages = message.strip('\n').split("\t")
                    # reject malformed requests or ones carrying the wrong key
                    if (len(messages) < 5) or (messages[0] != self._key):
                        _logger.debug("recv noise from {}: [{}]".format(
                            addr, message))
                        continue
                    tokens = messages[1]
                    reward = messages[2]
                    step = messages[3]  # renamed from `iter` (shadowed builtin)
                    client_name = messages[4]
                    one_step_time = -1
                    if client_name in self._client.keys():
                        current_time = time.time() - self._client[client_name]
                        if current_time > one_step_time:
                            one_step_time = current_time
                            self._compare_time = 2 * one_step_time
                    if client_name not in self._client.keys():
                        self._client[client_name] = time.time()
                        self._client_num += 1
                    self._client[client_name] = time.time()
                    # FIX: iterate over a snapshot — popping while iterating the
                    # live keys() view raises RuntimeError in Python 3
                    for key_client in list(self._client.keys()):
                        ### if a client not request token in double train one tokens' time, we think this client was stoped.
                        if (time.time() - self._client[key_client]
                            ) > self._compare_time and len(self._client.keys(
                            )) > 1:
                            self._client.pop(key_client)
                            self._client_num -= 1
                    _logger.debug(
                        "client: {}, client_num: {}, compare_time: {}".format(
                            self._client, self._client_num,
                            self._compare_time))
                    tokens = [int(token) for token in tokens.split(",")]
                    self._controller.update(tokens,
                                            float(reward),
                                            int(step), int(self._client_num))
                    response = "ok"
                    conn.send(response.encode())
                    _logger.debug("send message to {}: [{}]".format(addr,
                                                                    tokens))
                conn.close()
        except Exception as err:
            _logger.error(err)
        finally:
            self._socket_server.close()
            self.close()
|
a3d709a60f053fd06c281db96aef18fa5ac2808dTwistedThreadPool.py | import twisted.python.threadable as threadable
import twisted.python.threadpool as threadpool
import repository.persistence.Repository as Repository
import Queue
# Initialize Twisted's thread support before any pools are created.
threadable.init()
class RepositoryThreadPool(threadpool.ThreadPool):
    """
    An extension of the Twisted Thread Pool class that leverages
    a C{RepositoryThread} instead of the standard python C{Thread}
    """
    def startAWorker(self):
        """Spawn one additional worker thread backed by C{RepositoryThread}."""
        self.workers += 1
        worker_name = "RepositoryPoolThread-%s-%s" % (id(self), self.workers)
        try:
            # non-blocking fetch: hand the new worker a job if one is queued
            initial_job = self.q.get(0)
        except Queue.Empty:
            initial_job = None
        worker = Repository.RepositoryThread(target=self._worker,
                                             name=worker_name,
                                             args=(initial_job,))
        self.threads.append(worker)
        worker.start()
|
demo.py |
from ramp_server import Partition, RAMPAlgorithm
from ramp_client import Client
from random import sample, random, choice
from string import ascii_uppercase
from threading import Semaphore, Thread
# RAMPAlgorithm.{Fast, Small, Hybrid}
ALGORITHM = RAMPAlgorithm.Fast
# Workload shape: partition/client counts, total transactions, read/write mix.
NUM_PARTITIONS = 5
NUM_CLIENTS = 5
NUM_TXNS = 1000
READ_PROPORTION = .5
TXN_LENGTH = 4
NUM_KEYS = 100
KEYS = [str(i) for i in range(0, NUM_KEYS)]
PARTITIONS = [Partition() for _ in range(0, NUM_PARTITIONS)]
# request_sem is the shared budget of transactions; finished_sem is released
# once per client thread when it finishes.
request_sem = Semaphore(NUM_TXNS)
finished_sem = Semaphore()
def random_string():
    """Return a random 6-character uppercase ASCII string."""
    letters = [choice(ascii_uppercase) for _ in range(6)]
    return ''.join(letters)
def run_client(client):
    """Drain the shared request budget, issuing random read/write transactions."""
    # a failed non-blocking acquire means the txn budget is exhausted
    while request_sem.acquire(False):
        txn_keys = sample(KEYS, TXN_LENGTH)
        if random() < READ_PROPORTION:
            client.get_all(txn_keys)
        else:
            payload = random_string()
            kvps = {key: payload for key in txn_keys}
            client.put_all(kvps)
    finished_sem.release()
for c_id in range(0, NUM_CLIENTS):
client = Client(c_id, PARTITIONS, ALGORITHM)
t = Thread(target=run_client, args=(client,))
t.start()
finished_sem.acquire(NUM_TXNS)
print "DONE!"
|
performance_test_with_sqlalchemy.py | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0OA
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2019
"""
performance test to insert contents.
"""
import datetime
import time
import threading
from uuid import uuid4 as uuid
from idds.orm.base.session import transactional_session
from idds.common.constants import (TransformType, TransformStatus, CollectionType,
CollectionRelationType, CollectionStatus,
ContentType, ContentStatus)
from idds.orm.transforms import add_transform
from idds.orm.collections import add_collection
from idds.orm.contents import add_contents
def get_transform_prop():
    """Build the property dict used to create a test transform row."""
    props = dict()
    props['transform_type'] = TransformType.EventStreaming
    props['transform_tag'] = 's3128'
    props['priority'] = 0
    props['status'] = TransformStatus.New
    props['retries'] = 0
    props['expired_at'] = datetime.datetime.utcnow().replace(microsecond=0)
    props['transform_metadata'] = {'input': {'coll_id': 123},
                                   'output': {'coll_id': 456},
                                   'log': {'coll_id': 789}}
    return props
def get_collection_prop():
    """Build the property dict used to create a test collection row (unique name)."""
    props = dict()
    props['scope'] = 'test_scope'
    props['name'] = 'test_name_%s' % str(uuid())
    props['coll_type'] = CollectionType.Dataset
    props['request_id'] = None
    props['transform_id'] = None
    props['relation_type'] = CollectionRelationType.Input
    props['coll_size'] = 0
    props['status'] = CollectionStatus.New
    props['total_files'] = 0
    props['retries'] = 0
    props['expired_at'] = datetime.datetime.utcnow().replace(microsecond=0)
    props['coll_metadata'] = {'ddm_status': 'closed'}
    return props
def get_content_prop():
    """Build the property dict used to create a test content (file) row."""
    props = dict()
    props['coll_id'] = None
    props['scope'] = 'test_scope'
    props['name'] = 'test_file_name_%s' % str(uuid())
    props['min_id'] = 0
    props['max_id'] = 100
    props['content_type'] = ContentType.File
    props['status'] = ContentStatus.New
    props['bytes'] = 1
    props['md5'] = None
    props['adler32'] = None
    props['processing_id'] = None
    props['storage_id'] = None
    props['retries'] = 0
    props['path'] = None
    props['expired_at'] = datetime.datetime.utcnow().replace(microsecond=0)
    props['collcontent_metadata'] = {'id': 123}
    return props
@transactional_session
def test_insert_contents(coll_id, num_contents=1, session=None):
    """Insert *num_contents* test content rows into *coll_id* in one bulk call."""
    def _for_collection(props):
        props['coll_id'] = coll_id
        return props
    batch = [_for_collection(get_content_prop()) for _ in range(num_contents)]
    add_contents(batch, bulk_size=num_contents, session=session)
def test_thread(num_contents_per_thread, num_contents_per_session, coll_id):
    """Insert this thread's share of contents, one session-sized batch at a time."""
    num_batches = num_contents_per_thread // num_contents_per_session
    for _ in range(num_batches):
        test_insert_contents(coll_id, num_contents_per_session)
def test(num_threads=1, total_contents=1, num_colls_per_session=1):
    """Time inserting *total_contents* rows split across *num_threads* threads.

    Creates one transform + collection, fans the inserts out over threads,
    and prints the elapsed wall-clock time.
    """
    trans_properties = get_transform_prop()
    trans_id = add_transform(**trans_properties)
    coll_properties = get_collection_prop()
    coll_properties['transform_id'] = trans_id
    coll_id = add_collection(**coll_properties)
    time_start = time.time()
    threads = [threading.Thread(target=test_thread,
                                args=(total_contents // num_threads,
                                      num_colls_per_session, coll_id))
               for i in range(num_threads)]
    for thread in threads:
        thread.start()
    # join() blocks until each thread exits — same end condition as the
    # original 0.1s is_alive() polling loop, without the busy-wait.
    for thread in threads:
        thread.join()
    time_end = time.time()
    print("num_threads=%s, total_contents=%s, num_colls_per_session=%s, time used: %s" % (num_threads, total_contents, num_colls_per_session, time_end - time_start))
if __name__ == '__main__':
    # Benchmark matrix: small warm-up runs first, then increasing content
    # volumes and thread counts. Each call prints its own timing line.
    test(num_threads=1, total_contents=10, num_colls_per_session=5)
    test(num_threads=1, total_contents=1, num_colls_per_session=1)
    test(num_threads=1, total_contents=10000, num_colls_per_session=1000)
    test(num_threads=1, total_contents=100000, num_colls_per_session=1000)
    test(num_threads=1, total_contents=1000000, num_colls_per_session=1000)
    test(num_threads=10, total_contents=10000, num_colls_per_session=1000)
    test(num_threads=10, total_contents=100000, num_colls_per_session=1000)
    test(num_threads=10, total_contents=1000000, num_colls_per_session=1000)
    test(num_threads=20, total_contents=10000, num_colls_per_session=500)
    test(num_threads=20, total_contents=100000, num_colls_per_session=500)
    test(num_threads=20, total_contents=1000000, num_colls_per_session=500)
|
cas.py | from __future__ import print_function
from couchbase.bucket import Bucket
from couchbase.bucket import LOCKMODE_WAIT
from threading import Thread
from couchbase.exceptions import KeyExistsError
# Connect to the bucket; LOCKMODE_WAIT makes this client usable from
# multiple threads (operations serialize on an internal lock).
cb = Bucket('couchbase://10.0.0.31/default', lockmode=LOCKMODE_WAIT)
# Start from a known empty document.
cb.upsert('a_list', [])
print('Will attempt concurrent document mutations without CAS')
def add_item_to_list(client, new_item):
    """Append *new_item* to the shared document WITHOUT CAS (racy on purpose)."""
    # plain read-modify-write: concurrent writers can overwrite each other
    current = client.get('a_list').value
    current.append(new_item)
    client.replace('a_list', current)
# Run 10 unsafe writers concurrently; without CAS some appends will be lost.
# Plain for-loops instead of list comprehensions used only for their side
# effects (the comprehensions built throwaway lists).
threads = [Thread(target=add_item_to_list, args=(cb, "item_" + str(x)))
           for x in range(0, 10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
cur_list = cb.get('a_list').value
print('Current list has {0} elements'.format(len(cur_list)))
if len(cur_list) != 10:
    print('Concurrent modifications removed some of our items!', cur_list)
# The same as above, but using CAS
def add_item_to_list_safe(client, new_item):
    """Append *new_item* using CAS: retry until our read-modify-write wins.

    BUG FIX: the replace() was issued on the module-level ``cb`` instead of
    the ``client`` argument, defeating the point of passing a client in.
    """
    while True:
        rv = client.get('a_list')
        current = rv.value
        current.append(new_item)
        try:
            # succeeds only if nobody changed the document since our get()
            client.replace('a_list', current, cas=rv.cas)
            return
        except KeyExistsError:
            print("Cas mismatch for item", new_item)
            continue
# Reset the list again
cb.upsert('a_list', [])
print('Will attempt concurrent modifications using CAS')
# With CAS retries, every writer eventually lands its append: all 10 survive.
# (For-loops instead of side-effect list comprehensions.)
threads = [Thread(target=add_item_to_list_safe, args=(cb, "item_" + str(x)))
           for x in range(0, 10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
cur_list = cb.get('a_list').value
print('Current list has {0} elements'.format(len(cur_list)))
assert len(cur_list) == 10
|
test_process_utils.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import subprocess
import time
import unittest
from contextlib import suppress
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from time import sleep
from unittest import mock
import psutil
import pytest
from airflow.exceptions import AirflowException
from airflow.utils import process_utils
from airflow.utils.process_utils import (
check_if_pidfile_process_is_running,
execute_in_subprocess,
execute_in_subprocess_with_kwargs,
set_new_process_group,
)
class TestReapProcessGroup(unittest.TestCase):
    """Verify reap_process_group kills a process tree that ignores SIGTERM."""
    @staticmethod
    def _ignores_sigterm(child_pid, child_setup_done):
        """Child target: install a no-op SIGTERM handler, report the PID, spin."""
        def signal_handler(unused_signum, unused_frame):
            pass
        signal.signal(signal.SIGTERM, signal_handler)
        child_pid.value = os.getpid()
        # tell the parent we are fully set up before it reports readiness
        child_setup_done.release()
        while True:
            time.sleep(1)
    @staticmethod
    def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
        """Parent target: new session leader ignoring SIGTERM; spawns the child above."""
        def signal_handler(unused_signum, unused_frame):
            pass
        # become a session leader so the whole group can be reaped together
        os.setsid()
        signal.signal(signal.SIGTERM, signal_handler)
        child_setup_done = multiprocessing.Semaphore(0)
        child = multiprocessing.Process(
            target=TestReapProcessGroup._ignores_sigterm, args=[child_pid, child_setup_done]
        )
        child.start()
        child_setup_done.acquire(timeout=5.0)
        parent_pid.value = os.getpid()
        setup_done.release()
        while True:
            time.sleep(1)
    def test_reap_process_group(self):
        """
        Spin up a process that can't be killed by SIGTERM and make sure
        it gets killed anyway.
        """
        parent_setup_done = multiprocessing.Semaphore(0)
        parent_pid = multiprocessing.Value('i', 0)
        child_pid = multiprocessing.Value('i', 0)
        args = [parent_pid, child_pid, parent_setup_done]
        parent = multiprocessing.Process(target=TestReapProcessGroup._parent_of_ignores_sigterm, args=args)
        try:
            parent.start()
            assert parent_setup_done.acquire(timeout=5.0)
            assert psutil.pid_exists(parent_pid.value)
            assert psutil.pid_exists(child_pid.value)
            # timeout=1 forces escalation from SIGTERM (ignored) to SIGKILL
            process_utils.reap_process_group(parent_pid.value, logging.getLogger(), timeout=1)
            assert not psutil.pid_exists(parent_pid.value)
            assert not psutil.pid_exists(child_pid.value)
        finally:
            try:
                os.kill(parent_pid.value, signal.SIGKILL) # terminate doesn't work here
                os.kill(child_pid.value, signal.SIGKILL) # terminate doesn't work here
            except OSError:
                pass
class TestExecuteInSubProcess:
    """Tests for execute_in_subprocess / execute_in_subprocess_with_kwargs."""
    def test_should_print_all_messages1(self, caplog):
        # every line of child output is logged, preceded by the command line
        execute_in_subprocess(["bash", "-c", "echo CAT; echo KITTY;"])
        logged = [record.getMessage() for record in caplog.records]
        expected = ["Executing cmd: bash -c 'echo CAT; echo KITTY;'", 'Output:', 'CAT', 'KITTY']
        assert logged == expected
    def test_should_print_all_messages_from_cwd(self, caplog, tmp_path):
        execute_in_subprocess(["bash", "-c", "echo CAT; pwd; echo KITTY;"], cwd=str(tmp_path))
        logged = [record.getMessage() for record in caplog.records]
        expected = [
            "Executing cmd: bash -c 'echo CAT; pwd; echo KITTY;'",
            'Output:',
            'CAT',
            str(tmp_path),
            'KITTY',
        ]
        assert logged == expected
    def test_should_raise_exception(self):
        with pytest.raises(CalledProcessError):
            process_utils.execute_in_subprocess(["bash", "-c", "exit 1"])
    def test_using_env_as_kwarg_works(self, caplog):
        execute_in_subprocess_with_kwargs(["bash", "-c", 'echo "My value is ${VALUE}"'], env=dict(VALUE="1"))
        assert "My value is 1" in caplog.text
def my_sleep_subprocess():
    """Child-process target: sleep long enough for the test to kill it."""
    sleep(100)
def my_sleep_subprocess_with_signals():
    """Child that ignores SIGINT/SIGTERM, forcing the kill helper to escalate."""
    signal.signal(signal.SIGINT, lambda signum, frame: None)
    signal.signal(signal.SIGTERM, lambda signum, frame: None)
    sleep(100)
class TestKillChildProcessesByPids(unittest.TestCase):
    """Tests for kill_child_processes_by_pids (graceful vs. forced kill)."""
    def test_should_kill_process(self):
        """A plain sleeper child disappears from the process table after the call."""
        # snapshot the PID count before spawning; sleep(0) yields the CPU so
        # the child has a chance to start before we count again
        before_num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
        process = multiprocessing.Process(target=my_sleep_subprocess, args=())
        process.start()
        sleep(0)
        num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
        assert before_num_process + 1 == num_process
        process_utils.kill_child_processes_by_pids([process.pid])
        num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
        assert before_num_process == num_process
    def test_should_force_kill_process(self):
        """A child ignoring SIGINT/SIGTERM must be force-killed when timeout=0."""
        process = multiprocessing.Process(target=my_sleep_subprocess_with_signals, args=())
        process.start()
        sleep(0)
        all_processes = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().splitlines()
        assert str(process.pid) in map(lambda x: x.strip(), all_processes)
        # timeout=0 leaves no grace period, so the helper logs a forced kill
        with self.assertLogs(process_utils.log) as cm:
            process_utils.kill_child_processes_by_pids([process.pid], timeout=0)
        assert any("Killing child PID" in line for line in cm.output)
        sleep(0)
        all_processes = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().splitlines()
        assert str(process.pid) not in map(lambda x: x.strip(), all_processes)
class TestPatchEnviron(unittest.TestCase):
    """Verify patch_environ applies overrides and always restores os.environ."""
    def test_should_update_variable_and_restore_state_when_exit(self):
        # outer mock.patch.dict guarantees a clean, restorable environment
        with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
            del os.environ["TEST_NOT_EXISTS"]
            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ
            with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
                assert "AFTER" == os.environ["TEST_NOT_EXISTS"]
                assert "AFTER" == os.environ["TEST_EXISTS"]
            # on context exit both variables are back to their prior state
            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ
    def test_should_restore_state_when_exception(self):
        """Restoration must also happen when the body raises."""
        with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
            del os.environ["TEST_NOT_EXISTS"]
            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ
            # suppress() swallows the raise so restoration can be asserted below
            with suppress(AirflowException):
                with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
                    assert "AFTER" == os.environ["TEST_NOT_EXISTS"]
                    assert "AFTER" == os.environ["TEST_EXISTS"]
                    raise AirflowException("Unknown exception")
            assert "BEFORE" == os.environ["TEST_EXISTS"]
            assert "TEST_NOT_EXISTS" not in os.environ
class TestCheckIfPidfileProcessIsRunning(unittest.TestCase):
    """Tests for check_if_pidfile_process_is_running."""
    def test_ok_if_no_file(self):
        # a missing pidfile is not an error
        check_if_pidfile_process_is_running('some/pid/file', process_name="test")
    def test_remove_if_no_process(self):
        # Assert file is deleted: a pidfile naming a dead PID is removed, so
        # the tempfile context manager fails to unlink it on exit.
        with pytest.raises(FileNotFoundError):
            with NamedTemporaryFile('+w') as pid_file:
                pid_file.write('19191919191919191991')
                pid_file.flush()
                check_if_pidfile_process_is_running(pid_file.name, process_name="test")
    def test_raise_error_if_process_is_running(self):
        # our own PID is definitely alive, so the helper must complain
        own_pid = os.getpid()
        with NamedTemporaryFile('+w') as pid_file:
            pid_file.write(str(own_pid))
            pid_file.flush()
            with pytest.raises(AirflowException, match="is already running under PID"):
                check_if_pidfile_process_is_running(pid_file.name, process_name="test")
class TestSetNewProcessGroup(unittest.TestCase):
    """Tests for set_new_process_group."""
    @mock.patch("os.setpgid")
    def test_not_session_leader(self, mock_set_pid):
        # when we are NOT the session leader, a new process group is created
        current_pid = os.getpid()
        with mock.patch('os.getsid', autospec=True) as mock_get_sid:
            mock_get_sid.return_value = current_pid + 1
            set_new_process_group()
            assert mock_set_pid.call_count == 1
    @mock.patch("os.setpgid")
    def test_session_leader(self, mock_set_pid):
        # a session leader must be left alone
        current_pid = os.getpid()
        with mock.patch('os.getsid', autospec=True) as mock_get_sid:
            mock_get_sid.return_value = current_pid
            set_new_process_group()
            assert mock_set_pid.call_count == 0
|
network.py | # Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import random
import re
from collections import defaultdict
import threading
import socket
import json
import sys
import ipaddress
import asyncio
from typing import NamedTuple, Optional
import dns
import dns.resolver
from aiorpcx import TaskGroup
from . import util
from .util import PrintError, print_error, aiosafe, bfh
from .bitcoin import COIN
from . import constants
from . import blockchain
from .interface import Interface, serialize_server, deserialize_server
from .version import PROTOCOL_VERSION
from .simple_config import SimpleConfig
# Seconds to wait before retrying node discovery / a failed server connection.
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """ parse servers list into dict format

    Each item looks like [ip, hostname, [flags...]], where a flag is one of
    's<port>'/'t<port>' (protocol + port), 'v<version>' or 'p<level>'
    (pruning level). Returns {hostname: info_dict}.
    """
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                # FIX: raw strings — "\d" in a plain string is an invalid
                # escape sequence (DeprecationWarning, error in future Pythons)
                if re.match(r"[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '': port = constants.net.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match(r"v(.?)+", v):
                    version = v[1:]
                elif re.match(r"p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '': pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
        servers[host] = out
    return servers
def filter_version(servers):
    """Keep only servers advertising a protocol version >= our PROTOCOL_VERSION."""
    def is_recent(version):
        try:
            return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION)
        except Exception:
            # unparsable / missing version strings are treated as too old
            return False
    return {host: info for host, info in servers.items()
            if is_recent(info.get('version'))}
def filter_noonion(servers):
    """Drop Tor hidden-service (.onion) hosts from the server map."""
    clearnet = {}
    for host, info in servers.items():
        if not host.endswith('.onion'):
            clearnet[host] = info
    return clearnet
def filter_protocol(hostmap, protocol='s'):
    '''Filters the hostmap for those implementing protocol.
    The result is a list in serialized form.'''
    return [serialize_server(host, portmap.get(protocol), protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
    """Return a random eligible serialized server, or None if none qualify.

    FIX: `exclude_set` previously defaulted to a shared mutable `set()` — the
    classic mutable-default pitfall. An explicit None default is equivalent
    (the set was never mutated here) and safe.
    """
    if exclude_set is None:
        exclude_set = set()
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
# Bundle of user-facing network settings (see Network.get_parameters()).
NetworkParameters = NamedTuple("NetworkParameters", [("host", str),
                                                     ("port", str),
                                                     ("protocol", str),
                                                     ("proxy", Optional[dict]),
                                                     ("auto_connect", bool)])
# Proxy modes recognized as an explicit leading token in a proxy string.
proxy_modes = ['socks4', 'socks5']
def serialize_proxy(p):
    """Serialize a proxy dict to 'mode:host:port:user:password'; None if not a dict."""
    if not isinstance(p, dict):
        return None
    fields = [p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', '')]
    return ':'.join(fields)
def deserialize_proxy(s: str) -> Optional[dict]:
    """Parse 'mode:host:port:user:password' (parts optional) into a proxy dict.

    Returns None for non-strings and for the literal 'none' (any case).
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = { "mode":"socks5", "host":"localhost" }
    # FIXME raw IPv6 address fails here
    parts = s.split(':')
    idx = 0
    if parts[idx] in proxy_modes:
        proxy["mode"] = parts[idx]
        idx += 1
    if idx < len(parts):
        proxy["host"] = parts[idx]
        idx += 1
    if idx < len(parts):
        proxy["port"] = parts[idx]
        idx += 1
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if idx < len(parts):
        proxy["user"] = parts[idx]
        idx += 1
    if idx < len(parts):
        proxy["password"] = parts[idx]
    return proxy
# Module-level singleton set by Network.__init__ (see Network.get_instance()).
INSTANCE = None
class Network(PrintError):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
verbosity_filter = 'n'
    def __init__(self, config=None):
        """Create the network manager and kick off connections.

        config: a SimpleConfig, a plain dict (wrapped into one), or None.
        Side effects: registers itself as the module-level INSTANCE, reads
        blockchains from disk, creates the certs directory, and calls
        start_network() with the default server's protocol and the configured
        proxy.
        """
        global INSTANCE
        INSTANCE = self
        if config is None:
            config = {}  # Do not use mutables as default values!
        self.config = SimpleConfig(config) if isinstance(config, dict) else config
        self.num_server = 10 if not self.config.get('oneserver') else 0
        blockchain.blockchains = blockchain.read_blockchains(self.config)  # note: needs self.blockchains_lock
        self.print_error("blockchains", list(blockchain.blockchains.keys()))
        self.blockchain_index = config.get('blockchain_index', 0)
        if self.blockchain_index not in blockchain.blockchains.keys():
            self.blockchain_index = 0
        # Server for addresses and transactions
        self.default_server = self.config.get('server', None)
        # Sanitize default server
        if self.default_server:
            try:
                deserialize_server(self.default_server)
            except:
                self.print_error('Warning: failed to parse server-string; falling back to random.')
                self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()
        # locks: if you need to take multiple ones, acquire them in the order they are defined here!
        self.bhi_lock = asyncio.Lock()
        self.interface_lock = threading.RLock()  # <- re-entrant
        self.callback_lock = threading.Lock()
        self.recent_servers_lock = threading.RLock()  # <- re-entrant
        self.blockchains_lock = threading.Lock()
        self.irc_servers = {}  # returned by interface (list from irc)
        self.recent_servers = self.read_recent_servers()  # note: needs self.recent_servers_lock
        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)  # note: needs self.callback_lock
        dir_path = os.path.join(self.config.path, 'certs')
        util.make_dir(dir_path)
        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # kick off the network. interface is the main server we are currently
        # communicating with. interfaces is the set of servers we are connecting
        # to or have an ongoing connection with
        self.interface = None  # note: needs self.interface_lock
        self.interfaces = {}  # note: needs self.interface_lock
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.requested_chunks = set()
        self.socket_queue = queue.Queue()
        self.start_network(deserialize_server(self.default_server)[2],
                           deserialize_proxy(self.config.get('proxy')))
        self.asyncio_loop = asyncio.get_event_loop()
    @staticmethod
    def get_instance():
        """Return the most recently constructed Network (module-level singleton)."""
        return INSTANCE
    def with_interface_lock(func):
        """Method decorator: run *func* while holding self.interface_lock."""
        def func_wrapper(self, *args, **kwargs):
            with self.interface_lock:
                return func(self, *args, **kwargs)
        return func_wrapper
    def with_recent_servers_lock(func):
        """Method decorator: run *func* while holding self.recent_servers_lock."""
        def func_wrapper(self, *args, **kwargs):
            with self.recent_servers_lock:
                return func(self, *args, **kwargs)
        return func_wrapper
def register_callback(self, callback, events):
with self.callback_lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.callback_lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
    def trigger_callback(self, event, *args):
        """Invoke every callback registered for *event*.

        Coroutine callbacks are scheduled onto the asyncio loop; plain
        callables run synchronously on the caller's thread. The list is
        copied under the lock so callbacks may (un)register during dispatch.
        """
        with self.callback_lock:
            callbacks = self.callbacks[event][:]
        for callback in callbacks:
            if asyncio.iscoroutinefunction(callback):
                # FIXME: if callback throws, we will lose the traceback
                asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
            else:
                callback(event, *args)
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
@with_recent_servers_lock
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
    @with_interface_lock
    def get_server_height(self):
        """Return the main interface's reported tip height (0 when no interface)."""
        return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
    def set_status(self, status):
        """Record the connection status and broadcast it to listeners."""
        self.connection_status = status
        self.notify('status')
    def is_connected(self):
        """True when a main interface exists and its handshake has completed."""
        return self.interface is not None and self.interface.ready.done()
    def is_connecting(self):
        """True while a connection attempt to the main server is in flight."""
        return self.connection_status == 'connecting'
    async def request_server_info(self, interface):
        """Once *interface* is ready, fetch banner, donation address, peer
        list, relay fee and fee estimates concurrently via a TaskGroup."""
        await interface.ready
        session = interface.session
        async def get_banner():
            self.banner = await session.send_request('server.banner')
            self.notify('banner')
        async def get_donation_address():
            self.donation_address = await session.send_request('server.donation_address')
        async def get_server_peers():
            self.irc_servers = parse_servers(await session.send_request('server.peers.subscribe'))
            self.notify('servers')
        async def get_relay_fee():
            relayfee = await session.send_request('blockchain.relayfee')
            if relayfee is None:
                self.relay_fee = None
            else:
                # convert BTC/kB float into satoshis, clamped at zero
                relayfee = int(relayfee * COIN)
                self.relay_fee = max(0, relayfee)
        async with TaskGroup() as group:
            await group.spawn(get_banner)
            await group.spawn(get_donation_address)
            await group.spawn(get_server_peers)
            await group.spawn(get_relay_fee)
            await group.spawn(self.request_fee_estimates(interface))
    async def request_fee_estimates(self, interface):
        """Query the mempool fee histogram and per-ETA fee estimates, store
        them on the config, and notify listeners."""
        session = interface.session
        from .simple_config import FEE_ETA_TARGETS
        self.config.requested_fee_estimates()
        async with TaskGroup() as group:
            histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
            fee_tasks = []
            for i in FEE_ETA_TARGETS:
                fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
        # all tasks have completed once the TaskGroup context exits
        self.config.mempool_fees = histogram = histogram_task.result()
        self.print_error('fee_histogram', histogram)
        self.notify('fee_histogram')
        for i, task in fee_tasks:
            # server returns BTC/kB; convert to satoshis
            fee = int(task.result() * COIN)
            self.config.update_fee_estimates(i, fee)
            self.print_error("fee_estimates[%d]" % i, fee)
        self.notify('fee')
def get_status_value(self, key):
    """Return the current value associated with a notification key.

    Raises:
        Exception: for an unknown key. (Previously an unknown key
        surfaced as an UnboundLocalError at the return statement.)
    """
    if key == 'status':
        value = self.connection_status
    elif key == 'banner':
        value = self.banner
    elif key == 'fee':
        value = self.config.fee_estimates
    elif key == 'fee_histogram':
        value = self.config.mempool_fees
    elif key == 'updated':
        value = (self.get_local_height(), self.get_server_height())
    elif key == 'servers':
        value = self.get_servers()
    elif key == 'interfaces':
        value = self.get_interfaces()
    else:
        # explicit failure instead of falling through to an unbound local
        raise Exception('unexpected status key: {}'.format(key))
    return value
def notify(self, key):
    """Fire the callback for *key*; most keys also carry their current
    value, but 'status' and 'updated' are fired without arguments."""
    if key in ['status', 'updated']:
        self.trigger_callback(key)
    else:
        self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self) -> NetworkParameters:
    """Snapshot the current network settings as a NetworkParameters tuple."""
    host, port, protocol = deserialize_server(self.default_server)
    return NetworkParameters(host, port, protocol, self.proxy, self.auto_connect)
def get_donation_address(self):
    """Return the server's donation address, or None while disconnected."""
    return self.donation_address if self.is_connected() else None
@with_interface_lock
def get_interfaces(self):
    '''The servers of the interfaces that are in connected state.'''
    return list(self.interfaces.keys())
@with_recent_servers_lock
def get_servers(self):
    """Return a dict of known servers: the hardcoded defaults merged with
    either the peers announced by the network, or (as a fallback) the
    locally-remembered recent servers. Honors the 'noonion' config flag.
    """
    # BUG FIX: copy the defaults. The original assigned the module-level
    # constants.net.DEFAULT_SERVERS dict directly and then mutated it via
    # out.update()/out[host]=..., polluting the shared constant on every call.
    out = dict(constants.net.DEFAULT_SERVERS)
    if self.irc_servers:
        out.update(filter_version(self.irc_servers.copy()))
    else:
        for s in self.recent_servers:
            try:
                host, port, protocol = deserialize_server(s)
            except Exception:
                # malformed entry in the on-disk cache; skip it
                continue
            if host not in out:
                out[host] = {protocol: port}
    if self.config.get('noonion'):
        out = filter_noonion(out)
    return out
@with_interface_lock
def start_interface(self, server):
    """Queue a connection attempt to *server*, unless it is already
    connected or already in flight. Connecting to the default server also
    flips the UI status to 'connecting'."""
    if (not server in self.interfaces and not server in self.connecting):
        if server == self.default_server:
            self.print_error("connecting to %s as new interface" % server)
            self.set_status('connecting')
        self.connecting.add(server)
        self.socket_queue.put(server)
def start_random_interface(self):
    """Pick a random eligible server (not disconnected, not already
    connected) and queue a connection to it. Returns the chosen server
    string, or a falsy value when none was eligible."""
    with self.interface_lock:
        # snapshot under the lock; the pick itself runs outside it
        exclude_set = self.disconnected_servers.union(set(self.interfaces))
    server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
    if server:
        self.start_interface(server)
    return server
def set_proxy(self, proxy: Optional[dict]):
    """Install or remove the SOCKS proxy, globally monkey-patching
    socket.getaddrinfo so DNS queries cannot leak around the proxy."""
    self.proxy = proxy
    # Store these somewhere so we can un-monkey-patch
    if not hasattr(socket, "_getaddrinfo"):
        socket._getaddrinfo = socket.getaddrinfo
    if proxy:
        self.print_error('setting proxy', proxy)
        # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
        # (returns the hostname unresolved; the proxy resolves it remotely)
        socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
    else:
        if sys.platform == 'win32':
            # On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
            # when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
            # see #4421
            socket.getaddrinfo = self._fast_getaddrinfo
        else:
            socket.getaddrinfo = socket._getaddrinfo
    self.trigger_callback('proxy_set', self.proxy)
@staticmethod
def _fast_getaddrinfo(host, *args, **kwargs):
    """getaddrinfo replacement (Windows) that resolves DNS via dnspython
    first, then delegates to the saved original socket._getaddrinfo."""
    def needs_dns_resolving(host2):
        # literal IPs and localhost need no DNS lookup
        try:
            ipaddress.ip_address(host2)
            return False  # already valid IP
        except ValueError:
            pass  # not an IP
        # NOTE(review): checks the outer `host`, not `host2` — harmless as
        # the only caller passes the same value, but worth confirming.
        if str(host) in ('localhost', 'localhost.',):
            return False
        return True
    try:
        if needs_dns_resolving(host):
            answers = dns.resolver.query(host)
            addr = str(answers[0])
        else:
            addr = host
    except dns.exception.DNSException:
        # dns failed for some reason, e.g. dns.resolver.NXDOMAIN
        # this is normal. Simply report back failure:
        raise socket.gaierror(11001, 'getaddrinfo failed')
    except BaseException as e:
        # Possibly internal error in dnspython :( see #4483
        # Fall back to original socket.getaddrinfo to resolve dns.
        print_error('dnspython failed to resolve dns with error:', e)
        addr = host
    return socket._getaddrinfo(addr, *args, **kwargs)
@with_interface_lock
def start_network(self, protocol: str, proxy: Optional[dict]):
    """Bring the network up from a clean state: set protocol and proxy,
    then connect to the default server. Asserts nothing is running."""
    assert not self.interface and not self.interfaces
    assert not self.connecting and self.socket_queue.empty()
    self.print_error('starting network')
    self.disconnected_servers = set([])  # note: needs self.interface_lock
    self.protocol = protocol
    self.set_proxy(proxy)
    self.start_interface(self.default_server)
@with_interface_lock
def stop_network(self):
    """Close every interface and discard any pending connection attempts."""
    self.print_error("stopping network")
    for interface in list(self.interfaces.values()):
        self.close_interface(interface)
    if self.interface:
        self.close_interface(self.interface)
    assert self.interface is None
    assert not self.interfaces
    self.connecting.clear()
    # Get a new queue - no old pending connections thanks!
    self.socket_queue = queue.Queue()
def set_parameters(self, net_params: NetworkParameters):
    """Apply new network parameters (server, proxy, auto_connect).

    Silently returns on malformed parameters or when the config refuses
    the change. Restarts the network only if proxy or protocol changed;
    a mere server change just switches interfaces.
    """
    proxy = net_params.proxy
    proxy_str = serialize_proxy(proxy)
    host, port, protocol = net_params.host, net_params.port, net_params.protocol
    server_str = serialize_server(host, port, protocol)
    # sanitize parameters: a round-trip / index / int() failure means the
    # caller handed us garbage, so bail out without touching anything
    try:
        deserialize_server(serialize_server(host, port, protocol))
        if proxy:
            proxy_modes.index(proxy["mode"]) + 1
            int(proxy['port'])
    except:
        return
    self.config.set_key('auto_connect', net_params.auto_connect, False)
    self.config.set_key("proxy", proxy_str, False)
    self.config.set_key("server", server_str, True)
    # abort if changes were not allowed by config
    if self.config.get('server') != server_str or self.config.get('proxy') != proxy_str:
        return
    self.auto_connect = net_params.auto_connect
    if self.proxy != proxy or self.protocol != protocol:
        # Restart the network defaulting to the given server
        with self.interface_lock:
            self.stop_network()
            self.default_server = server_str
            self.start_network(protocol, proxy)
    elif self.default_server != server_str:
        self.switch_to_interface(server_str)
    else:
        self.switch_lagging_interface()
        self.notify('updated')
def switch_to_random_interface(self):
    """Switch to a random connected server other than the current one."""
    candidates = [s for s in self.get_interfaces() if s != self.default_server]
    if candidates:
        self.switch_to_interface(random.choice(candidates))
@with_interface_lock
def switch_lagging_interface(self):
    '''If auto_connect and lagging, switch interface.'''
    if self.server_is_lagging() and self.auto_connect:
        # switch to one that has the correct header (not height)
        header = self.blockchain().read_header(self.get_local_height())
        def filt(x):
            # x is an (server, interface) item; keep interfaces whose tip
            # header matches our local header at this height
            a = x[1].tip_header
            b = header
            assert type(a) is type(b)
            return a == b
        filtered = list(map(lambda x: x[0], filter(filt, self.interfaces.items())))
        if filtered:
            choice = random.choice(filtered)
            self.switch_to_interface(choice)
@with_interface_lock
def switch_to_interface(self, server):
    '''Switch to server as our interface. If no connection exists nor
    being opened, start a thread to connect. The actual switch will
    happen on receipt of the connection notification. Do nothing
    if server already is our interface.'''
    self.default_server = server
    if server not in self.interfaces:
        self.interface = None
        self.start_interface(server)
        return
    i = self.interfaces[server]
    if self.interface != i:
        self.print_error("switching to", server)
        if self.interface is not None:
            # Stop any current interface in order to terminate subscriptions,
            # and to cancel tasks in interface.group.
            # However, for headers sub, give preference to this interface
            # over unknown ones, i.e. start it again right away.
            old_server = self.interface.server
            self.close_interface(self.interface)
            if len(self.interfaces) <= self.num_server:
                self.start_interface(old_server)
        self.interface = i
        # re-request server info (banner, fees, ...) from the new interface
        asyncio.get_event_loop().create_task(
            i.group.spawn(self.request_server_info(i)))
        self.trigger_callback('default_server_changed')
        self.set_status('connected')
        self.notify('updated')
        self.notify('interfaces')
@with_interface_lock
def close_interface(self, interface):
    """Close *interface*, drop it from self.interfaces, and clear the
    main-interface slot if it was the default server. None is a no-op."""
    if interface:
        if interface.server in self.interfaces:
            self.interfaces.pop(interface.server)
        if interface.server == self.default_server:
            self.interface = None
        interface.close()
@with_recent_servers_lock
def add_recent_server(self, server):
    """Move/insert *server* at the front of the MRU list (capped at 20
    entries) and persist the list to disk."""
    # list is ordered: most recently used first
    if server in self.recent_servers:
        self.recent_servers.remove(server)
    self.recent_servers.insert(0, server)
    self.recent_servers = self.recent_servers[0:20]
    self.save_recent_servers()
@with_interface_lock
def connection_down(self, server):
    '''A connection to server either went down, or was never made.
    We distinguish by whether it is in self.interfaces.'''
    self.disconnected_servers.add(server)
    if server == self.default_server:
        self.set_status('disconnected')
    if server in self.interfaces:
        self.close_interface(self.interfaces[server])
        self.notify('interfaces')
@aiosafe
async def new_interface(self, server):
    """Open a connection to *server* and register it on success.

    Waits (10s direct / 20s via proxy) for the handshake; on failure the
    server is marked disconnected. Either way the server is removed from
    the 'connecting' set.
    """
    interface = Interface(self, server, self.config.path, self.proxy)
    timeout = 10 if not self.proxy else 20
    try:
        await asyncio.wait_for(interface.ready, timeout)
    except BaseException as e:
        #import traceback
        #traceback.print_exc()
        self.print_error(interface.server, "couldn't launch because", str(e), str(type(e)))
        self.connection_down(interface.server)
        return
    finally:
        try: self.connecting.remove(server)
        except KeyError: pass
    with self.interface_lock:
        self.interfaces[server] = interface
    if server == self.default_server:
        self.switch_to_interface(server)
    self.add_recent_server(server)
    self.notify('interfaces')
def init_headers_file(self):
    """Ensure the headers file exists and is pre-sized to cover all
    checkpointed chunks (80 bytes per header, 2016 headers per chunk),
    by seeking to the end and writing a single zero byte."""
    b = blockchain.blockchains[0]
    filename = b.path()
    length = 80 * len(constants.net.CHECKPOINTS) * 2016
    if not os.path.exists(filename) or os.path.getsize(filename) < length:
        with open(filename, 'wb') as f:
            if length>0:
                f.seek(length-1)
                f.write(b'\x00')
    with b.lock:
        b.update_size()
async def get_merkle_for_transaction(self, tx_hash, tx_height):
    """Request the merkle proof for a confirmed transaction from the server."""
    return await self.interface.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
def broadcast_transaction_from_non_network_thread(self, tx, timeout=10):
    """Blocking wrapper around broadcast_transaction for other threads."""
    # note: calling this from the network thread will deadlock it
    fut = asyncio.run_coroutine_threadsafe(self.broadcast_transaction(tx, timeout=timeout), self.asyncio_loop)
    return fut.result()
async def broadcast_transaction(self, tx, timeout=10):
    """Broadcast *tx*; return (True, txid) on success or (False, "error: ...")
    on timeout, server error, or a txid mismatch in the server's reply."""
    try:
        out = await self.interface.session.send_request('blockchain.transaction.broadcast', [str(tx)], timeout=timeout)
    except asyncio.TimeoutError as e:
        return False, "error: operation timed out"
    except Exception as e:
        return False, "error: " + str(e)
    # the server echoes the txid back; anything else is an error string
    if out != tx.txid():
        return False, "error: " + out
    return True, out
async def request_chunk(self, height, tip, session=None, can_return_early=False):
    """Download and connect the 2016-header chunk containing *height*.

    Returns (connected, num_headers), or None when can_return_early is set
    and another task is already fetching the same chunk. The in-flight set
    self.requested_chunks dedupes concurrent requests.
    """
    if session is None: session = self.interface.session
    index = height // 2016
    if can_return_early and index in self.requested_chunks:
        return
    size = 2016
    if tip is not None:
        # clamp so we never request headers beyond the server's tip
        size = min(size, tip - index * 2016)
        size = max(size, 0)
    try:
        self.requested_chunks.add(index)
        res = await session.send_request('blockchain.block.headers', [index * 2016, size])
    finally:
        try: self.requested_chunks.remove(index)
        except KeyError: pass
    conn = self.blockchain().connect_chunk(index, res['hex'])
    if not conn:
        return conn, 0
    return conn, res['count']
@with_interface_lock
def blockchain(self):
    """Return the Blockchain object we are following, tracking the main
    interface's fork when one is connected."""
    if self.interface and self.interface.blockchain is not None:
        self.blockchain_index = self.interface.blockchain.forkpoint
    return blockchain.blockchains[self.blockchain_index]
@with_interface_lock
def get_blockchains(self):
    """Map each known blockchain index to the interfaces following it;
    chains with no followers are omitted."""
    out = {}
    with self.blockchains_lock:
        blockchain_items = list(blockchain.blockchains.items())
    for k, b in blockchain_items:
        r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
        if r:
            out[k] = r
    return out
def follow_chain(self, index):
    """Start following the blockchain fork with the given index: switch to
    an interface already on that chain (if any), then re-apply network
    parameters pointing at the current interface's server.

    Raises:
        Exception: when no blockchain with that index exists.
    """
    bc = blockchain.blockchains.get(index)
    if bc:
        self.blockchain_index = index
        self.config.set_key('blockchain_index', index)
        with self.interface_lock:
            interfaces = list(self.interfaces.values())
        for i in interfaces:
            if i.blockchain == bc:
                self.switch_to_interface(i.server)
                break
    else:
        raise Exception('blockchain not found', index)
    with self.interface_lock:
        if self.interface:
            net_params = self.get_parameters()
            host, port, protocol = deserialize_server(self.interface.server)
            net_params = net_params._replace(host=host, port=port, protocol=protocol)
            self.set_parameters(net_params)
def get_local_height(self):
    """Height of the locally-followed blockchain."""
    return self.blockchain().height()
def export_checkpoints(self, path):
    """Write the current chain's checkpoints to *path* as JSON."""
    # run manually from the console to generate checkpoints
    cp = self.blockchain().get_checkpoints()
    with open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(cp, indent=4))
def start(self, fx=None):
    """Start the network: spin up the asyncio loop on a wrapper thread,
    running maintain_sessions (and the optional *fx* coroutine) inside
    the main TaskGroup."""
    self.main_taskgroup = TaskGroup()
    async def main():
        self.init_headers_file()
        async with self.main_taskgroup as group:
            await group.spawn(self.maintain_sessions())
            if fx: await group.spawn(fx)
    self._wrapper_thread = threading.Thread(target=self.asyncio_loop.run_until_complete, args=(main(),))
    self._wrapper_thread.start()
def stop(self):
    """Cancel all tasks in the main TaskGroup (thread-safe)."""
    asyncio.run_coroutine_threadsafe(self.main_taskgroup.cancel_remaining(), self.asyncio_loop)
def join(self):
    """Wait (at most 1s) for the network wrapper thread to finish."""
    self._wrapper_thread.join(1)
async def maintain_sessions(self):
    """Main network loop (runs forever, ticking every 0.1s).

    Each iteration: drain the socket queue into new interface tasks,
    reap errored interfaces, top up connections to num_server, retry
    previously-disconnected nodes after NODES_RETRY_INTERVAL, and keep
    the main interface alive (auto-connect or retry the default server).
    """
    while True:
        # launch connections queued by start_interface()
        while self.socket_queue.qsize() > 0:
            server = self.socket_queue.get()
            asyncio.get_event_loop().create_task(self.new_interface(server))
        # reap interfaces whose task errored
        remove = []
        for k, i in self.interfaces.items():
            if i.fut.done() and not i.exception:
                assert False, "interface future should not finish without exception"
            if i.exception:
                if not i.fut.done():
                    try: i.fut.cancel()
                    except Exception as e: self.print_error('exception while cancelling fut', e)
                try:
                    raise i.exception
                except BaseException as e:
                    self.print_error(i.server, "errored because:", str(e), str(type(e)))
                remove.append(k)
        for k in remove:
            self.connection_down(k)
        # nodes
        now = time.time()
        for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
            self.start_random_interface()
        if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
            self.print_error('network: retrying connections')
            self.disconnected_servers = set([])
            self.nodes_retry_time = now
        # main interface
        if not self.is_connected():
            if self.auto_connect:
                if not self.is_connecting():
                    self.switch_to_random_interface()
            else:
                if self.default_server in self.disconnected_servers:
                    if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                        self.disconnected_servers.remove(self.default_server)
                        self.server_retry_time = now
                else:
                    self.switch_to_interface(self.default_server)
        else:
            if self.config.is_fee_estimates_update_required():
                await self.interface.group.spawn(self.request_fee_estimates(self.interface))
        await asyncio.sleep(0.1)
|
infoblitzd.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import json
import logging
import logging.config
import logging.config
import os
import re
import socket
import socketserver
import subprocess
import sys
import threading
import time
import urllib.parse
from datetime import timedelta
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler
from optparse import OptionParser
try: # make sure that (unsupported) Python2 can fail gracefully
import configparser
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
pass
if sys.version_info < (3, 5, 0):
print("Python2 not supported! Please run with Python3.5+")
sys.exit(1)
# MIME types served by the embedded HTTP dashboard
CTYPE_HTML = "text/html"
CTYPE_JSON = "application/json"
BOARD_NAME = "RaspiBlitz"
BOARD_VERSION = "0.93"
# file written by the RaspiBlitz setup naming the active network (bitcoin/litecoin)
NETWORK_FILE = "/home/admin/.network"
BITCOIN_HOME = "/home/bitcoin"
IF_NAME = "eth0"  # default network interface to inspect
TIMEOUT = 10  # default timeout in seconds
# per-currency CLI/daemon binary names and default P2P ports
CRYPTO_CURRENCIES = {
    "bitcoin": {
        "title": "Bitcoin",
        "cli": "bitcoin-cli",
        "daemon": "bitcoind",
        "testnet_dir": "testnet3",
        "mainnet_port": 8333,
        "testnet_port": 18333
    },
    "litecoin": {
        "title": "Litecoin",
        "cli": "litecoin-cli",
        "daemon": "litecoind",
        "testnet_dir": "testnet3", # ?!
        "mainnet_port": 9333,
        "testnet_port": 19333
    }
}
# module-wide root logger; configured by setup_logging() at startup
logger = logging.getLogger()
def setup_logging(default_path='infoblitz_logging.json'):
    """Setup logging configuration.

    Loads a dictConfig from *default_path* if it exists; otherwise falls
    back to the embedded default (console at ERROR, rotating file handler
    at DEBUG writing infoblitz.log).
    """
    path = default_path
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = json.load(f)
        logging.config.dictConfig(config)
    else:  # if infoblitz_logging.json does not exist use the following default log setup
        default_config_as_json = """
        {
            "version": 1,
            "disable_existing_loggers": false,
            "formatters": {
                "simple": {
                    "format": "%(asctime)s (%(threadName)-10s) %(name)s - %(levelname)s - %(message)s"
                },
                "extended": {
                    "format": "%(asctime)s (%(threadName)-10s) %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s"
                }
            },
            "handlers": {
                "console": {
                    "class": "logging.StreamHandler",
                    "level": "ERROR",
                    "formatter": "simple",
                    "stream": "ext://sys.stdout"
                },
                "file_handler": {
                    "class": "logging.handlers.RotatingFileHandler",
                    "level": "DEBUG",
                    "formatter": "extended",
                    "filename": "infoblitz.log",
                    "maxBytes": 10485760,
                    "backupCount": 2,
                    "encoding": "utf8"
                }
            },
            "loggers": {
                "infoblitz": {
                    "level": "INFO",
                    "handlers": ["console", "file_handler"],
                    "propagate": "no"
                }
            },
            "root": {
                "level": "DEBUG",
                "handlers": ["console", "file_handler"]
            }
        }
        """
        config = json.loads(default_config_as_json)
        logging.config.dictConfig(config)
def sigint_handler(signum, frame):
    """Signal handler: announce and exit cleanly on CTRL+C (SIGINT)."""
    print('CTRL+C pressed - exiting!')
    sys.exit(0)
def _red(string):
return "\033[91m{}\033[00m".format(string)
def _green(string):
return "\033[92m{}\033[00m".format(string)
def _yellow(string):
return "\033[93m{}\033[00m".format(string)
def _gray(string):
return "\033[97m{}\033[00m".format(string)
def _cyan(string):
return "\033[96m{}\033[00m".format(string)
def _purple(string):
return "\033[95m{}\033[00m".format(string)
def clear():
    """Clear the terminal (POSIX only; no-op elsewhere)."""
    # check and make call for specific operating system
    if os.name == 'posix':
        _ = os.system('clear')  # Linux and Mac OS
def get_ipv4_addresses(ifname):
    """Return the global-scope IPv4 addresses of *ifname*, parsed from
    `ip -4 addr show`. Example: get_ipv4_addresses("eth0")."""
    ip_addresses = []
    _res = subprocess.check_output(["ip", "-4", "addr", "show", "dev", "{}".format(ifname), "scope", "global", "up"])
    for line in _res.split(b"\n"):
        # e.g. "    inet 192.168.1.10/24 brd ..." -> capture before the "/"
        match = re.match(b".+inet (.+)/.+", line)
        if match:
            ip_addresses.append(match.groups()[0].decode('utf-8'))
    return ip_addresses
def get_ipv6_addresses(ifname):
    """Return the global-scope IPv6 addresses of *ifname*, parsed from
    `ip -6 addr show`, skipping temporary (mngtmpaddr) addresses."""
    ip_addresses = []
    _res = subprocess.check_output(["ip", "-6", "addr", "show", "dev", "{}".format(ifname), "scope", "global", "up"])
    for line in _res.split(b"\n"):
        match = re.match(b".+inet6 (.+)/.+", line)
        if match and b"mngtmpaddr" not in line:
            ip_addresses.append(match.groups()[0].decode('utf-8'))
    return ip_addresses
def port_check(address="127.0.0.1", port=8080, timeout=1.0):
    """Return True when a TCP connection to address:port succeeds within
    *timeout* seconds; False for invalid ports or any connection failure.
    """
    if not isinstance(port, int):
        return False
    # BUG FIX: 65535 is a valid TCP port; the original used `< 65535`
    # and wrongly rejected it.
    if not 0 < port <= 65535:
        return False
    s = socket.socket()
    s.settimeout(timeout)
    is_open = False
    try:
        s.connect((address, port))
        is_open = True
    except Exception as err:
        logger.warning("Something's wrong with {}:{}. Exception is {}".format(address, port, err))
    finally:
        s.close()
    return is_open
def run_user(cmd, shell=True, timeout=None):
    """Run *cmd* via subprocess.run, returning (result, success, timed_out).

    With shell=True, cmd must be a single string; with shell=False, a list
    of strings. Commands starting with "sudo" are never timed out (they may
    block on a password prompt). On success, result is stdout; on a non-zero
    exit, result is stderr; on timeout, result is None.
    """
    if shell:  # shell is potentially considered a security risk (command injection when taking user input)
        if not isinstance(cmd, str):
            raise ValueError("cmd to execute must be passed in a single string when shell is True")
        is_sudo = cmd.split(" ")[0] == "sudo"
    else:
        if not isinstance(cmd, list):
            raise ValueError("cmd to execute must be passed in as list of strings when shell is False")
        is_sudo = cmd[0] == "sudo"
    if is_sudo:
        timeout = None
    try:
        # subprocess.run requires Python3.5+
        completed = subprocess.run(cmd,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                   universal_newlines=True, shell=shell, timeout=timeout)
    except subprocess.TimeoutExpired:
        return None, False, True
    if completed.returncode:  # non-zero exit status
        return completed.stderr, False, False
    return completed.stdout, True, False
class QuietBaseHTTPRequestHandler(BaseHTTPRequestHandler):
    """Quiet http request handler

    Subclasses SimpleHTTPRequestHandler in order to overwrite the log_message
    method, letting us reduce output generated by the handler. Only standard
    messages are overwritten, so errors will still be displayed.
    """

    def __init__(self, request, client_address, server, board=None, board_lock=None):
        # NOTE(review): BaseHTTPRequestHandler.__init__ handles the request
        # before returning, so these assignments run too late to affect it.
        # ThreadedHTTPServer works around this by setting board/board_lock
        # as *class* attributes on the handler before serving.
        super().__init__(request, client_address, server)
        self.board = board
        self.board_lock = board_lock

    def do_GET(self):
        """Serve the favicon, the JSON metrics endpoint (/json/), or the
        HTML landing page; paths without a trailing slash are redirected."""
        parts = urllib.parse.urlsplit(self.path)
        if parts.path.endswith('/favicon.ico'):
            ctype = 'image/x-icon'
            # embedded 16x16 favicon, base64-encoded .ico data
            content = bytes(base64.b64decode(
                "AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAA"
                "AAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoJiIKKCYiWgAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAoJiIgKCYiuygmIhgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAoJiJDKCYi7SgmIlIAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoJiJz"
                "KCYi/SgmIqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAACgmIgooJiKmKCYi/ygmIuAoJiIOAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgmIh8oJiLPKCYi/ygm"
                "Iv4oJiI/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAACgmIkEoJiLrKCYi/ygmIv8oJiKMAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAACgmInAoJiL8KCYi/ygmIv8oJiL/"
                "KCYiySgmIpwoJiJzKCYiKQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgm"
                "IhYoJiJyKCYinCgmIsIoJiL8KCYi/ygmIv8oJiL/KCYinygmIgkAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoJiJTKCYi/ygm"
                "Iv8oJiL5KCYiaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAoJiIeKCYi7ygmIv8oJiLjKCYiNwAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoJiIDKCYixCgmIv8oJiK+"
                "KCYiFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAKCYigigmIv8oJiKJKCYiAwAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKCYiPigmIvAoJiJSAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "KCYiEigmIrooJiInAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAACgmIlooJiIMAAAAAAAAAAAAAAAA"
                "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA//8AAP/3"
                "AAD/7wAA/88AAP8fAAD+PwAA/D8AAPgfAAD4DwAA/j8AAPx/AAD4/wAA"
                "8f8AAPf/AADv/wAA//8AAA=="
            ))
        elif not parts.path.endswith('/'):
            # redirect browser - doing basically what apache does
            self.send_response(HTTPStatus.MOVED_PERMANENTLY)
            new_parts = (parts[0], parts[1], parts[2] + '/',
                         parts[3], parts[4])
            new_url = urllib.parse.urlunsplit(new_parts)
            self.send_header("Location", new_url)
            self.end_headers()
            return None
        elif parts.path.endswith('/json/'):
            ctype = CTYPE_JSON
            with self.board_lock:
                # dict_content = {"hello": "world",
                #                 "version": self.board.version.val,
                #                 "lnd_external": self.board.lnd_external.val}
                # round-trip through json to force plain serializable types
                json_content = json.loads(json.dumps(self.board.all_metrics()))
            content = bytes(json.dumps(json_content), "UTF-8")
        else:
            ctype = CTYPE_HTML
            content = bytes("<html><head><title>RaspiBlitz Info Dashboard</title></head>", "UTF-8")
            content += bytes("<body><h1>RaspiBlitz Info Dashboard</h1>", "UTF-8")
            content += bytes("<p>The Dashboard Version is: v{}</p>".format(self.board.version.val), "UTF-8")
            content += bytes("<p>The API Endpoint (JSON) is located here: <a href=\"/json/\">/json/</a></p>", "UTF-8")
            content += bytes("</body></html>", "UTF-8")
        self.send_response(200)
        self.send_header("Content-type", ctype)
        self.send_header("Content-Length", len(content))
        self.end_headers()
        self.wfile.write(content)

    def log_message(self, *args):
        """Overwrite so messages are not logged to STDOUT"""
        pass

    def log_request(self, code='-', size='-'):
        """Log an accepted request.

        This is called by send_response().
        """
        if isinstance(code, HTTPStatus):
            code = code.value
        logger.debug("{} - - [{}] \"{}\" {} {}".format(self.address_string(), self.log_date_time_string(),
                                                       self.requestline, str(code), str(size)))
class ThreadedHTTPServer(object):
    """Runs BaseHTTPServer in a thread

    Lets you start and stop an instance of SimpleHTTPServer.
    Usable as a context manager (start on enter, stop on exit).
    """

    def __init__(self, host, port, board=None, board_lock=None, name=None):
        """Prepare thread and socket server

        Creates the socket server that will use the HTTP request handler. Also
        prepares the thread to run the serve_forever method of the socket
        server as a daemon once it is started
        """
        request_handler = QuietBaseHTTPRequestHandler
        # set as class attributes: the TCPServer instantiates the handler
        # per-request, so instance attributes would arrive too late
        request_handler.board = board
        request_handler.board_lock = board_lock
        socketserver.TCPServer.allow_reuse_address = True
        self.server = socketserver.TCPServer((host, port), request_handler)
        self.server_thread = threading.Thread(name=name, target=self.server.serve_forever)
        self.server_thread.daemon = True

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.stop()

    def start(self):
        """Start the HTTP server

        Starts the serve_forever method of Socket running the request handler
        as a daemon thread
        """
        self.server_thread.start()

    def stop(self):
        """Stop the HTTP server

        Stops the server and cleans up the port assigned to the socket
        """
        self.server.shutdown()
        self.server.server_close()
# Benefit of using class instead of function: Can use clean signature instead of kwargs..!
# Benefit of using class instead of function: Can use clean signature instead of kwargs..!
class DashboardPrinter(threading.Thread):
    """Daemon thread that clears the terminal and re-renders the dashboard
    every *interval* seconds, holding *board_lock* while reading the board."""

    def __init__(self, group=None, target=None, name="DB_Printer",
                 board=None, board_lock=None, interval=None,
                 daemon=True, args=(), kwargs=None, ):
        super().__init__(group, target, name, daemon=daemon, args=args, kwargs=kwargs)
        self.board = board
        self.board_lock = board_lock
        self.interval = interval

    def run(self):
        while True:
            start = time.time()
            with self.board_lock:
                end = time.time()
                logger.info("Getting print lock took: {:.3f} seconds".format(end - start))
                clear()
                self.board.display()
            time.sleep(self.interval)
class DashboardUpdater(threading.Thread):
    """Daemon thread that refreshes all board metrics every *interval* s.

    The update is split into five phases, each holding *board_lock* only
    briefly and sleeping 0.05s in between, so the printer thread can
    interleave renders instead of being starved for the whole update.
    """

    def __init__(self, group=None, target=None, name="DB_Updater",
                 board=None, board_lock=None, interval=None,
                 daemon=True, args=(), kwargs=None, ):
        super().__init__(group, target, name, daemon=daemon, args=args, kwargs=kwargs)
        self.board = board
        self.board_lock = board_lock
        self.interval = interval

    def run(self):
        while True:
            logger.debug("Updating Dashboard")
            total_start = time.time()
            # phase 1: cheap local system metrics
            start = time.time()
            with self.board_lock:
                end = time.time()
                logger.debug("Getting update1 lock took: {:.3f} seconds".format(end - start))
                self.board.update_load()
                self.board.update_uptime()
                self.board.update_cpu_temp()
                self.board.update_memory()
                self.board.update_storage()
                self.board.update_ip_network_data()
            time.sleep(0.05)
            # phase 2: network/chain configuration
            start = time.time()
            with self.board_lock:
                end = time.time()
                logger.debug("Getting update2 lock took: {:.3f} seconds".format(end - start))
                self.board.update_network()
                self.board.update_bitcoin_dir()
                self.board.read_bitcoin_config()
                self.board.update_chain()
            time.sleep(0.05)
            # phase 3: bitcoind state
            start = time.time()
            with self.board_lock:
                end = time.time()
                logger.debug("Getting update3 lock took: {:.3f} seconds".format(end - start))
                self.board.update_bitcoin_binaries()
                self.board.check_bitcoind_is_running()
                self.board.update_bitcoin_daemon_version()
                self.board.update_bitcoin_data()
            time.sleep(0.05)
            # phase 4: lnd state
            start = time.time()
            with self.board_lock:
                end = time.time()
                logger.debug("Getting update4 lock took: {:.3f} seconds".format(end - start))
                self.board.update_lnd_dirs()
                self.board.read_lnd_config()
                self.board.check_lnd_is_running()
                self.board.update_lnd_wallet_is_locked()
                self.board.update_lnd_alias()
                self.board.update_lnd_data()
            time.sleep(0.05)
            # phase 5: public reachability checks (slowest: remote probes)
            start = time.time()
            with self.board_lock:
                end = time.time()
                logger.debug("Getting update5 lock took: {:.3f} seconds".format(end - start))
                self.board.update_public_ip()
                self.board.update_bitcoin_public_port()
                self.board.check_public_ip_lnd_port()
                self.board.check_public_ip_bitcoin_port()
            time.sleep(0.05)
            total_end = time.time()
            logger.info("Dashboard Value Update took: {:.3f} seconds".format(total_end - total_start))
            time.sleep(self.interval)
class Metric(object):
    """A single dashboard value plus its presentation: optional display
    text, prefix/suffix decoration, and an ANSI color style."""

    STYLES = ["default", "red", "green", "yellow", "gray", "cyan"]

    def __init__(self, val=None, txt=None, prefix=None, suffix=None, style="default", allow_empty=False):
        self.val = val  # "raw" value of the metric
        self._txt = txt  # display text for the raw value (e.g. MiB instead of bytes)
        self.prefix = prefix
        self.suffix = suffix
        if style not in self.STYLES:
            raise ValueError("unknown style!")
        self.style = style
        # when False (default), "prefix + n/a + suffix" is printed for empty values
        self.allow_empty = allow_empty

    @property
    def txt(self):
        # explicit display text wins; an explicit "" stays ""; otherwise
        # fall back to the raw value (None when there is no truthy value)
        if self._txt:
            return self._txt
        if self._txt == "":
            return ""
        return "{}".format(self.val) if self.val else None

    @txt.setter
    def txt(self, value):
        self._txt = value

    def __repr__(self):
        if not self.val:
            return "<{0}: n/a>".format(self.__class__.__name__)
        return "<{0}: {1}>".format(self.__class__.__name__, self.val)

    def __str__(self):
        return self.apply_style(string=self.to_txt(), style=self.style)

    def apply_style(self, string, style=None):
        """Colorize *string* per *style*; any "n/a" text is always purple."""
        style = style or "default"
        if "n/a" in string:
            return _purple(string)
        if not string:
            return "" if self.allow_empty else _purple(string)
        painters = {"red": _red, "green": _green, "yellow": _yellow,
                    "gray": _gray, "cyan": _cyan}
        painter = painters.get(style)
        return painter(string) if painter else string

    def to_dct(self):
        """Serializable dict of public attributes plus the rendered "txt"."""
        dct = {key: value for key, value in self.__dict__.items()
               if key not in ("_txt", "allow_empty")}
        dct.update({"txt": self.to_txt()})
        return dct

    def to_txt(self):
        """Render "prefix + text + suffix"; for empty values, either ""
        (allow_empty) or "prefix + n/a + suffix"."""
        prefix = self.prefix if self.prefix is not None else ""
        suffix = self.suffix if self.suffix is not None else ""
        text = self.txt
        if text:
            return "{0}{1}{2}".format(prefix, text, suffix)
        if self.allow_empty:
            return ""
        return "{0}n/a{1}".format(prefix, suffix)
class Dashboard(object):
def __init__(self, currency, interface=IF_NAME, timeout=TIMEOUT):
    """Initialize the dashboard for *currency* (a CRYPTO_CURRENCIES key),
    monitoring network *interface*, with *timeout* for external probes.

    All displayed values are Metric instances; plain attributes below are
    internal state only.
    """
    self.currency = CRYPTO_CURRENCIES[currency]
    # Attributes that are used internally but not displayed directly
    #
    self.interface = interface
    self.timeout = timeout
    self.ipv4_addresses = list()
    # NOTE(review): "lpv6" looks like a typo for "ipv6" — kept as-is in
    # case other code references this attribute name; verify before renaming.
    self.lpv6_addresses = list()
    self.bitcoin_dir = None
    self.lnd_dir = None
    self.lnd_macaroon_dir = None
    self.bitcoin_config = None
    self.lnd_config = None
    self.bitcoin_daemon = None
    self.bitcoin_cli = None
    # NOTE(review): "adresses" typo kept for the same reason.
    self.bitcoin_local_adresses = list()
    self.lnd_is_running = False
    # NOTE(review): "syned" typo (synced) kept for the same reason.
    self.lnd_is_syned = False
    self.lnd_wallet_is_locked = True
    # Dashboard Metrics (all values that are displayed somewhere) - in use
    #
    self.name = Metric()
    self.version = Metric()
    # System data
    self.load_one = Metric()
    self.load_five = Metric()
    self.load_fifteen = Metric()
    self.cpu_temp = Metric(suffix="°C")
    self.memory_total = Metric(suffix="M", style="green")
    self.memory_avail = Metric(suffix="M", style="green")
    # Storage
    self.sd_total_abs = Metric(suffix="G", style="green")
    self.sd_free_abs = Metric(suffix="G", style="green")
    self.sd_free = Metric(suffix="%", style="green")
    self.hdd_total_abs = Metric(suffix="G", style="green")
    self.hdd_free_abs = Metric(suffix="G", style="green")
    self.hdd_free = Metric(suffix="%", style="green")
    # IP Network/Traffic Info
    self.local_ip = Metric(style="green")
    self.network_tx = Metric()
    self.network_rx = Metric()
    self.public_ip = Metric(style="green")
    self.public_bitcoin_port = Metric(style="green")
    self.public_bitcoin_port_status = Metric(allow_empty=True)
    # Bitcoin / Chain Info
    self.network = Metric(style="default")
    self.chain = Metric("main", suffix="net", style="green")
    self.bitcoin_cli_version = Metric(style="green")
    self.bitcoin_version = Metric(style="green")
    self.bitcoin_is_running = False
    self.bitcoin_log_msgs = None
    self.sync_behind = Metric()
    self.sync_percentage = Metric(suffix="%", style="green")
    self.sync_status = Metric()
    # self.last_block = Metric()
    self.block_height = Metric()
    self.btc_line2 = Metric()
    self.mempool = Metric()
    # Tor (The Onion Router)
    self.tor_active = Metric(allow_empty=True)
    self.onion_addr = Metric()
    # LND (Lightning Network Daemon)
    self.lnd_alias = Metric(style="green")
    self.lnd_version = Metric(style="green")
    self.lnd_lncli_version = Metric(style="green")
    self.lnd_base_msg = Metric(allow_empty=True)
    self.lnd_channel_msg = Metric(allow_empty=True)
    self.lnd_channel_balance = Metric()
    self.lnd_channels_online = Metric()
    self.lnd_channels_total = Metric()
    self.lnd_external = Metric(style="yellow")
    self.public_ip_lnd_port_status = Metric(allow_empty=True)
    self.lnd_wallet_balance = Metric()
    self.lnd_wallet_lock_status = Metric()
    # Dashboard Metrics (all values that are displayed somewhere) - currently not in use
    #
    self.uptime = Metric()
    self.bitcoin_ipv4_reachable = Metric()
    self.bitcoin_ipv4_limited = Metric()
    self.bitcoin_ipv6_reachable = Metric()
    self.bitcoin_ipv6_limited = Metric()
    self.bitcoin_onion_reachable = Metric()
    self.bitcoin_onion_limited = Metric()
def __repr__(self):
return "<{0}: Version: {1}>".format(self.__class__.__name__, self.version)
def all_metrics(self):
"""Introspection: return list of all attributes that are Metric instances"""
return [{m: getattr(self, m).to_dct()} for m in [a for a in dir(self)] if isinstance(getattr(self, m), Metric)]
def update_load(self):
one, five, fifteen = os.getloadavg()
_cpu_count = os.cpu_count()
self.load_one.val = one
self.load_one.txt = "{:.2f}".format(self.load_one.val)
self.load_five.val = five
self.load_five.txt = "{:.2f}".format(self.load_five.val)
self.load_fifteen.val = fifteen
self.load_fifteen.txt = "{:.2f}".format(self.load_fifteen.val)
if float(self.load_one.val) < _cpu_count * 0.5:
self.load_one.style = "green"
elif float(self.load_one.val) < _cpu_count:
self.load_one.style = "yellow"
else:
self.load_one.style = "red"
if float(self.load_five.val) < _cpu_count * 0.5:
self.load_five.style = "green"
elif float(self.load_five.val) < _cpu_count:
self.load_five.style = "yellow"
else:
self.load_five.style = "red"
if float(self.load_fifteen.val) < _cpu_count * 0.5:
self.load_fifteen.style = "green"
elif float(self.load_fifteen.val) < _cpu_count:
self.load_fifteen.style = "yellow"
else:
self.load_fifteen.style = "red"
def update_uptime(self):
if not os.path.exists("/proc/uptime"):
return
with open("/proc/uptime", "r") as f:
_uptime_seconds = float(f.readline().split()[0])
self.uptime.val = int(timedelta(seconds=_uptime_seconds).total_seconds())
self.uptime.txt = "{}".format(self.uptime.val)
    def update_cpu_temp(self):
        """Read the SoC temperature from sysfs and update the metric."""
        if not os.path.exists("/sys/class/thermal/thermal_zone0/temp"):
            return
        with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
            content = int(f.readline().split("\n")[0])
        self.cpu_temp.val = content / 1000.0  # sysfs reports millidegrees Celsius
        self.cpu_temp.txt = "{:.0f}".format(self.cpu_temp.val)
        # NOTE(review): the red style is never reset -- once the CPU has been
        # above 80C the metric stays red on later polls; confirm intended
        if self.cpu_temp.val > 80.0:
            self.cpu_temp.style = "red"
def update_memory(self):
if not os.path.exists("/proc/meminfo"):
return
with open("/proc/meminfo", "r") as f:
content = f.readlines()
_meminfo = dict((i.split()[0].rstrip(':'), int(i.split()[1])) for i in content)
self.memory_total.val = _meminfo['MemTotal'] # e.g. 949440
self.memory_total.txt = "{:.0f}".format(self.memory_total.val / 1024)
self.memory_avail.val = _meminfo['MemAvailable'] # e.g. 457424
self.memory_avail.txt = "{:.0f}".format(self.memory_avail.val / 1024)
if self.memory_avail.val < 100000:
self.memory_total.style = "yellow"
self.memory_avail.style = "yellow"
def update_storage(self):
"""use statvfs interface to get free/used disk space
statvfs.f_frsize * statvfs.f_blocks # Size of filesystem in bytes
statvfs.f_frsize * statvfs.f_bfree # Actual number of free bytes
statvfs.f_frsize * statvfs.f_bavail # Number of free bytes that ordinary users are allowed to use
"""
if not os.path.exists("/"):
return
statvfs_sd = os.statvfs('/')
_sd_total_abs = statvfs_sd.f_frsize * statvfs_sd.f_blocks
_sd_free_abs = statvfs_sd.f_frsize * statvfs_sd.f_bavail
_sd_free = _sd_free_abs / _sd_total_abs * 100
if not os.path.exists("/mnt/hdd"):
return
statvfs_hdd = os.statvfs("/mnt/hdd")
_hdd_total_abs = statvfs_hdd.f_frsize * statvfs_hdd.f_blocks
# _hdd_free_abs = statvfs_hdd.f_frsize * statvfs_hdd.f_bfree
_hdd_free_abs = statvfs_hdd.f_frsize * statvfs_hdd.f_bavail
_hdd_free = _hdd_free_abs / _hdd_total_abs * 100
self.sd_total_abs.val = _sd_total_abs / 1024.0 / 1024.0 / 1024.0
self.sd_total_abs.txt = "{:.0f}".format(self.sd_total_abs.val)
self.sd_free_abs.val = _sd_free_abs / 1024.0 / 1024.0 / 1024.0
self.sd_free_abs.txt = "{:.0f}".format(self.sd_free_abs.val)
self.sd_free.val = _sd_free
self.sd_free.txt = "{:.0f}".format(self.sd_free.val)
self.hdd_total_abs.val = _hdd_total_abs / 1024.0 / 1024.0 / 1024.0
self.hdd_total_abs.txt = "{:.0f}".format(self.hdd_total_abs.val)
self.hdd_free_abs.val = _hdd_free_abs / 1024.0 / 1024.0 / 1024.0
self.hdd_free_abs.txt = "{:.0f}".format(self.hdd_free_abs.val)
self.hdd_free.val = _hdd_free
self.hdd_free.txt = "{:.0f}".format(self.hdd_free.val)
if self.hdd_free.val < 20:
self.hdd_free.style = "yellow"
elif self.hdd_free.val < 10:
self.hdd_free.style = "red"
def update_ip_network_data(self):
self.ipv4_addresses = get_ipv4_addresses(self.interface)
self.ipv6_addresses = get_ipv6_addresses(self.interface)
self.local_ip.val = self.ipv4_addresses[0]
if not os.path.exists("/sys/class/net/{0}/statistics/rx_bytes".format(self.interface)):
return
with open("/sys/class/net/{0}/statistics/rx_bytes".format(self.interface), 'r') as f:
_rx_bytes = float(f.readline().split()[0])
if not os.path.exists("/sys/class/net/{0}/statistics/tx_bytes".format(self.interface)):
return
with open("/sys/class/net/{0}/statistics/tx_bytes".format(self.interface), 'r') as f:
_tx_bytes = float(f.readline().split()[0])
if _tx_bytes / 1024.0 / 1024.0 / 1024.0 / 1024.0 > 1:
_tx_suffix = "TiB"
_tx_bytes_val = _tx_bytes / 1024.0 / 1024.0 / 1024.0 / 1024.0
elif _tx_bytes / 1024.0 / 1024.0 / 1024.0 > 1:
_tx_suffix = "GiB"
_tx_bytes_val = _tx_bytes / 1024.0 / 1024.0 / 1024.0
elif _tx_bytes / 1024.0 / 1024.0 > 1:
_tx_suffix = "MiB"
_tx_bytes_val = _tx_bytes / 1024.0 / 1024.0
elif _tx_bytes / 1024.0 > 1:
_tx_suffix = "KiB"
_tx_bytes_val = _tx_bytes / 1024.0
else:
_tx_suffix = "Byte"
_tx_bytes_val = _tx_bytes
if _rx_bytes / 1024.0 / 1024.0 / 1024.0 / 1024.0 > 1:
_rx_suffix = "TiB"
_rx_bytes_val = _rx_bytes / 1024.0 / 1024.0 / 1024.0 / 1024.0
elif _rx_bytes / 1024.0 / 1024.0 / 1024.0 > 1:
_rx_suffix = "GiB"
_rx_bytes_val = _rx_bytes / 1024.0 / 1024.0 / 1024.0
elif _rx_bytes / 1024.0 / 1024.0 > 1:
_rx_suffix = "MiB"
_rx_bytes_val = _rx_bytes / 1024.0 / 1024.0
elif _rx_bytes / 1024.0 > 1:
_rx_suffix = "KiB"
_rx_bytes_val = _rx_bytes / 1024.0
else:
_rx_suffix = "Byte"
_rx_bytes_val = _rx_bytes
self.network_rx = Metric(_rx_bytes_val, txt="{:.1f}".format(_rx_bytes_val), suffix=_rx_suffix)
self.network_tx = Metric(_tx_bytes_val, txt="{:.1f}".format(_tx_bytes_val), suffix=_tx_suffix)
def update_network(self):
# load network (bitcoin, litecoin, ..?!)
with open(NETWORK_FILE) as f:
content = f.readline().split("\n")[0]
if content not in list(CRYPTO_CURRENCIES.keys()):
raise ValueError("unexpected value in {}: {}".format(NETWORK_FILE, content))
self.network.val = content
if not self.network.val == self.currency["title"].lower():
raise ValueError("Crypto Currency in {} does not match selection!".format(NETWORK_FILE))
def update_bitcoin_dir(self):
self.bitcoin_dir = "{0}/.{1}".format(BITCOIN_HOME, self.network.val)
def read_bitcoin_config(self):
_bitcoin_conf = "{0}/{1}.conf".format(self.bitcoin_dir, self.network.val)
if not os.path.exists(_bitcoin_conf):
logger.warning("{} config not found: {}".format(self.currency["title"], _bitcoin_conf))
return
# need to do a little "hack" here as ConfigParser expects sections which bitcoin.conf does not have
with open(_bitcoin_conf, 'r') as f:
_config_string = '[DEFAULT]\n' + f.read()
config = configparser.ConfigParser(strict=False)
config.read_string(_config_string)
self.bitcoin_config = config # access with self.bitcoin_config["DEFAULT"]...
def update_chain(self):
# get chain (mainnet or testnet)
try:
if self.bitcoin_config["DEFAULT"]["testnet"] == "1":
self.chain.val = "test"
except KeyError:
pass # this is expected - if testnet is not present then mainnet is active
except TypeError as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
def update_bitcoin_binaries(self):
cmds = "which {}d".format(self.network.val)
_bitcoind, success, timed_out = run_user(cmds, timeout=self.timeout)
if success:
try:
self.bitcoin_daemon = _bitcoind.split("\n")[0]
except IndexError as err:
logger.warning("Error: {}".format(err))
else:
raise Exception("could not find network chain daemin tool: {}d".format(self.network.val))
cmds = "which {}-cli".format(self.network.val)
_bitcoin_cli, success, timed_out = run_user(cmds, timeout=self.timeout)
if success:
try:
self.bitcoin_cli = _bitcoin_cli.split("\n")[0]
except IndexError as err:
logger.warning("Error: {}".format(err))
else:
raise Exception("could not find network chain cli tool: {}-cli".format(self.network.val))
def check_bitcoind_is_running(self):
# check if bitcoind is running
cmds = "ps aux | grep -e \"{}.*-daemon\" | grep -v grep | wc -l".format(self.currency['daemon'])
_bitcoind_running, success, timed_out = run_user(cmds, timeout=self.timeout)
if success:
try:
if _bitcoind_running.split("\n")[0] == "0":
self.bitcoin_is_running = False
logger.warning("{} is not running".format(self.currency['daemon']))
return True
else:
self.bitcoin_is_running = True
return True
except IndexError as err:
logger.warning("Error: {}".format(err))
return False
def update_bitcoind_log(self):
# check bitcoind log
if self.chain.val == "test":
cmds = "sudo -u bitcoin tail -n 20 {}/{}/debug.log".format(self.bitcoin_dir, self.currency["testnet_dir"])
else:
cmds = "sudo -u bitcoin tail -n 20 {}/debug.log".format(self.bitcoin_dir)
_bitcoind_log, success, timed_out = run_user(cmds, timeout=self.timeout)
if success:
try:
self.bitcoin_log_msgs = [_bitcoind_log.split("\n")[-3], _bitcoind_log.split("\n")[-2]]
except IndexError as err:
logger.warning("Error: {}".format(err))
def update_bitcoin_daemon_version(self):
# get bitcoin version from daemon (bitcoind -version)
cmds = "{} -datadir={} -version".format(self.bitcoin_cli, self.bitcoin_dir)
_version_info, success, timed_out = run_user(cmds, timeout=self.timeout)
if success:
self.bitcoin_version.val = re.match("^.* v(.*$)", _version_info).groups()[0]
self.bitcoin_version.prefix = "v"
    def update_bitcoin_data(self):
        """Poll bitcoin-cli for block height, sync progress, mempool size and
        peer-network (ipv4/ipv6/onion) reachability, updating the metrics.

        Each cli call is independent and best-effort: failures are logged
        (or trigger a debug.log fallback) and the remaining queries still run.
        """
        # reset sync metrics so values from a previous poll don't linger
        self.sync_status.val = None
        self.sync_status.txt = None
        self.sync_status.style = "default"
        self.sync_percentage.val = None
        self.sync_percentage.txt = None
        self.sync_percentage.style = "green"
        # block count/height
        cmds = "{} -datadir={} getblockcount".format(self.bitcoin_cli, self.bitcoin_dir)
        _block_count, success, timed_out = run_user(cmds, timeout=self.timeout)
        if success:
            # reset self.bitcoin_log_msgs - which might have been set by update_bitcoind_log()
            self.bitcoin_log_msgs = None
            try:
                self.block_height.val = int(_block_count.split("\n")[0])
                self.block_height.txt = "{}".format(self.block_height.val)
            except IndexError as err:
                logger.warning("Error: {}".format(err))
        else:  # unable to run getblockcount.. maybe bitcoind is processing a long running job (e.g. txindex) TODO
            # try:
            #     last_line = _block_count.split("\n")[-2]
            # except AttributeError:
            #     pass
            # fall back to showing the daemon's own log tail instead
            self.update_bitcoind_log()
        # get blockchain (sync) status/percentage
        cmds = "{} -datadir={} getblockchaininfo".format(self.bitcoin_cli, self.bitcoin_dir)
        _chain_info, success, timed_out = run_user(cmds, timeout=self.timeout)
        if success:
            try:
                _block_verified = json.loads(_chain_info)["blocks"]
                _block_diff = int(self.block_height.val) - int(_block_verified)
                _progress = json.loads(_chain_info)["verificationprogress"]
                self.sync_percentage.val = _progress
                self.sync_percentage.txt = "{:.2f}".format(self.sync_percentage.val * 100)
                # NOTE(review): the branches below rebind self.sync_behind to a
                # plain str although __init__ created it as a Metric -- confirm
                if _block_diff == 0:  # fully synced
                    self.sync_status.val = _block_diff
                    self.sync_status.txt = "OK"
                    self.sync_status.style = "green"
                    self.sync_behind = " "
                elif _block_diff == 1:  # fully synced
                    self.sync_status.val = _block_diff
                    self.sync_status.txt = "OK"
                    self.sync_status.style = "green"
                    self.sync_behind = "-1 block"
                elif _block_diff <= 10:
                    self.sync_status.val = _block_diff
                    self.sync_status.txt = "catchup"
                    self.sync_status.style = "red"
                    self.sync_percentage.style = "red"
                    self.sync_behind = "-{} blocks".format(_block_diff)
                else:
                    self.sync_status.val = _block_diff
                    self.sync_status.txt = "progress"
                    self.sync_status.style = "red"
                    self.sync_percentage.style = "red"
                    self.sync_behind = "-{} blocks".format(_block_diff)
            except (KeyError, TypeError) as err:  # catch if result is None or expected key not present
                logger.warning("Error: {}".format(err))
        else:
            logger.debug("Error: getblockchaininfo")
        # mempool info
        cmds = "{} -datadir={} getmempoolinfo".format(self.bitcoin_cli, self.bitcoin_dir)
        _mempool_info, success, timed_out = run_user(cmds, timeout=self.timeout)
        if success:
            try:
                self.mempool.val = json.loads(_mempool_info)["size"]
            except (KeyError, TypeError) as err:  # catch if None, expected index/key not present
                logger.warning("Error: {}".format(err))
        # bitcoin network connectivity info
        cmds = "{} -datadir={} getnetworkinfo".format(self.bitcoin_cli, self.bitcoin_dir)
        _network_info, success, timed_out = run_user(cmds, timeout=self.timeout)
        if success:
            try:
                # one reachable/limited metric pair per transport network
                for nw in json.loads(_network_info)["networks"]:
                    if nw["name"] == "ipv4":
                        if nw["reachable"]:
                            self.bitcoin_ipv4_reachable.val = True
                            self.bitcoin_ipv4_reachable.txt = "True"
                            self.bitcoin_ipv4_reachable.style = "green"
                        else:
                            self.bitcoin_ipv4_reachable.val = False
                            self.bitcoin_ipv4_reachable.txt = "False"
                            self.bitcoin_ipv4_reachable.style = "red"
                        if nw["limited"]:
                            self.bitcoin_ipv4_limited.val = True
                            self.bitcoin_ipv4_limited.txt = "True"
                            self.bitcoin_ipv4_limited.style = "green"
                        else:
                            self.bitcoin_ipv4_limited.val = False
                            self.bitcoin_ipv4_limited.txt = "False"
                            self.bitcoin_ipv4_limited.style = "red"
                    if nw["name"] == "ipv6":
                        if nw["reachable"]:
                            self.bitcoin_ipv6_reachable.val = True
                            self.bitcoin_ipv6_reachable.txt = "True"
                            self.bitcoin_ipv6_reachable.style = "green"
                        else:
                            self.bitcoin_ipv6_reachable.val = False
                            self.bitcoin_ipv6_reachable.txt = "False"
                            self.bitcoin_ipv6_reachable.style = "red"
                        if nw["limited"]:
                            self.bitcoin_ipv6_limited.val = True
                            self.bitcoin_ipv6_limited.txt = "True"
                            self.bitcoin_ipv6_limited.style = "green"
                        else:
                            self.bitcoin_ipv6_limited.val = False
                            self.bitcoin_ipv6_limited.txt = "False"
                            self.bitcoin_ipv6_limited.style = "red"
                    if nw["name"] == "onion":
                        if nw["reachable"]:
                            self.bitcoin_onion_reachable.val = True
                            self.bitcoin_onion_reachable.txt = "True"
                            self.bitcoin_onion_reachable.style = "green"
                        else:
                            self.bitcoin_onion_reachable.val = False
                            self.bitcoin_onion_reachable.txt = "False"
                            self.bitcoin_onion_reachable.style = "red"
                        if nw["limited"]:
                            self.bitcoin_onion_limited.val = True
                            self.bitcoin_onion_limited.txt = "True"
                            self.bitcoin_onion_limited.style = "green"
                        else:
                            self.bitcoin_onion_limited.val = False
                            self.bitcoin_onion_limited.txt = "False"
                            self.bitcoin_onion_limited.style = "red"
            except (KeyError, TypeError) as err:  # catch if None, expected index/key not present
                logger.warning("Error: {}".format(err))
            # collect the addresses this node advertises to peers
            self.bitcoin_local_adresses = list()
            try:
                for la in json.loads(_network_info)["localaddresses"]:
                    if ":" in la["address"]:
                        # IPv6 literal: bracket it; keep only addresses we
                        # know belong to the local interface
                        if la["address"] in self.ipv6_addresses:
                            self.bitcoin_local_adresses.append("[{}]:{}".format(la["address"], la["port"]))
                    elif ".onion" in la["address"]:
                        self.bitcoin_local_adresses.append("{}:{}".format(la["address"], la["port"]))
                        # NOTE(review): bitcoin_onion_reachable is a Metric
                        # object and therefore always truthy here -- probably
                        # meant ``.val``; confirm
                        if self.bitcoin_onion_reachable:
                            self.tor_active = Metric("+ Tor")
                        else:
                            self.tor_active = Metric("+ Tor?")
                    else:
                        self.bitcoin_local_adresses.append("{}:{}".format(la["address"], la["port"]))
            except (KeyError, TypeError) as err:  # catch if None, expected index/key not present
                logger.warning("Error: {}".format(err))
def update_lnd_dirs(self):
# set datadir - requires network and chain to be set/checked
self.lnd_dir = "/home/bitcoin/.lnd"
self.lnd_macaroon_dir = "/home/bitcoin/.lnd/data/chain/{0}/{1}net".format(self.network.val, self.chain.val)
def read_lnd_config(self):
_lnd_conf = "{}/lnd.conf".format(self.lnd_dir)
if not os.path.exists(_lnd_conf):
return
config = configparser.ConfigParser(strict=False)
config.read(_lnd_conf)
self.lnd_config = config
    def check_lnd_is_running(self):
        """Set self.lnd_is_running by grepping the process list."""
        # check if lnd is running
        cmds = "ps aux | grep -e \"bin\/lnd\" | grep -v grep | wc -l"
        _lnd_running, success, timed_out = run_user(cmds, timeout=self.timeout)
        if success:
            try:
                if _lnd_running.split("\n")[0] == "0":
                    self.lnd_is_running = False
                    # print("WARN: LND not running!")
                else:
                    self.lnd_is_running = True
                    # NOTE(review): unlike check_bitcoind_is_running, only the
                    # "running" branch returns True -- a successful check that
                    # finds LND stopped falls through to return False; confirm
                    return True
            except IndexError as err:
                logger.warning("Error: {}".format(err))
        return False
    def update_lnd_wallet_is_locked(self):
        """Derive the wallet lock state from the last line of lnd.log."""
        # LN Wallet Lock Status
        cmds = "sudo tail -n 1 /mnt/hdd/lnd/logs/{0}/{1}net/lnd.log".format(self.network.val, self.chain.val)
        _ln_lock_status_log, success, timed_out = run_user(cmds)
        if success:
            if re.match(".*unlock.*", _ln_lock_status_log):
                # closed-padlock glyph: wallet waiting to be unlocked
                self.lnd_wallet_lock_status = Metric("\U0001F512", style="red")
                self.lnd_wallet_lock_status.val = True
                self.lnd_wallet_is_locked = True
            else:
                # open-padlock glyph: wallet unlocked
                self.lnd_wallet_lock_status = Metric("\U0001F513", style="green")
                self.lnd_wallet_lock_status.val = False
                self.lnd_wallet_is_locked = False
                # NOTE(review): returns False only on the unlocked path; both
                # "locked" and "log read failed" return True -- confirm callers
                # expect True to mean "locked or unknown"
                return False
        return True
# def _update_lncli_version(self):
# # get lnd client version client
# cmds = "/usr/local/bin/lncli --version"
# _ln_client_version, success, timed_out = run_user(cmds, timeout=self.timeout)
# if success:
# try:
# line = _ln_client_version.split("\n")[0]
# self.lnd_lncli_version.raw = line.split(" ")[2]
# self.lnd_lncli_version = self.lnd_lncli_version.raw
# except IndexError as err:
# logger.warning("Error: {}".format(err))
def update_lnd_alias(self):
try:
self.lnd_alias.val = self.lnd_config["Application Options"]["alias"]
except (KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
def update_lnd_data(self):
# reset any data that might be changed in this method
self.lnd_base_msg.val = None
self.lnd_base_msg.txt = None
self.lnd_base_msg.style = "default"
self.lnd_version.val = None
self.lnd_version.txt = None
self.lnd_version.style = "green"
self.lnd_external.val = None
self.lnd_external.txt = None
self.lnd_external.style = "yellow"
self.lnd_channel_msg.val = None
self.lnd_channel_msg.txt = None
self.lnd_channel_msg.style = "default"
self.lnd_wallet_balance.val = None
self.lnd_wallet_balance.txt = None
self.lnd_wallet_balance.style = "default"
self.lnd_channel_balance.val = None
self.lnd_channel_balance.txt = None
self.lnd_channel_balance.style = "default"
self.lnd_channels_online.val = None
self.lnd_channels_online.txt = None
self.lnd_channels_online.style = "default"
self.lnd_channels_total.val = None
self.lnd_channels_total.txt = None
self.lnd_channels_total.style = "default"
self.lnd_is_syned = False
# If LND is not running exit
if not self.lnd_is_running:
return
# If LN wallet is locked exit
if self.lnd_wallet_is_locked:
self.lnd_base_msg.val = "\U0001F512Locked"
self.lnd_base_msg.style = "red"
return
cmds = ("sudo -u bitcoin /usr/local/bin/lncli --macaroonpath={}/readonly.macaroon "
"--tlscertpath={}/tls.cert getinfo 2>/dev/null".format(self.lnd_macaroon_dir, self.lnd_dir))
_ln_get_info, success, timed_out = run_user(cmds)
if success:
if not _ln_get_info:
self.lnd_base_msg.val = "Not Started/Ready Yet"
self.lnd_base_msg.style = "red"
else:
try:
self.lnd_version.val = json.loads(_ln_get_info)["version"].split(" ")[0]
except (IndexError, KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
try:
self.lnd_external.val = json.loads(_ln_get_info)["uris"][0]
except (IndexError, KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
try:
if not json.loads(_ln_get_info)["synced_to_chain"]:
self.lnd_is_syned = False
else:
self.lnd_is_syned = True
except (KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
if self.lnd_is_syned:
# synched_to_chain is True
cmds = ("sudo -u bitcoin /usr/local/bin/lncli "
"--macaroonpath={}/readonly.macaroon --tlscertpath={}/tls.cert "
"walletbalance 2>/dev/null".format(self.lnd_macaroon_dir, self.lnd_dir))
_ln_wallet_balance, success, timed_out = run_user(cmds)
if success:
try:
self.lnd_wallet_balance.val = int(json.loads(_ln_wallet_balance)["confirmed_balance"])
self.lnd_wallet_balance.txt = "{}".format(self.lnd_wallet_balance.val)
self.lnd_wallet_balance.style = "yellow"
except (KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
self.lnd_wallet_balance.val = None
self.lnd_wallet_balance.txt = None
cmds = ("sudo -u bitcoin /usr/local/bin/lncli "
"--macaroonpath={}/readonly.macaroon --tlscertpath={}/tls.cert "
"channelbalance 2>/dev/null".format(self.lnd_macaroon_dir, self.lnd_dir))
_ln_channel_balance, success, timed_out = run_user(cmds)
if success:
try:
self.lnd_channel_balance.val = int(json.loads(_ln_channel_balance)["balance"])
self.lnd_channel_balance.txt = "{}".format(self.lnd_channel_balance.val)
self.lnd_channel_balance.style = "yellow"
except (KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
self.lnd_channel_balance.val = None
self.lnd_channel_balance.txt = None
try:
self.lnd_channels_online.val = int(json.loads(_ln_get_info)["num_active_channels"])
self.lnd_channels_online.txt = "{}".format(self.lnd_channels_online.val)
except (KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
self.lnd_channels_online.val = None
self.lnd_channels_online.txt = None
except json.decoder.JSONDecodeError as err: # catch if LND is unable to respond
logger.warning("Error: {}".format(err))
self.lnd_channels_online.val = None
self.lnd_channels_online.txt = None
cmds = ("sudo -u bitcoin /usr/local/bin/lncli "
"--macaroonpath={}/readonly.macaroon --tlscertpath={}/tls.cert "
"listchannels 2>/dev/null".format(self.lnd_macaroon_dir, self.lnd_dir))
_ln_list_channels, success, timed_out = run_user(cmds)
if success:
try:
self.lnd_channels_total.val = len(json.loads(_ln_list_channels)["channels"])
except (KeyError, TypeError) as err: # catch if None, expected index/key not present
logger.warning("Error: {}".format(err))
else: # LND is not synched
# is Bitcoind running?!
if not self.bitcoin_is_running:
self.lnd_base_msg.val = "{} not running or not ready".format(self.currency['daemon'])
self.lnd_base_msg.vale = self.lnd_base_msg.val
self.lnd_base_msg.style = "red"
return
self.lnd_base_msg.val = "Waiting for chain sync"
self.lnd_base_msg.txt = self.lnd_base_msg.val
self.lnd_base_msg.style = "red"
cmds = ("sudo -u bitcoin tail -n 10000 "
"/mnt/hdd/lnd/logs/{}/{}net/lnd.log".format(self.network.val, self.chain.val))
_ln_item, success, timed_out = run_user(cmds)
if not success:
self.lnd_channel_msg.val = "?!"
self.lnd_channel_msg.style = "red"
else:
_last_match = ""
for line in _ln_item.split("\n"):
obj = re.match(".*\(height=(\d+).*", line)
if obj:
_last_match = obj.groups()[0]
else:
obj = re.match(".*Caught up to height (\d+)$", line)
if obj:
_last_match = obj.groups()[0]
try:
_last_match = int(_last_match)
except ValueError:
_last_match = 0
if self.block_height.val:
if int(_last_match) > 0:
self.lnd_channel_msg.val = int(_last_match)
self.lnd_channel_msg.txt = "-> scanning {}/{}".format(_last_match, self.block_height)
self.lnd_channel_msg.style = "red"
else:
self.lnd_channel_msg.val = int(_last_match)
self.lnd_channel_msg.txt = "-> scanning ??/{}".format(self.block_height)
self.lnd_channel_msg.style = "red"
def update_public_ip(self):
try:
f = urlopen('http://v4.ipv6-test.com/api/myip.php')
self.public_ip.val = f.read(100).decode('utf-8')
except Exception as err:
logger.warning("_update_public_ip failed: {}".format(err))
def update_bitcoin_public_port(self):
try:
_public_bitcoin_port = self.bitcoin_config["DEFAULT"]["port"]
except KeyError:
if self.chain.val == "test":
_public_bitcoin_port = self.currency["testnet_port"]
else:
_public_bitcoin_port = self.currency["mainnet_port"]
self.public_bitcoin_port.val = _public_bitcoin_port
def check_public_ip_bitcoin_port(self):
if port_check(self.public_ip.val, self.public_bitcoin_port.val, timeout=2.0):
self.public_bitcoin_port_status.val = True
self.public_bitcoin_port_status.txt = ""
self.public_ip.style = "green"
self.public_bitcoin_port.style = "green"
else:
self.public_bitcoin_port_status.val = False
self.public_bitcoin_port_status.txt = "not reachable"
self.public_bitcoin_port_status.style = "red"
self.public_ip.style = "red"
self.public_bitcoin_port.style = "red"
def check_public_ip_lnd_port(self):
if not self.lnd_external.val:
return
try:
_public_lnd_port = int(self.lnd_external.val.split(":")[1])
if _public_lnd_port:
if port_check(self.public_ip.val, _public_lnd_port, timeout=2.0):
self.public_ip_lnd_port_status.val = True
self.public_ip_lnd_port_status.txt = ""
else:
self.public_ip_lnd_port_status.val = False
self.public_ip_lnd_port_status.txt = "not reachable"
self.public_ip_lnd_port_status.style = "red"
except IndexError as err:
logger.warning("Error: {}".format(err))
    def update(self):
        """update Metrics directly or call helper methods

        Currently a no-op placeholder: the individual update_* helpers below
        are intended to be enabled/ordered here (dir and config readers must
        run before the methods that depend on them).
        """
        pass
        # self.update_load()
        # self.update_uptime()
        # self.update_cpu_temp()
        # self.update_memory()
        # self.update_storage()
        # self.update_ip_network_data()
        # self.update_network()
        #
        # self.update_bitcoin_dir()
        # self.read_bitcoin_config()
        #
        # self.update_chain()
        #
        # self.update_bitcoin_binaries()
        # self.check_bitcoind_is_running()
        # self.update_bitcoin_daemon_version()
        # self.update_bitcoin_data()
        # self.update_lnd_dirs()
        # self.read_lnd_config()
        # self.check_lnd_is_running()
        # self.update_lnd_wallet_is_locked()
        # self.update_lnd_alias()
        # self.update_lnd_data()
        #
        # self.update_public_ip()
        # self.update_bitcoin_public_port()
        # self.check_public_ip_lnd_port()
        # self.check_public_ip_bitcoin_port()
def display(self):
logo0 = _yellow(" ")
logo1 = _yellow(" ,/ ")
logo2 = _yellow(" ,'/ ")
logo3 = _yellow(" ,' / ")
logo4 = _yellow(" ,' /_____, ")
logo5 = _yellow(" .'____ ,' ")
logo6 = _yellow(" / ,' ")
logo7 = _yellow(" / ,' ")
logo8 = _yellow(" /,' ")
logo9 = _yellow(" /' ")
if self.lnd_is_running:
if self.lnd_wallet_is_locked:
lnd_info = Metric("Running", style="yellow")
else:
lnd_info = self.lnd_version
else:
lnd_info = Metric("Not Running", style="red")
line9 = "LND {}".format(lnd_info)
if self.lnd_base_msg.val and self.lnd_channel_msg.val:
line9 = "{} {}\n {}".format(line9, self.lnd_base_msg, self.lnd_channel_msg)
elif self.lnd_base_msg.val:
line9 = "{} {}".format(line9, self.lnd_base_msg)
elif self.lnd_channel_msg.val:
line9 = "{} {}".format(line9, self.lnd_channel_msg)
if not (self.lnd_channels_online.val and self.lnd_channels_total.val):
pass
else:
if self.lnd_channels_online.val <= self.lnd_channels_total.val:
self.lnd_channels_online.style = "yellow"
self.lnd_channels_total.style = "yellow"
elif self.lnd_channels_online.val == self.lnd_channels_total.val:
self.lnd_channels_online.style = "green"
self.lnd_channels_total.style = "green"
else:
self.lnd_channels_online.style = "red"
self.lnd_channels_total.style = "red"
lines = [
logo0,
logo0 + "{} {} {}".format(self.name, self.version, self.lnd_alias),
logo0 + "{} {} {}".format(self.network, "Fullnode + Lightning Network", self.tor_active),
logo1 + _yellow("-------------------------------------------"),
logo2 + "{} {}, {}, {} {} {}".format("load average:", self.load_one, self.load_five, self.load_fifteen,
"CPU:", self.cpu_temp),
logo3 + "{} {} / {} {} {} ({})".format("Free Mem:", self.memory_avail, self.memory_total,
"Free HDD:", self.hdd_free_abs, self.hdd_free),
logo4 + "{}{} ▼{} ▲{}".format("ssh admin@", self.local_ip, self.network_rx, self.network_tx),
logo5,
logo6 + "{} {} {} {} {} ({})".format(self.network, self.bitcoin_version, self.chain,
"Sync", self.sync_status, self.sync_percentage),
logo7 + "{} {}:{} {}".format("Public", self.public_ip, self.public_bitcoin_port,
self.public_bitcoin_port_status),
logo8 + "{} {} {}".format("", "", ""),
logo9 + line9,
logo0 + "Wallet {} sat {}/{} Chan {} sat".format(self.lnd_wallet_balance,
self.lnd_channels_online, self.lnd_channels_total,
self.lnd_channel_balance),
logo0,
"{} {}".format(self.lnd_external, self.public_ip_lnd_port_status)
]
if self.bitcoin_log_msgs:
lines.append(_yellow("Last lines of: ") + _red("bitcoin/debug.log"))
for msg in self.bitcoin_log_msgs:
if len(msg) <= 60:
lines.append(msg)
else:
lines.append(msg[0:57] + "...")
if len(self.bitcoin_local_adresses) == 1:
lines.append("\nAdditional Public Address (e.g. IPv6)")
lines.append("* {}".format(self.bitcoin_local_adresses[0]))
elif len(self.bitcoin_local_adresses) >= 1:
lines.append("\nAdditional Public Addresses (e.g. IPv6) only showing first")
lines.append("* {}".format(self.bitcoin_local_adresses[0]))
for line in lines:
print(line)
# def update_and_display(self):
# self.update()
# clear()
# self.display()
def main():
    """Parse CLI options, build the Dashboard and start the worker threads.

    Fixes:
    * ``--port`` default is now the int 8000 -- optparse does not type-convert
      defaults, so the old ``default="8000"`` reached the HTTP server as a str
      when ``-P`` was not given.
    * ``--timeout`` is now honored; the old code hard-coded ``board.timeout = 120``.
    """
    setup_logging()
    usage = "usage: %prog [Options]"
    parser = OptionParser(usage=usage, version="%prog {}".format(BOARD_VERSION))
    parser.add_option("-H", "--host", dest="host", type="string", default="localhost",
                      help="Host to listen on (default localhost)")
    parser.add_option("-P", "--port", dest="port", type="int", default=8000,
                      help="Port to listen on (default 8000)")
    parser.add_option("-c", "--crypto-currency", dest="crypto_currency", type="string", default="bitcoin",
                      help="Currency/Network to report on (default bitcoin)")
    parser.add_option("-t", "--timeout", dest="timeout", type="int", default=TIMEOUT,
                      help="how long to wait for data to be collected (default {} sec)".format(TIMEOUT))
    parser.add_option("-r", "--refresh", dest="refresh", type="int", default=5,
                      help="interval to refresh data when looping (default 5 sec)")
    parser.add_option("--interface", dest="interface", type="string", default=IF_NAME,
                      help="network interface to report on (default {})".format(IF_NAME))
    options, args = parser.parse_args()
    crypto_currency = options.crypto_currency.lower()
    if crypto_currency not in list(CRYPTO_CURRENCIES.keys()):
        raise ValueError("Unexpected Crypto Currency given: {}".format(options.crypto_currency))
    logger.info("Starting infoBlitz...")
    board = Dashboard(crypto_currency)
    board.timeout = options.timeout  # was hard-coded to 120, ignoring -t
    board.interface = options.interface
    board.name = Metric(BOARD_NAME, style="yellow")
    board.version = Metric(BOARD_VERSION, style="yellow")
    # use a threading.Lock() to ensure access to the same data from different threads
    board_lock = threading.Lock()
    dashboard_updater_thread = DashboardUpdater(board=board, board_lock=board_lock, interval=options.refresh)
    dashboard_printer_thread = DashboardPrinter(board=board, board_lock=board_lock, interval=options.refresh + 10)
    web_server_thread = ThreadedHTTPServer(options.host, options.port, board, board_lock, name="Web_Server")
    logger.info("Starting Dashboard Updater")
    dashboard_updater_thread.start()
    logger.info("Starting Dashboard Printer")
    dashboard_printer_thread.start()
    logger.info("Starting Web Server: http://{}:{}".format(options.host, options.port))
    web_server_thread.start()
    # for info/debug only
    logger.debug("Threads: [{}]".format("; ".join([t.getName() for t in threading.enumerate()])))
    try:
        while True:  # run in loop that can be interrupted with CTRL+c
            time.sleep(0.2)  # ToDO check.. not quite sure..
    except KeyboardInterrupt:
        logger.debug("Stopping server loop")
        web_server_thread.stop()
        sys.exit(0)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
e2elive.py | #!/usr/bin/env python3
#
import atexit
import glob
import gzip
import io
import json
import logging
import os
import random
import shutil
import sqlite3
import subprocess
import sys
import tempfile
import threading
import time
import urllib.request
from util import xrun, atexitrun, find_indexer, ensure_test_db, firstFromS3Prefix
logger = logging.getLogger(__name__)
def main():
    """Indexer end-to-end test driver.

    Boots a test network (from --source-net, $E2EDATA, or an S3 fixture),
    runs algorand-indexer against its Primary node, waits for the indexer
    to catch up to the network's last round, then runs the validation
    scripts.

    Returns:
        0 on success, 1 if the indexer never reported healthy/caught-up.
    """
    start = time.time()
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument('--keep-temps', default=False, action='store_true')
    ap.add_argument('--indexer-bin', default=None, help='path to algorand-indexer binary, otherwise search PATH')
    ap.add_argument('--indexer-port', default=None, type=int, help='port to run indexer on. defaults to random in [4000,30000]')
    ap.add_argument('--connection-string', help='Use this connection string instead of attempting to manage a local database.')
    ap.add_argument('--source-net', help='Path to test network directory containing Primary and other nodes. May be a tar file.')
    ap.add_argument('--verbose', default=False, action='store_true')
    args = ap.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    indexer_bin = find_indexer(args.indexer_bin)
    sourcenet = args.source_net
    source_is_tar = False
    if not sourcenet:
        e2edata = os.getenv('E2EDATA')
        sourcenet = e2edata and os.path.join(e2edata, 'net')
    if sourcenet and hassuffix(sourcenet, '.tar', '.tar.gz', '.tar.bz2', '.tar.xz'):
        source_is_tar = True
    tempdir = tempfile.mkdtemp()
    if not args.keep_temps:
        atexit.register(shutil.rmtree, tempdir, onerror=logger.error)
    else:
        logger.info("leaving temp dir %r", tempdir)
    if not (source_is_tar or (sourcenet and os.path.isdir(sourcenet))):
        # fetch test data from S3 (anonymous, unsigned access)
        bucket = 'algorand-testdata'
        import boto3
        from botocore.config import Config
        from botocore import UNSIGNED
        s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
        tarname = 'net_done.tar.bz2'
        tarpath = os.path.join(tempdir, tarname)
        firstFromS3Prefix(s3, bucket, 'indexer/e2e2', tarname, outpath=tarpath)
        source_is_tar = True
        sourcenet = tarpath
    tempnet = os.path.join(tempdir, 'net')
    if source_is_tar:
        xrun(['tar', '-C', tempdir, '-x', '-f', sourcenet])
    else:
        xrun(['rsync', '-a', sourcenet + '/', tempnet + '/'])
    blockfiles = glob.glob(os.path.join(tempdir, 'net', 'Primary', '*', '*.block.sqlite'))
    lastblock = countblocks(blockfiles[0])
    #subprocess.run(['find', tempnet, '-type', 'f'])
    xrun(['goal', 'network', 'start', '-r', tempnet])
    atexitrun(['goal', 'network', 'stop', '-r', tempnet])
    psqlstring = ensure_test_db(args.connection_string, args.keep_temps)
    algoddir = os.path.join(tempnet, 'Primary')
    aiport = args.indexer_port or random.randint(4000, 30000)
    cmd = [indexer_bin, 'daemon', '-P', psqlstring, '--dev-mode', '--algod', algoddir, '--server', ':{}'.format(aiport)]
    logger.debug("%s", ' '.join(map(repr, cmd)))
    indexerdp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    indexerout = subslurp(indexerdp.stdout)
    indexerout.start()
    atexit.register(indexerdp.kill)
    time.sleep(0.2)
    indexerurl = 'http://localhost:{}/'.format(aiport)
    healthurl = indexerurl + 'health'
    # Poll /health until the indexer catches up with the network's last round.
    # Fixed: the tuple target used to be named `json`, shadowing the imported
    # json module for the remainder of this function.
    ok = False
    health_body = ''
    for attempt in range(20):
        (ok, health_body) = tryhealthurl(healthurl, args.verbose, waitforround=lastblock)
        if ok:
            logger.debug('health round=%s OK', lastblock)
            break
        time.sleep(0.5)
    if not ok:
        logger.error('could not get indexer health, or did not reach round=%s\n%s', lastblock, health_body)
        sys.stderr.write(indexerout.dump())
        return 1
    try:
        # On validation failure, dump the captured indexer output for debugging.
        xrun(['python3', 'misc/validate_accounting.py', '--verbose', '--algod', algoddir, '--indexer', indexerurl], timeout=20)
        xrun(['go', 'run', 'cmd/e2equeries/main.go', '-pg', psqlstring, '-q'], timeout=15)
    except Exception:
        sys.stderr.write(indexerout.dump())
        raise
    dt = time.time() - start
    sys.stdout.write("indexer e2etest OK ({:.1f}s)\n".format(dt))
    return 0
def hassuffix(x, *suffixes):
    """Return True when `x` ends with any of the given suffixes."""
    # str.endswith accepts a tuple of candidates, covering all in one call.
    return x.endswith(suffixes)
def countblocks(path):
    """Return the highest round number recorded in a node's block database.

    Args:
        path: filesystem path to a ``*.block.sqlite`` file containing a
            ``blocks`` table with an ``rnd`` column.

    Returns:
        The ``max(rnd)`` value (None when the table is empty).
    """
    db = sqlite3.connect(path)
    try:
        cursor = db.cursor()
        try:
            cursor.execute("SELECT max(rnd) FROM blocks")
            row = cursor.fetchone()
        finally:
            cursor.close()
    finally:
        # Always release the connection, even if the query raises
        # (the original leaked the handle on error).
        db.close()
    return row[0]
def tryhealthurl(healthurl, verbose=False, waitforround=100):
    """Poll the indexer ``/health`` endpoint once.

    Args:
        healthurl: full URL of the health endpoint.
        verbose: when True, log failures at WARNING level.
        waitforround: round the indexer must have reached to count as ok.

    Returns:
        (ok, raw) tuple: ``ok`` is True when the endpoint answered with a
        ``message`` round >= waitforround; ``raw`` is the raw response body
        (empty string on any error).
    """
    try:
        response = urllib.request.urlopen(healthurl)
        if response.code != 200:
            return (False, "")
        raw = response.read()
        logger.debug('health %r', raw)
        ob = json.loads(raw)
        rt = ob.get('message')
        if not rt:
            return (False, raw)
        return (int(rt) >= waitforround, raw)
    except Exception as e:
        if verbose:
            # Use the module-level logger for consistency with the rest of
            # the file (was the root logger via logging.warning).
            logger.warning('GET %s %s', healthurl, e)
        return (False, "")
class subslurp:
    """Drain a subprocess stream on a background thread, keeping a gzip'd
    in-memory copy that can be dumped as text for debugging on failure."""
    def __init__(self, f):
        self.f = f
        self.buf = io.BytesIO()
        self.gz = gzip.open(self.buf, 'wb')
        self.l = threading.Lock()
        self.t = None
    def run(self):
        """Consume lines from the stream until EOF or until dump() ran."""
        for chunk in self.f:
            with self.l:
                if self.gz is None:
                    # dump() already finalized the buffer; stop collecting.
                    return
                self.gz.write(chunk)
    def dump(self):
        """Finalize compression and return everything captured as text.

        May only be called once: it closes the gzip writer."""
        with self.l:
            self.gz.close()
            self.gz = None
            self.buf.seek(0)
            return gzip.open(self.buf, 'rt').read()
    def start(self):
        """Begin draining the stream on a daemon thread."""
        worker = threading.Thread(target=self.run)
        worker.daemon = True
        worker.start()
        self.t = worker
if __name__ == '__main__':
sys.exit(main())
|
plugin.py | # -*- coding: utf-8 -*-
import json
import logging
import time
import threading
import zmq
from lucena.io2.networking import create_pipe
logger = logging.getLogger(__name__)
class Plugin(object):
    """Base class for a worker that runs on its own thread and talks to its
    owner over a zmq pipe socket pair.

    NOTE(review): handle_pipe() and run() reference several attributes that
    __init__ never creates (self.pipe, self.transmit, self.interval,
    self.udp_socket, self.terminated, self.ping_at, self.filter) and the
    module constant INTERVAL_DFLT, which is not defined in this file's
    visible scope. They appear adapted from a zbeacon-style actor; a
    subclass or later refactor presumably supplies them — confirm before
    relying on these methods.
    """
    def __init__(self, zmq_context):
        # Owner-side socket is self.socket; the worker thread should use
        # self.worker_socket (the other end of the pipe).
        self.zmq_context = zmq_context
        self.poller = zmq.Poller()
        self.socket, self.worker_socket = create_pipe(self.zmq_context)
        self.signal = threading.Event()
        self.thread = None
    def start(self):
        """Launch the worker thread; blocks until it signals readiness."""
        if self.thread:
            raise RuntimeError("Worker already started.")
        self.thread = threading.Thread(target=self._start_thread)
        # Non-daemon: the process will wait for the worker on exit.
        self.thread.daemon = False
        self.thread.start()
        self.signal.wait()
    def stop(self):
        """Ask the worker to terminate by sending "$TERM" down the pipe."""
        if self.thread is None:
            logger.warning("Worker already stopped.")
            return
        # Zero send timeout so stop() never blocks if the worker is gone.
        self.socket.set(zmq.SNDTIMEO, 0)
        self.socket.send_unicode("$TERM")
        # NOTE(review): self.signal was already set by _start_thread at
        # startup and is never cleared, so this wait likely returns
        # immediately rather than waiting for shutdown — confirm intent.
        self.signal.wait()
    def _run(self):
        # Subclasses implement the worker loop here.
        raise NotImplementedError("Implement me in a subclass")
    def _start_thread(self):
        # Signal the owner (blocked in start()) that the thread is up.
        self.signal.set()
        self._run()
        # At this point the worker has finished
        # and we can clean up everything.
        self.socket.close()
        self.worker_socket.close()
        self.socket = None
        self.worker_socket = None
        self.thread = None
        self.signal.set()
    # Thin pass-throughs to the owner-side pipe socket.
    def send(self, *args, **kwargs):
        return self.socket.send(*args, **kwargs)
    def send_unicode(self, *args, **kwargs):
        return self.socket.send_unicode(*args, **kwargs)
    def send_multipart(self, *args, **kwargs):
        return self.socket.send_multipart(*args, **kwargs)
    def send_json(self, *args, **kwargs):
        return self.socket.send_json(*args, **kwargs)
    def recv(self, *args, **kwargs):
        return self.socket.recv(*args, **kwargs)
    def recv_unicode(self, *args, **kwargs):
        return self.socket.recv_unicode(*args, **kwargs)
    def recv_multipart(self, *args, **kwargs):
        return self.socket.recv_multipart(*args, **kwargs)
    def handle_pipe(self):
        """Read one command frame from self.pipe and dispatch it.

        Commands are either a JSON object with a 'command' key or a plain
        string frame. NOTE(review): self.pipe is never assigned in this
        class — verify where it is expected to come from.
        """
        # Get just the commands off the pipe
        request = self.pipe.recv_multipart()
        try:
            json_request = json.loads(request[0].decode('utf-8'))
            command = json_request.get('command')
        except Exception:
            # Not JSON: treat the first frame as the bare command string.
            command = request.pop(0).decode('UTF-8')
        if not command:
            return -1  # Interrupted
        elif command == "CONFIGURE":
            port = json_request.get('port')
            self.configure(port)
        elif command == "PUBLISH":
            self.transmit = request.pop(0)
            if self.interval == 0:
                # NOTE(review): INTERVAL_DFLT is not defined in this module's
                # visible scope — confirm it exists at runtime.
                self.interval = INTERVAL_DFLT
            # Start broadcasting immediately
            self.ping_at = time.time()
        elif command == "SILENCE":
            self.transmit = None
        elif command == "SUBSCRIBE":
            self.filter = json_request.get('filter')
        elif command == "UNSUBSCRIBE":
            self.filter = None
        elif command == "$TERM":
            self.terminated = True
        else:
            logger.error("zbeacon: - invalid command: {0}".format(command))
    def run(self):
        """Actor loop: poll the command pipe and the UDP socket, and emit a
        beacon whenever the ping deadline passes (while transmitting)."""
        # Signal actor successfully initialized
        self.pipe.signal()
        self.poller = zmq.Poller()
        self.poller.register(self.pipe, zmq.POLLIN)
        self.poller.register(self.udp_socket, zmq.POLLIN)
        while not self.terminated:
            timeout = 1
            if self.transmit:
                # Wake up exactly at the next scheduled beacon.
                timeout = self.ping_at - time.time()
                if timeout < 0:
                    timeout = 0
            # Poll on API pipe and on UDP socket
            items = dict(self.poller.poll(timeout * 1000))
            if self.pipe in items and items[self.pipe] == zmq.POLLIN:
                self.handle_pipe()
            if self.udp_socket.fileno() in items \
                    and items[self.udp_socket.fileno()] == zmq.POLLIN:
                self.handle_udp()
            if self.transmit and time.time() >= self.ping_at:
                self.send_beacon()
                self.ping_at = time.time() + self.interval
|
active_object.py | import string
import time
import random
from concurrent.futures import ThreadPoolExecutor, wait
from queue import Queue
from threading import Thread
from typing import Any
class Websocket:
    """Execution class: owns a worker thread that drains a queue of
    outgoing messages and "sends" (prints) them one at a time."""
    def __init__(self):
        self.run = True
        self.queue = Queue()  # List of pending requests
        self.run_forever_thr = Thread(target=self.run_forever)
        self.run_forever_thr.start()
    def run_forever(self):
        """ Scheduler can implement other scheduling policies """
        print('Start run_forever')
        while self.run:
            msg = self.queue.get()
            if msg is None:
                # Sentinel posted by stop(): unblocks queue.get() so the
                # thread can actually terminate. (Previously the thread
                # stayed blocked in get() forever after stop(), and being
                # non-daemon it kept the process alive.)
                break
            print(f'Send : {msg}')
            time.sleep(random.random())
    def enqueue_item(self, item: str):
        if self.run:
            self.queue.put(item)
    def stop(self):
        self.run = False
        # Wake the worker if it is blocked on an empty queue.
        self.queue.put(None)
class Client:
    """ Invocation class """
    def __init__(self, name: str, ws: Websocket):
        self.name = name
        self.ws = ws
    def send_random_msg(self, prefix: Any):
        """Build '[name] prefix-XXXXXX' (6 random letters) and enqueue it."""
        suffix = "".join(random.sample(string.ascii_letters, 6))
        self.ws.enqueue_item(f'[{self.name}] {prefix}-{suffix}')
def main():
    """Demo of the active-object pattern: two clients fan out messages to a
    shared Websocket worker via a thread pool.

    NOTE(review): stop() only clears the run flag; run_forever() may remain
    blocked in queue.get() on an empty queue, and the worker thread is
    non-daemon, so the process can hang on exit — confirm.
    """
    ws = Websocket()
    client_1 = Client('c1', ws)
    client_2 = Client('c2', ws)
    # call `Client.send_random_msg` asynchronously
    fs = []
    with ThreadPoolExecutor() as executor:
        for i in range(0, 10):
            fs.extend([
                executor.submit(client_1.send_random_msg, i),
                executor.submit(client_2.send_random_msg, i),
            ])
    # wait until all submissions completed (futures finish at enqueue time,
    # not when the worker actually "sends"; the with-block's shutdown
    # already waits, so this wait() is effectively a no-op safeguard)
    wait(fs)
    # stop thread
    ws.stop()
if __name__ == '__main__':
main()
|
presence.py | from __future__ import unicode_literals
from logging import disable
from threading import Thread
import time
from typing import Optional
import humanfriendly
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from octoprint_discordremote import DiscordRemotePlugin
from octoprint_discordremote import DiscordImpl
class Presence:
    """Cycles the Discord presence text between a help hint and the current
    printer status, re-evaluated every `presence_cycle_time` seconds."""
    def __init__(self):
        # Wired up later by configure_presence(); None until then.
        self.plugin: Optional['DiscordRemotePlugin'] = None
        self.discord: Optional['DiscordImpl'] = None
        # Index into the presence rotation (help text vs. status).
        self.presence_cycle_id: int = 0
        self.presence_thread: Optional[Thread] = None
    def configure_presence(self, plugin: 'DiscordRemotePlugin', discord: 'DiscordImpl'):
        """Store plugin/discord handles and start the presence thread once."""
        self.plugin = plugin
        self.discord = discord
        # Setup presence thread (skip if a previous one is still running)
        if not self.presence_thread or not self.presence_thread.is_alive():
            self.discord.logger.info("Starting Presence thread")
            self.presence_thread = Thread(target=self.presence)
            self.presence_thread.start()
    def generate_status(self):
        """Return a human-readable printer status string."""
        if self.plugin.get_printer().is_operational():
            if self.plugin.get_printer().is_printing():
                job_name = self.plugin.get_printer().get_current_data()['job']['file']['name']
                job_percent = self.plugin.get_printer().get_current_data()['progress']['completion']
                return "Printing {} - {}%".format(job_name,
                                                  humanfriendly.format_number(job_percent, num_decimals=2))
            else:
                return "Idle."
        else:
            return "Not operational."
    def presence(self):
        """Thread body: loop until discord shuts down, updating presence.

        NOTE(review): presence_cycle[0] (the help hint) is computed once at
        thread start, so later prefix-setting changes are not picked up —
        confirm that is acceptable.
        """
        presence_cycle = {
            0: "{}help".format(self.plugin.get_settings().get(["prefix"])),
            1: self.generate_status()
        }
        while not self.discord.shutdown_event.is_set():
            if self.plugin.get_settings().get(['presence']):
                # Refresh the status entry each pass; entry 0 stays fixed.
                presence_cycle[1] = "{}".format(self.generate_status())
                self.discord.update_presence(presence_cycle[self.presence_cycle_id % len(presence_cycle)])
                self.presence_cycle_id += 1
                if self.presence_cycle_id == len(presence_cycle):
                    self.presence_cycle_id = 0
            else:
                # Presence disabled in settings: clear it.
                self.discord.update_presence(None)
            # Sleep in 1s slices so shutdown is noticed promptly.
            for i in range(int(self.plugin.get_settings().get(['presence_cycle_time']))):
                if not self.discord.shutdown_event.is_set():
                    time.sleep(1)
|
demo7.py | # -*- coding:utf-8 -*-
# @Time : 2019/10/10 17:26
# @Author : Dg
from multiprocessing import Manager, Process, Lock
import os
# Module-level counter the workers increment. Each spawned Process gets its
# own copy of this module's globals, which is the point of the demo.
m = 0
def work(n, lock=None):
    """Increment the module-level counter and print its new value."""
    # with lock:  # operating on shared data without a lock is bound to
    # scramble the results (translated from the original Chinese comment)
    global m
    m = m + 1
    print(m)
if __name__ == '__main__':
    # A lock is created but never passed to work() (args omit it), so the
    # workers run unsynchronized — matching the commented-out lock in work().
    lock = Lock()
    p_l = []
    n = 0
    for i in range(10):
        p = Process(target=work, args=(n, ))
        p_l.append(p)
        p.start()
    for p in p_l:
        p.join()
    # Expected to print 0 in the parent: each child process incremented its
    # own copy of the global `m`, demonstrating that plain globals are not
    # shared across processes — confirm on platforms using spawn vs fork.
    print(m)
    #{'count': 94}
|
main.py | #!/usr/bin/env python3
import csv
import threading
import time
from pathlib import Path
import blobconverter
import cv2
import depthai as dai
import numpy as np
# Get argument first
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setInterleaved(False)
# Define a neural network that will make predictions based on the source frames
detection_nn = pipeline.createMobileNetDetectionNetwork()
detection_nn.setConfidenceThreshold(0.5)
detection_nn.setBlobPath(str(blobconverter.from_zoo(name="mobilenet-ssd", shaves=13)))
cam_rgb.preview.link(detection_nn.input)
# Create outputs
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("nn")
detection_nn.out.link(xout_nn.input)
# MobilenetSSD label texts
texts = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
for text in texts:
(Path(__file__).parent / Path(f'data/{text}')).mkdir(parents=True, exist_ok=True)
(Path(__file__).parent / Path(f'data/raw')).mkdir(parents=True, exist_ok=True)
# Pipeline defined, now the device is connected to
with dai.Device() as device, open('data/dataset.csv', 'w') as dataset_file:
dataset = csv.DictWriter(
dataset_file,
["timestamp", "label", "left", "top", "right", "bottom", "raw_frame", "overlay_frame", "cropped_frame"]
)
dataset.writeheader()
# nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
    def frame_norm(frame, bbox):
        """Scale normalized <0..1> bbox coords to pixel coords of `frame`.

        bbox is (xmin, ymin, xmax, ymax): even indices scale by the frame
        width, odd indices by the height. Values are clipped to [0, 1]
        first; returns an int numpy array.
        """
        norm_vals = np.full(len(bbox), frame.shape[0])
        norm_vals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)
def store_data(in_frame, detections):
timestamp = int(time.time() * 10000)
raw_frame_path = f'data/raw/{timestamp}.jpg'
cv2.imwrite(raw_frame_path, in_frame)
for detection in detections:
debug_frame = in_frame.copy()
bbox = frame_norm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
det_frame = debug_frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
cropped_path = f'data/{texts[detection.label]}/{timestamp}_cropped.jpg'
cv2.imwrite(cropped_path, det_frame)
cv2.rectangle(debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
cv2.putText(debug_frame, texts[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
overlay_path = f'data/{texts[detection.label]}/{timestamp}_overlay.jpg'
cv2.imwrite(overlay_path, debug_frame)
data = {
"timestamp": timestamp,
"label": texts[detection.label],
"left": bbox[0],
"top": bbox[1],
"right": bbox[2],
"bottom": bbox[3],
"raw_frame": raw_frame_path,
"overlay_frame": overlay_path,
"cropped_frame": cropped_path,
}
dataset.writerow(data)
# Start pipeline
device.startPipeline(pipeline)
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
q_nn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
frame = None
thread = None
detections = []
while True:
# instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise
in_rgb = q_rgb.tryGet()
in_nn = q_nn.tryGet()
if in_rgb is not None:
# if the data from the rgb camera is available, transform the 1D data into a HxWxC frame
shape = (3, in_rgb.getHeight(), in_rgb.getWidth())
frame = in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame = np.ascontiguousarray(frame)
if in_nn is not None:
detections = in_nn.detections
if frame is not None:
thread = threading.Thread(target=store_data, args=(frame, detections))
thread.start()
if frame is not None:
debug_frame = frame.copy()
# if the frame is available, draw bounding boxes on it and show the frame
for detection in detections:
bbox = frame_norm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
cv2.rectangle(debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
cv2.putText(debug_frame, texts[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
cv2.imshow("rgb", debug_frame)
if cv2.waitKey(1) == ord('q'):
break
if KeyboardInterrupt:
pass
if thread is not None:
thread.join()
|
fan_logic.py | import time, logging, json, schedule, threading
import thermostat_database, thermostat_controller, config
# Setup logging
l = logging.getLogger(__name__)
query_db = thermostat_database.query_db
get_info = thermostat_controller.get_info
thermostat_control = thermostat_controller.thermostat_control
def set_fan_state(state):
    """Turn the fan on ('1') or off ('0'), preserving the thermostat's
    current mode and temperature setpoints.

    Args:
        state: '1' to turn the fan on, '0' to turn it off.

    Returns:
        The thermostat_control() response, or an (error message, 400)
        tuple when `state` is not '0' or '1'.
    """
    if state not in ('0', '1'):
        return "Fan state value not valid, use 1 or 0", 400
    l.info("Getting current state")
    current_state = json.loads(get_info(info='info'))
    mode = current_state['mode']
    fan = current_state['fan']
    heattemp = current_state['heattemp']
    cooltemp = current_state['cooltemp']
    # Lazy %-style args: the message is only built when INFO is enabled
    # (the original eagerly concatenated strings on every call).
    l.info("Current mode: %s fan: %s heat: %s cool: %s", mode, fan, heattemp, cooltemp)
    l.info("Setting mode: %s fan: %s heat: %s cool: %s", mode, state, heattemp, cooltemp)
    return thermostat_control(mode=mode, fan=state, heat_temp=heattemp, cool_temp=cooltemp)
def fan_timer_thread(cycle_time):
    """Run the fan for `cycle_time` minutes, then switch it off again."""
    l.info("Turning fan on for " + str(cycle_time) + " minutes.")
    # config.timescale converts the requested minutes into sleep seconds.
    run_seconds = int(cycle_time) * config.timescale
    set_fan_state('1')
    time.sleep(run_seconds)
    l.info("Time's up, turning fan off")
    set_fan_state('0')
    return "Timer done", 200
def fan_scheduler(my_uuid, action_state):  # Runs fan on a scheduled interval
    """Run the fan for `cycle_time` minutes every `interval` minutes while
    `my_uuid` remains the active fan job in the database.

    Args:
        my_uuid: identity of this scheduler job; the loop exits as soon as
            the 'fan' table no longer holds this uuid.
        action_state: "cycle_time,interval" comma-separated string.

    NOTE(review): the schedule job registered here is not explicitly
    cancelled when the loop exits — verify jobs are cleared elsewhere.
    """
    try:
        cycle_time = action_state.split(',')[0]
        interval = action_state.split(',')[1]
        l.info("Scheduling fan to turn on for " + str(cycle_time) + " every " + str(interval) + " minutes for UUID: " + str(my_uuid))
        schedule.every(int(interval)).minutes.do(lambda: fan_threads(threadname='fan_timer_thread',
                                                                     my_uuid=my_uuid,
                                                                     action_state=action_state))
        l.info("Starting first run of fans since the scheduler doesn't start it first")
        fan_threads(threadname='fan_timer_thread', my_uuid=my_uuid, action_state=action_state)
        # Keep pumping the scheduler while this uuid is still the active job.
        while query_db(task='fetch', table='fan', data='') == my_uuid:
            l.debug("fan scheduler looping: " + my_uuid)
            schedule.run_pending()
            time.sleep(.5)
        l.info("UUID No longer valid. Old: " + str(my_uuid) + " Stopping job")
    except Exception:
        l.exception("Something went wrong with the fan scheduler")
        raise
def fan_threads(threadname, my_uuid, action_state):  # Spawns threads to control fans
    """Start the requested fan worker ('fan_timer_thread' or
    'fan_scheduler') on a new thread and return a (message, status) tuple."""
    if threadname == 'fan_timer_thread':
        l.info("Starting fan_timer_thread")
        cycle_time = str(action_state.split(',')[0])
        l.info("CYCLETIME: " + cycle_time)
        worker = threading.Thread(target=fan_timer_thread, args=(cycle_time,))
        worker.start()
        return "Thread started", 200
    if threadname == 'fan_scheduler':
        l.info("Starting fan_scheduler thread")
        worker = threading.Thread(target=fan_scheduler, args=(my_uuid, action_state))
        worker.start()
        return "Thread started", 200
    return "Invalid threadname", 400
def fan_controller(my_uuid, action_type, action_state):  # fan command router
    """Route a fan command.

    Args:
        my_uuid: job identity, forwarded to scheduled jobs.
        action_type: 'simple' (direct on/off) or 'scheduled' (interval job).
        action_state: '0'/'1' for simple, 'cycle,interval' for scheduled.

    Returns:
        The underlying handler's (message, status) response.
    """
    try:
        l.info("Setting up fan controller")
        if action_type == 'simple':
            if action_state == '0':
                l.info("Turning fan off")
                response = set_fan_state(state=action_state)
                l.info(response)
                return response
            elif action_state == '1':
                l.info("Turning fan on")
                response = set_fan_state(state=action_state)
                return response
            else:
                # Fixed typo in the client-facing message ("aciton").
                return "Invalid action state for simple fan setting. Needs to be 1 or 0", 400
        elif action_type == 'scheduled':
            response = fan_threads(threadname="fan_scheduler", my_uuid=my_uuid, action_state=action_state)
            return response
        else:
            # l.exception() is only meaningful inside an except block; use
            # l.error() so we don't log a bogus "NoneType: None" traceback.
            l.error("Invalid action_type: " + str(action_type))
            return "Something went wrong with fan_controller"
    except Exception:
        l.exception("")
        raise
xcvrd.py | #!/usr/bin/env python2
"""
xcvrd
Transceiver information update daemon for SONiC
"""
try:
import ast
import copy
import functools
import json
import multiprocessing
import os
import signal
import sys
import threading
import time
from sonic_py_common import daemon_base, device_info, logger
from sonic_py_common import multi_asic
from swsscommon import swsscommon
from .xcvrd_utilities import sfp_status_helper
from .xcvrd_utilities import y_cable_helper
from .xcvrd_utilities import port_mapping
except ImportError as e:
raise ImportError(str(e) + " - required module not found")
#
# Constants ====================================================================
#
SYSLOG_IDENTIFIER = "xcvrd"
PLATFORM_SPECIFIC_MODULE_NAME = "sfputil"
PLATFORM_SPECIFIC_CLASS_NAME = "SfpUtil"
TRANSCEIVER_INFO_TABLE = 'TRANSCEIVER_INFO'
TRANSCEIVER_DOM_SENSOR_TABLE = 'TRANSCEIVER_DOM_SENSOR'
TRANSCEIVER_STATUS_TABLE = 'TRANSCEIVER_STATUS'
# Mgminit time required as per CMIS spec
MGMT_INIT_TIME_DELAY_SECS = 2
# SFP insert event poll duration
SFP_INSERT_EVENT_POLL_PERIOD_MSECS = 1000
DOM_INFO_UPDATE_PERIOD_SECS = 60
STATE_MACHINE_UPDATE_PERIOD_MSECS = 60000
TIME_FOR_SFP_READY_SECS = 1
EVENT_ON_ALL_SFP = '-1'
# events definition
SYSTEM_NOT_READY = 'system_not_ready'
SYSTEM_BECOME_READY = 'system_become_ready'
SYSTEM_FAIL = 'system_fail'
NORMAL_EVENT = 'normal'
# states definition
STATE_INIT = 0
STATE_NORMAL = 1
STATE_EXIT = 2
PHYSICAL_PORT_NOT_EXIST = -1
SFP_EEPROM_NOT_READY = -2
SFPUTIL_LOAD_ERROR = 1
PORT_CONFIG_LOAD_ERROR = 2
NOT_IMPLEMENTED_ERROR = 3
SFP_SYSTEM_ERROR = 4
RETRY_TIMES_FOR_SYSTEM_READY = 24
RETRY_PERIOD_FOR_SYSTEM_READY_MSECS = 5000
RETRY_TIMES_FOR_SYSTEM_FAIL = 24
RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS = 5000
TEMP_UNIT = 'C'
VOLT_UNIT = 'Volts'
POWER_UNIT = 'dBm'
BIAS_UNIT = 'mA'
g_dict = {}
# Global platform specific sfputil class instance
platform_sfputil = None
# Global chassis object based on new platform api
platform_chassis = None
# Global xcvr table helper
xcvr_table_helper = None
# Global logger instance for helper functions and classes
# TODO: Refactor so that we only need the logger inherited
# by DaemonXcvrd
helper_logger = logger.Logger(SYSLOG_IDENTIFIER)
#
# Helper functions =============================================================
#
# Get physical port name
def get_physical_port_name(logical_port, physical_port, ganged):
    """Return the display name for a physical port; members of a ganged
    logical port get an index suffix, otherwise the logical name is used."""
    if not ganged:
        return logical_port
    return "{}:{} (ganged)".format(logical_port, physical_port)
# Strip units and beautify
def strip_unit_and_beautify(value, unit):
    """Strip a trailing unit suffix from a raw DOM reading.

    Args:
        value: raw reading; a string possibly ending in `unit`, or any
            other type (numbers etc.).
        unit: unit suffix to remove (e.g. 'C', 'dBm').

    Returns:
        The string with the unit removed when present; non-string values
        are returned as str(value).
    """
    if isinstance(value, str):  # isinstance over `type(value) is str`
        width = len(unit)
        # Guard width > 0: with an empty unit, value[:-0] would wrongly
        # truncate the whole string.
        if width and value.endswith(unit):
            return value[:-width]
        return value
    return str(value)
def _wrapper_get_presence(physical_port):
    """Presence via the new platform API, falling back to legacy sfputil."""
    if platform_chassis is not None:
        try:
            return platform_chassis.get_sfp(physical_port).get_presence()
        except NotImplementedError:
            pass
    return platform_sfputil.get_presence(physical_port)
def _wrapper_is_replaceable(physical_port):
    """Replaceability via the new platform API; legacy API has no
    equivalent, so default to False."""
    if platform_chassis is not None:
        try:
            return platform_chassis.get_sfp(physical_port).is_replaceable()
        except NotImplementedError:
            pass
    return False
def _wrapper_get_transceiver_info(physical_port):
    """Transceiver EEPROM info via new API, falling back to legacy sfputil."""
    if platform_chassis is not None:
        try:
            return platform_chassis.get_sfp(physical_port).get_transceiver_info()
        except NotImplementedError:
            pass
    return platform_sfputil.get_transceiver_info_dict(physical_port)
def _wrapper_get_transceiver_dom_info(physical_port):
    """DOM sensor readings via new API, falling back to legacy sfputil."""
    if platform_chassis is not None:
        try:
            return platform_chassis.get_sfp(physical_port).get_transceiver_bulk_status()
        except NotImplementedError:
            pass
    return platform_sfputil.get_transceiver_dom_info_dict(physical_port)
def _wrapper_get_transceiver_dom_threshold_info(physical_port):
    """DOM alarm/warning thresholds via new API, falling back to legacy."""
    if platform_chassis is not None:
        try:
            return platform_chassis.get_sfp(physical_port).get_transceiver_threshold_info()
        except NotImplementedError:
            pass
    return platform_sfputil.get_transceiver_dom_threshold_info_dict(physical_port)
# Soak SFP insert event until management init completes
def _wrapper_soak_sfp_insert_event(sfp_insert_events, port_dict):
    """Delay 'inserted' events by MGMT_INIT_TIME_DELAY_SECS.

    New inserts are parked in sfp_insert_events (keyed by port, value is
    the arrival time) and removed from port_dict; a later removal cancels
    a pending insert. Once an insert has soaked long enough it is put
    back into port_dict for normal processing. Both dicts are mutated
    in place (hence the list() copies while iterating).
    """
    for key, value in list(port_dict.items()):
        if value == sfp_status_helper.SFP_STATUS_INSERTED:
            sfp_insert_events[key] = time.time()
            del port_dict[key]
        elif value == sfp_status_helper.SFP_STATUS_REMOVED:
            if key in sfp_insert_events:
                del sfp_insert_events[key]
    for key, itime in list(sfp_insert_events.items()):
        if time.time() - itime >= MGMT_INIT_TIME_DELAY_SECS:
            port_dict[key] = sfp_status_helper.SFP_STATUS_INSERTED
            del sfp_insert_events[key]
def _wrapper_get_transceiver_change_event(timeout):
    """Poll for SFP change events.

    Returns a (status, sfp_events, sfp_errors) tuple; the legacy API does
    not report errors, so sfp_errors is None on that path.
    """
    if platform_chassis is not None:
        try:
            status, events = platform_chassis.get_change_event(timeout)
            sfp_events = events.get('sfp')
            sfp_errors = events.get('sfp_error')
            return status, sfp_events, sfp_errors
        except NotImplementedError:
            pass
    status, events = platform_sfputil.get_transceiver_change_event(timeout)
    return status, events, None
def _wrapper_get_sfp_type(physical_port):
    """SFP type string (e.g. 'QSFP_DD') via new API, else None.

    AttributeError is also tolerated: older sfp objects may lack sfp_type.
    """
    if platform_chassis:
        try:
            return platform_chassis.get_sfp(physical_port).sfp_type
        except (NotImplementedError, AttributeError):
            pass
    return None
def _wrapper_get_sfp_error_description(physical_port):
    """Per-port error description via new API, else None."""
    if platform_chassis:
        try:
            return platform_chassis.get_sfp(physical_port).get_error_description()
        except NotImplementedError:
            pass
    return None
# Remove unnecessary unit from the raw data
def beautify_dom_info_dict(dom_info_dict, physical_port):
    """Strip unit suffixes from every DOM sensor reading, in place.

    Channels 1-4 are handled for all transceivers; QSFP-DD modules carry
    four extra channels (5-8).
    """
    dom_info_dict['temperature'] = strip_unit_and_beautify(dom_info_dict['temperature'], TEMP_UNIT)
    dom_info_dict['voltage'] = strip_unit_and_beautify(dom_info_dict['voltage'], VOLT_UNIT)
    for i in range(1, 5):
        dom_info_dict['rx%dpower' % i] = strip_unit_and_beautify(dom_info_dict['rx%dpower' % i], POWER_UNIT)
    for i in range(1, 5):
        dom_info_dict['tx%dbias' % i] = strip_unit_and_beautify(dom_info_dict['tx%dbias' % i], BIAS_UNIT)
    for i in range(1, 5):
        dom_info_dict['tx%dpower' % i] = strip_unit_and_beautify(dom_info_dict['tx%dpower' % i], POWER_UNIT)
    if _wrapper_get_sfp_type(physical_port) == 'QSFP_DD':
        for i in range(5, 9):
            dom_info_dict['rx%dpower' % i] = strip_unit_and_beautify(dom_info_dict['rx%dpower' % i], POWER_UNIT)
        for i in range(5, 9):
            dom_info_dict['tx%dbias' % i] = strip_unit_and_beautify(dom_info_dict['tx%dbias' % i], BIAS_UNIT)
        for i in range(5, 9):
            dom_info_dict['tx%dpower' % i] = strip_unit_and_beautify(dom_info_dict['tx%dpower' % i], POWER_UNIT)
def beautify_dom_threshold_info_dict(dom_info_dict):
    """Strip unit suffixes from every DOM threshold entry, in place."""
    unit_map = (
        (TEMP_UNIT, ('temphighalarm', 'temphighwarning', 'templowalarm', 'templowwarning')),
        (VOLT_UNIT, ('vcchighalarm', 'vcchighwarning', 'vcclowalarm', 'vcclowwarning')),
        (POWER_UNIT, ('txpowerhighalarm', 'txpowerlowalarm', 'txpowerhighwarning', 'txpowerlowwarning',
                      'rxpowerhighalarm', 'rxpowerlowalarm', 'rxpowerhighwarning', 'rxpowerlowwarning')),
        (BIAS_UNIT, ('txbiashighalarm', 'txbiaslowalarm', 'txbiashighwarning', 'txbiaslowwarning')),
    )
    for unit, keys in unit_map:
        for key in keys:
            dom_info_dict[key] = strip_unit_and_beautify(dom_info_dict[key], unit)
# Update port sfp info in db
def post_port_sfp_info_to_db(logical_port_name, port_mapping, table, transceiver_dict,
                             stop_event=threading.Event()):
    """Publish transceiver EEPROM info for one logical port to the DB table.

    Reads EEPROM info for each present physical member of the logical port,
    caches it into `transceiver_dict` (keyed by physical port), and writes
    the fields into `table` (TRANSCEIVER_INFO).

    Returns:
        PHYSICAL_PORT_NOT_EXIST if the logical port has no physical ports;
        SFP_EEPROM_NOT_READY if a present port returned no info; otherwise
        None (implicitly) after processing all members.

    NOTE(review): the default `stop_event=threading.Event()` is a shared
    mutable default — harmless only as long as no caller ever sets it;
    confirm this is intentional.
    """
    ganged_port = False
    ganged_member_num = 1
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
    if physical_port_list is None:
        helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
        return PHYSICAL_PORT_NOT_EXIST
    if len(physical_port_list) > 1:
        ganged_port = True
    for physical_port in physical_port_list:
        if stop_event.is_set():
            # Daemon shutdown requested; abandon the remaining members.
            break
        if not _wrapper_get_presence(physical_port):
            # No module plugged into this cage.
            continue
        port_name = get_physical_port_name(logical_port_name, ganged_member_num, ganged_port)
        ganged_member_num += 1
        try:
            port_info_dict = _wrapper_get_transceiver_info(physical_port)
            if port_info_dict is not None:
                is_replaceable = _wrapper_is_replaceable(physical_port)
                transceiver_dict[physical_port] = port_info_dict
                fvs = swsscommon.FieldValuePairs(
                    [('type', port_info_dict['type']),
                     ('hardware_rev', port_info_dict['hardware_rev']),
                     ('serial', port_info_dict['serial']),
                     ('manufacturer', port_info_dict['manufacturer']),
                     ('model', port_info_dict['model']),
                     ('vendor_oui', port_info_dict['vendor_oui']),
                     ('vendor_date', port_info_dict['vendor_date']),
                     ('connector', port_info_dict['connector']),
                     ('encoding', port_info_dict['encoding']),
                     ('ext_identifier', port_info_dict['ext_identifier']),
                     ('ext_rateselect_compliance', port_info_dict['ext_rateselect_compliance']),
                     ('cable_type', port_info_dict['cable_type']),
                     ('cable_length', str(port_info_dict['cable_length'])),
                     ('specification_compliance', port_info_dict['specification_compliance']),
                     ('nominal_bit_rate', str(port_info_dict['nominal_bit_rate'])),
                     ('application_advertisement', port_info_dict['application_advertisement']
                      if 'application_advertisement' in port_info_dict else 'N/A'),
                     ('is_replaceable', str(is_replaceable)),
                     ('dom_capability', port_info_dict['dom_capability']
                      if 'dom_capability' in port_info_dict else 'N/A'),
                     ])
                table.set(port_name, fvs)
            else:
                return SFP_EEPROM_NOT_READY
        except NotImplementedError:
            helper_logger.log_error("This functionality is currently not implemented for this platform")
            sys.exit(NOT_IMPLEMENTED_ERROR)
# Update port dom threshold info in db
def post_port_dom_threshold_info_to_db(logical_port_name, port_mapping, table,
                                       stop=threading.Event(), dom_th_info_cache=None):
    """Publish DOM alarm/warning thresholds for one logical port to the DB.

    Args:
        logical_port_name: logical port whose members are processed.
        port_mapping: logical-to-physical port mapper.
        table: destination table (TRANSCEIVER_DOM_SENSOR thresholds).
        stop: event that aborts processing mid-port when set.
            NOTE(review): shared mutable default Event — see
            post_port_sfp_info_to_db.
        dom_th_info_cache: optional per-call dict cache keyed by physical
            port, to avoid re-reading EEPROM for the same module.

    Returns:
        PHYSICAL_PORT_NOT_EXIST if the logical port has no physical ports;
        SFP_EEPROM_NOT_READY if a present port returned no thresholds;
        otherwise None (implicitly).
    """
    ganged_port = False
    ganged_member_num = 1
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
    if physical_port_list is None:
        helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
        return PHYSICAL_PORT_NOT_EXIST
    if len(physical_port_list) > 1:
        ganged_port = True
    for physical_port in physical_port_list:
        if stop.is_set():
            break
        if not _wrapper_get_presence(physical_port):
            continue
        port_name = get_physical_port_name(logical_port_name,
                                           ganged_member_num, ganged_port)
        ganged_member_num += 1
        try:
            if dom_th_info_cache is not None and physical_port in dom_th_info_cache:
                # If cache is enabled and there is a cache, no need to read from EEPROM, just read from cache
                dom_info_dict = dom_th_info_cache[physical_port]
            else:
                dom_info_dict = _wrapper_get_transceiver_dom_threshold_info(physical_port)
                if dom_th_info_cache is not None:
                    # If cache is enabled, put dom threshold information to cache
                    dom_th_info_cache[physical_port] = dom_info_dict
            if dom_info_dict is not None:
                beautify_dom_threshold_info_dict(dom_info_dict)
                fvs = swsscommon.FieldValuePairs(
                    [('temphighalarm', dom_info_dict['temphighalarm']),
                     ('temphighwarning', dom_info_dict['temphighwarning']),
                     ('templowalarm', dom_info_dict['templowalarm']),
                     ('templowwarning', dom_info_dict['templowwarning']),
                     ('vcchighalarm', dom_info_dict['vcchighalarm']),
                     ('vcchighwarning', dom_info_dict['vcchighwarning']),
                     ('vcclowalarm', dom_info_dict['vcclowalarm']),
                     ('vcclowwarning', dom_info_dict['vcclowwarning']),
                     ('txpowerhighalarm', dom_info_dict['txpowerhighalarm']),
                     ('txpowerlowalarm', dom_info_dict['txpowerlowalarm']),
                     ('txpowerhighwarning', dom_info_dict['txpowerhighwarning']),
                     ('txpowerlowwarning', dom_info_dict['txpowerlowwarning']),
                     ('rxpowerhighalarm', dom_info_dict['rxpowerhighalarm']),
                     ('rxpowerlowalarm', dom_info_dict['rxpowerlowalarm']),
                     ('rxpowerhighwarning', dom_info_dict['rxpowerhighwarning']),
                     ('rxpowerlowwarning', dom_info_dict['rxpowerlowwarning']),
                     ('txbiashighalarm', dom_info_dict['txbiashighalarm']),
                     ('txbiaslowalarm', dom_info_dict['txbiaslowalarm']),
                     ('txbiashighwarning', dom_info_dict['txbiashighwarning']),
                     ('txbiaslowwarning', dom_info_dict['txbiaslowwarning'])
                     ])
                table.set(port_name, fvs)
            else:
                return SFP_EEPROM_NOT_READY
        except NotImplementedError:
            helper_logger.log_error("This functionality is currently not implemented for this platform")
            sys.exit(NOT_IMPLEMENTED_ERROR)
# Update port dom sensor info in db
def post_port_dom_info_to_db(logical_port_name, port_mapping, table, stop_event=threading.Event(), dom_info_cache=None):
    """Read DOM sensor values for a logical port and write them to *table*.

    Args:
        logical_port_name (str): logical port to refresh.
        port_mapping: logical/physical port mapping helper.
        table: TRANSCEIVER_DOM_INFO table object.
        stop_event (threading.Event): set to abort the walk early.
        dom_info_cache (dict): optional per-cycle cache keyed by physical
            port index, used to avoid re-reading the same EEPROM.

    Returns:
        PHYSICAL_PORT_NOT_EXIST when the logical port maps to no physical
        port, SFP_EEPROM_NOT_READY when the DOM data could not be read,
        otherwise None.
    """
    ganged = False
    member_index = 1

    physical_ports = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
    if physical_ports is None:
        helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
        return PHYSICAL_PORT_NOT_EXIST

    if len(physical_ports) > 1:
        ganged = True

    for physical_port in physical_ports:
        if stop_event.is_set():
            break

        if not _wrapper_get_presence(physical_port):
            continue

        port_name = get_physical_port_name(logical_port_name, member_index, ganged)
        member_index += 1

        try:
            if dom_info_cache is not None and physical_port in dom_info_cache:
                # Cache hit: skip the (slow) EEPROM access entirely
                dom_info_dict = dom_info_cache[physical_port]
            else:
                dom_info_dict = _wrapper_get_transceiver_dom_info(physical_port)
                if dom_info_cache is not None:
                    # Remember the reading for later ports in this cycle
                    dom_info_cache[physical_port] = dom_info_dict

            if dom_info_dict is None:
                return SFP_EEPROM_NOT_READY

            beautify_dom_info_dict(dom_info_dict, physical_port)

            # QSFP-DD modules expose 8 lanes of rx power / tx bias / tx power;
            # every other module type exposes 4.  Field order matches the
            # previous hand-written tuple lists exactly.
            lane_count = 8 if _wrapper_get_sfp_type(physical_port) == 'QSFP_DD' else 4
            field_names = ['temperature', 'voltage']
            field_names += ['rx{}power'.format(i) for i in range(1, lane_count + 1)]
            field_names += ['tx{}bias'.format(i) for i in range(1, lane_count + 1)]
            field_names += ['tx{}power'.format(i) for i in range(1, lane_count + 1)]
            fvs = swsscommon.FieldValuePairs([(name, dom_info_dict[name]) for name in field_names])
            table.set(port_name, fvs)
        except NotImplementedError:
            helper_logger.log_error("This functionality is currently not implemented for this platform")
            sys.exit(NOT_IMPLEMENTED_ERROR)
# Update port dom/sfp info in db
def post_port_sfp_dom_info_to_db(is_warm_start, port_mapping, stop_event=threading.Event()):
    """Post transceiver (SFP) and DOM info for every logical port to STATE_DB.

    Args:
        is_warm_start (bool): True when the daemon is warm-starting; media
            settings are then NOT pushed to APPL_DB to avoid dataplane
            traffic impact.
        port_mapping: logical/physical port mapping helper.
        stop_event (threading.Event): set to abort the walk early.

    Returns:
        set: logical port names whose EEPROM could not be read yet and
        which should be retried later.
    """
    # Connect to STATE_DB and create transceiver dom/sfp info tables
    transceiver_dict = {}
    retry_eeprom_set = set()

    # Post all the current interface dom/sfp info to STATE_DB
    logical_port_list = port_mapping.logical_port_list
    for logical_port_name in logical_port_list:
        if stop_event.is_set():
            break

        # Get the asic to which this port belongs
        asic_index = port_mapping.get_asic_id_for_logical_port(logical_port_name)
        if asic_index is None:
            helper_logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
            continue
        # NOTE(review): 'get_int_tbl' here vs 'get_intf_tbl' elsewhere in this
        # file - confirm both accessors exist on xcvr_table_helper.
        rc = post_port_sfp_info_to_db(logical_port_name, port_mapping, xcvr_table_helper.get_int_tbl(asic_index), transceiver_dict, stop_event)
        if rc != SFP_EEPROM_NOT_READY:
            post_port_dom_info_to_db(logical_port_name, port_mapping, xcvr_table_helper.get_dom_tbl(asic_index), stop_event)
            post_port_dom_threshold_info_to_db(logical_port_name, port_mapping, xcvr_table_helper.get_dom_tbl(asic_index), stop_event)

            # Do not notify media settings during warm reboot to avoid dataplane traffic impact
            if not is_warm_start:
                notify_media_setting(logical_port_name, transceiver_dict, xcvr_table_helper.get_app_port_tbl(asic_index), port_mapping)
                transceiver_dict.clear()
        else:
            retry_eeprom_set.add(logical_port_name)

    return retry_eeprom_set
# Delete port dom/sfp info from db
def del_port_sfp_dom_info_from_db(logical_port_name, port_mapping, int_tbl, dom_tbl):
    """Delete the transceiver (SFP) and/or DOM entries of a logical port.

    Args:
        logical_port_name (str): logical port to clean up.
        port_mapping: logical/physical port mapping helper.
        int_tbl: TRANSCEIVER_INFO table, or None to keep interface info.
        dom_tbl: TRANSCEIVER_DOM_INFO table, or None to keep DOM info.

    Returns:
        PHYSICAL_PORT_NOT_EXIST when the logical port has no physical
        ports, otherwise None.
    """
    ganged_port = False
    ganged_member_num = 1

    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
    if physical_port_list is None:
        helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
        return PHYSICAL_PORT_NOT_EXIST

    if len(physical_port_list) > 1:
        ganged_port = True

    for physical_port in physical_port_list:
        port_name = get_physical_port_name(logical_port_name, ganged_member_num, ganged_port)
        ganged_member_num += 1

        try:
            # Identity comparison ('is not None') instead of '!= None'
            if int_tbl is not None:
                int_tbl._del(port_name)
            if dom_tbl is not None:
                dom_tbl._del(port_name)
        except NotImplementedError:
            helper_logger.log_error("This functionality is currently not implemented for this platform")
            sys.exit(NOT_IMPLEMENTED_ERROR)
def check_port_in_range(range_str, physical_port):
    """Return True if *physical_port* lies within the inclusive 'lo-hi' range string."""
    parts = range_str.split('-')
    low = int(parts[0].strip())
    high = int(parts[1].strip())
    return low <= physical_port <= high
def get_media_settings_value(physical_port, key):
    """Look up media (SI) settings for a physical port from the g_dict config.

    Lookup order:
      1. GLOBAL_MEDIA_SETTINGS, matching the port against list/range keys;
         a vendor-key (key[0]) match wins over a media-key (key[1]) match,
         and a 'Default' entry is remembered as fallback.
      2. PORT_MEDIA_SETTINGS for the exact port number.
      3. The remembered global default, else an empty dict.

    Args:
        physical_port (int): physical port index.
        key (list): [vendor_key, media_key] as built by get_media_settings_key().

    Returns:
        dict: media settings for the port; {} when nothing matches.
    """
    GLOBAL_MEDIA_SETTINGS_KEY = 'GLOBAL_MEDIA_SETTINGS'
    PORT_MEDIA_SETTINGS_KEY = 'PORT_MEDIA_SETTINGS'
    DEFAULT_KEY = 'Default'
    RANGE_SEPARATOR = '-'
    COMMA_SEPARATOR = ','
    media_dict = {}
    default_dict = {}

    # Keys under global media settings can be a list or range or list of ranges
    # of physical port numbers. Below are some examples
    # 1-32
    # 1,2,3,4,5
    # 1-4,9-12
    if GLOBAL_MEDIA_SETTINGS_KEY in g_dict:
        for keys in g_dict[GLOBAL_MEDIA_SETTINGS_KEY]:
            if COMMA_SEPARATOR in keys:
                port_list = keys.split(COMMA_SEPARATOR)
                for port in port_list:
                    if RANGE_SEPARATOR in port:
                        if check_port_in_range(port, physical_port):
                            media_dict = g_dict[GLOBAL_MEDIA_SETTINGS_KEY][keys]
                            break
                    elif str(physical_port) == port:
                        media_dict = g_dict[GLOBAL_MEDIA_SETTINGS_KEY][keys]
                        break

            elif RANGE_SEPARATOR in keys:
                if check_port_in_range(keys, physical_port):
                    media_dict = g_dict[GLOBAL_MEDIA_SETTINGS_KEY][keys]

        # If there is a match in the global profile for a media type,
        # fetch those values
        if key[0] in media_dict:
            return media_dict[key[0]]
        elif key[1] in media_dict:
            return media_dict[key[1]]
        elif DEFAULT_KEY in media_dict:
            # Remember global default; per-port settings may still override it
            default_dict = media_dict[DEFAULT_KEY]

    media_dict = {}

    if PORT_MEDIA_SETTINGS_KEY in g_dict:
        for keys in g_dict[PORT_MEDIA_SETTINGS_KEY]:
            if int(keys) == physical_port:
                media_dict = g_dict[PORT_MEDIA_SETTINGS_KEY][keys]
                break

        if len(media_dict) == 0:
            if len(default_dict) != 0:
                return default_dict
            else:
                helper_logger.log_error("Error: No values for physical port '{}'".format(physical_port))
            return {}

        if key[0] in media_dict:
            return media_dict[key[0]]
        elif key[1] in media_dict:
            return media_dict[key[1]]
        elif DEFAULT_KEY in media_dict:
            return media_dict[DEFAULT_KEY]
        elif len(default_dict) != 0:
            return default_dict
    else:
        # No per-port section at all: fall back to any remembered global default
        if len(default_dict) != 0:
            return default_dict

    return {}
def get_media_settings_key(physical_port, transceiver_dict):
    """Build the [vendor_key, media_key] lookup pair for media settings.

    vendor_key is 'MANUFACTURER-MODEL' (manufacturer upper-cased).
    media_key is built from type_abbrv_name, the compliance code and, for
    cable-assembly media, the cable length in meters ('-<len>M').
    """
    sup_compliance_str = '10/40G Ethernet Compliance Code'
    sup_len_str = 'Length Cable Assembly(m)'
    vendor_name_str = transceiver_dict[physical_port]['manufacturer']
    vendor_pn_str = transceiver_dict[physical_port]['model']
    vendor_key = vendor_name_str.upper() + '-' + vendor_pn_str

    media_len = ''
    if transceiver_dict[physical_port]['cable_type'] == sup_len_str:
        media_len = transceiver_dict[physical_port]['cable_length']

    media_compliance_dict_str = transceiver_dict[physical_port]['specification_compliance']
    media_compliance_code = ''
    media_type = ''
    media_key = ''
    media_compliance_dict = {}
    try:
        if _wrapper_get_sfp_type(physical_port) == 'QSFP_DD':
            # CMIS (QSFP-DD) reports the compliance code directly as a string
            media_compliance_code = media_compliance_dict_str
        else:
            # SFF modules report a dict literal in the EEPROM field; parse it
            media_compliance_dict = ast.literal_eval(media_compliance_dict_str)
            if sup_compliance_str in media_compliance_dict:
                media_compliance_code = media_compliance_dict[sup_compliance_str]
    except ValueError as e:
        helper_logger.log_error("Invalid value for port {} 'specification_compliance': {}".format(physical_port, media_compliance_dict_str))

    media_type = transceiver_dict[physical_port]['type_abbrv_name']

    if len(media_type) != 0:
        media_key += media_type
    if len(media_compliance_code) != 0:
        media_key += '-' + media_compliance_code
        # NOTE(review): media_len may be the empty string '' here, and
        # '' != 0 is True in Python 3, so an empty length still appends
        # '-M'.  Looks suspicious, but deployed media_settings.json keys
        # may rely on the current format - confirm before changing.
        if _wrapper_get_sfp_type(physical_port) == 'QSFP_DD':
            if media_compliance_code == "passive_copper_media_interface":
                if media_len != 0:
                    media_key += '-' + str(media_len) + 'M'
        else:
            if media_len != 0:
                media_key += '-' + str(media_len) + 'M'
    else:
        # No compliance code: use a wildcard suffix
        media_key += '-' + '*'

    return [vendor_key, media_key]
def get_media_val_str_from_dict(media_dict):
    """Join per-lane values ('lane0'..'laneN') into a comma-separated string."""
    LANE_STR = 'lane'
    LANE_SEPARATOR = ','

    lanes = {}
    for lane_name, lane_val in media_dict.items():
        # 'laneN' -> integer N
        lanes[int(lane_name.strip()[len(LANE_STR):])] = lane_val

    pieces = []
    for lane_num in range(len(lanes)):
        pieces.append(lanes[lane_num])
        # Separator after every lane except the last-inserted key
        if lane_num != list(lanes.keys())[-1]:
            pieces.append(LANE_SEPARATOR)
    return ''.join(pieces)
def get_media_val_str(num_logical_ports, lane_dict, logical_idx):
    """Return the media value string for one logical port.

    When the physical port is broken out into several logical ports and the
    settings file provides at least one lane per logical port, only this
    logical port's share of the lanes is used (renumbered from lane0);
    otherwise all lanes are used as-is.
    """
    LANE_STR = 'lane'
    total_lanes = len(lane_dict)

    # Breakout case: slice out this logical port's contiguous share of lanes
    if num_logical_ports > 1 and total_lanes >= num_logical_ports:
        lanes_per_port = total_lanes // num_logical_ports
        first_lane = logical_idx * lanes_per_port
        sub_dict = {}
        for offset in range(lanes_per_port):
            # Renumber the lane relative to this logical port
            sub_dict[LANE_STR + str(offset)] = lane_dict[LANE_STR + str(first_lane + offset)]
        return get_media_val_str_from_dict(sub_dict)

    return get_media_val_str_from_dict(lane_dict)
def notify_media_setting(logical_port_name, transceiver_dict,
                         app_port_tbl, port_mapping):
    """Push media (SI) settings for a logical port to the APPL_DB port table.

    Silently returns when no media settings file is loaded (g_dict empty).
    Returns PHYSICAL_PORT_NOT_EXIST when the logical port has no physical
    ports; aborts (plain return) as soon as one physical port yields no
    media settings.
    """
    if not g_dict:
        return

    physical_ports = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
    if physical_ports is None:
        helper_logger.log_error("Error: No physical ports found for logical port '{}'".format(logical_port_name))
        return PHYSICAL_PORT_NOT_EXIST

    is_ganged = len(physical_ports) > 1
    member_index = 1
    for physical_port in physical_ports:
        logical_siblings = port_mapping.get_physical_to_logical(physical_port)
        sibling_count = len(logical_siblings)
        logical_idx = logical_siblings.index(logical_port_name)
        if not _wrapper_get_presence(physical_port):
            helper_logger.log_info("Media {} presence not detected during notify".format(physical_port))
            continue
        if physical_port not in transceiver_dict:
            helper_logger.log_error("Media {} eeprom not populated in transceiver dict".format(physical_port))
            continue

        port_name = get_physical_port_name(logical_port_name, member_index, is_ganged)
        member_index += 1
        key = get_media_settings_key(physical_port, transceiver_dict)
        media_dict = get_media_settings_value(physical_port, key)

        if len(media_dict) == 0:
            helper_logger.log_error("Error in obtaining media setting for {}".format(logical_port_name))
            return

        fvs = swsscommon.FieldValuePairs(len(media_dict))
        for index, media_key in enumerate(media_dict):
            value = media_dict[media_key]
            if type(value) is dict:
                # Per-lane values: extract this logical port's share of lanes
                media_val_str = get_media_val_str(sibling_count, value, logical_idx)
            else:
                media_val_str = value
            fvs[index] = (str(media_key), str(media_val_str))

        app_port_tbl.set(port_name, fvs)
def waiting_time_compensation_with_sleep(time_start, time_to_wait):
    """Sleep out the remainder of a *time_to_wait*-second window that began at *time_start*."""
    remaining = time_to_wait - (time.time() - time_start)
    if remaining > 0:
        time.sleep(remaining)
# Update port SFP status table on receiving SFP change event
def update_port_transceiver_status_table(logical_port_name, status_tbl, status, error_descriptions='N/A'):
    """Write the 'status' and 'error' fields for a logical port into the TRANSCEIVER_STATUS table."""
    entry = [('status', status), ('error', error_descriptions)]
    status_tbl.set(logical_port_name, swsscommon.FieldValuePairs(entry))
# Delete port from SFP status table
def delete_port_from_status_table(logical_port_name, status_tbl):
    """Remove the TRANSCEIVER_STATUS entry for the given logical port."""
    status_tbl._del(logical_port_name)
# Init TRANSCEIVER_STATUS table
def init_port_sfp_status_tbl(port_mapping, stop_event=threading.Event()):
    """Initialize the TRANSCEIVER_STATUS table for every logical port.

    Marks each logical port INSERTED or REMOVED based on current module
    presence.  Ports with no ASIC index or no physical ports are handled
    and skipped.

    Args:
        port_mapping: logical/physical port mapping helper.
        stop_event (threading.Event): set to abort the walk early.
    """
    logical_port_list = port_mapping.logical_port_list
    for logical_port_name in logical_port_list:
        if stop_event.is_set():
            break

        # Get the asic to which this port belongs
        asic_index = port_mapping.get_asic_id_for_logical_port(logical_port_name)
        if asic_index is None:
            helper_logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
            continue

        physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
        if physical_port_list is None:
            helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
            update_port_transceiver_status_table(logical_port_name, xcvr_table_helper.get_status_tbl(asic_index), sfp_status_helper.SFP_STATUS_REMOVED)
            # BUGFIX: skip to the next logical port; previously execution fell
            # through and the loop below iterated over None (TypeError).
            continue

        for physical_port in physical_port_list:
            if stop_event.is_set():
                break

            if not _wrapper_get_presence(physical_port):
                update_port_transceiver_status_table(logical_port_name, xcvr_table_helper.get_status_tbl(asic_index), sfp_status_helper.SFP_STATUS_REMOVED)
            else:
                update_port_transceiver_status_table(logical_port_name, xcvr_table_helper.get_status_tbl(asic_index), sfp_status_helper.SFP_STATUS_INSERTED)
#
# Helper classes ===============================================================
#
# Thread wrapper class to update dom info periodically
class DomInfoUpdateTask(object):
    """Background thread that periodically refreshes DOM sensor and DOM
    threshold info in STATE_DB for all logical ports, and keeps mux-cable
    info updated when a Y-cable is present."""

    def __init__(self, port_mapping):
        # Worker thread handle; created in task_run()
        self.task_thread = None
        self.task_stopping_event = threading.Event()
        # Deep copy so this thread can apply port config changes independently
        # of the caller's copy.
        self.port_mapping = copy.deepcopy(port_mapping)

    def task_worker(self, y_cable_presence):
        """Main loop: every DOM_INFO_UPDATE_PERIOD_SECS, post DOM info and
        DOM thresholds for every non-errored logical port, using per-cycle
        caches so each physical EEPROM is read at most once per cycle."""
        helper_logger.log_info("Start DOM monitoring loop")
        mux_tbl = {}
        # Per-cycle caches keyed by physical port index
        dom_info_cache = {}
        dom_th_info_cache = {}
        # NOTE(review): 'port_mapping' here is the module-level helper
        # (subscribe/handle API), while self.port_mapping is this thread's
        # own copy of the mapping data - confirm this split is intentional.
        sel, asic_context = port_mapping.subscribe_port_config_change()

        # Start loop to update dom info in DB periodically
        while not self.task_stopping_event.wait(DOM_INFO_UPDATE_PERIOD_SECS):
            # Clear the cache at the begin of the loop to make sure it will be clear each time
            dom_info_cache.clear()
            dom_th_info_cache.clear()

            # Handle port change event from main thread
            port_mapping.handle_port_config_change(sel, asic_context, self.task_stopping_event, self.port_mapping, helper_logger, self.on_port_config_change)
            logical_port_list = self.port_mapping.logical_port_list
            for logical_port_name in logical_port_list:
                # Get the asic to which this port belongs
                asic_index = self.port_mapping.get_asic_id_for_logical_port(logical_port_name)
                if asic_index is None:
                    helper_logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
                    continue

                # Skip ports currently flagged with an SFP error
                if not sfp_status_helper.detect_port_in_error_status(logical_port_name, xcvr_table_helper.get_status_tbl(asic_index)):
                    post_port_dom_info_to_db(logical_port_name, self.port_mapping, xcvr_table_helper.get_dom_tbl(asic_index), self.task_stopping_event, dom_info_cache=dom_info_cache)
                    post_port_dom_threshold_info_to_db(logical_port_name, self.port_mapping, xcvr_table_helper.get_dom_tbl(asic_index), self.task_stopping_event, dom_th_info_cache=dom_th_info_cache)
                    if y_cable_presence[0] is True:
                        y_cable_helper.check_identifier_presence_and_update_mux_info_entry(xcvr_table_helper.get_state_db(asic_index), mux_tbl, asic_index, logical_port_name)

        helper_logger.log_info("Stop DOM monitoring loop")

    def task_run(self, y_cable_presence):
        """Start the worker thread (no-op if a stop was already requested)."""
        if self.task_stopping_event.is_set():
            return

        self.task_thread = threading.Thread(target=self.task_worker, args=(y_cable_presence,))
        self.task_thread.start()

    def task_stop(self):
        """Signal the worker loop to stop and wait for the thread to exit."""
        self.task_stopping_event.set()
        self.task_thread.join()

    def on_port_config_change(self, port_change_event):
        """Callback invoked by handle_port_config_change for each port event."""
        if port_change_event.event_type == port_mapping.PortChangeEvent.PORT_REMOVE:
            self.on_remove_logical_port(port_change_event)
        self.port_mapping.handle_port_change_event(port_change_event)

    def on_remove_logical_port(self, port_change_event):
        """Called when a logical port is removed from CONFIG_DB
        Args:
            port_change_event (object): port change event
        """
        # To avoid race condition, remove the entry TRANSCEIVER_DOM_INFO table.
        # This thread only update TRANSCEIVER_DOM_INFO table, so we don't have to remove entries from
        # TRANSCEIVER_INFO and TRANSCEIVER_STATUS_INFO
        del_port_sfp_dom_info_from_db(port_change_event.port_name,
                                      self.port_mapping,
                                      None,
                                      xcvr_table_helper.get_dom_tbl(port_change_event.asic_id))
# Process wrapper class to update sfp state info periodically
class SfpStateUpdateTask(object):
RETRY_EEPROM_READING_INTERVAL = 60
    def __init__(self, port_mapping, retry_eeprom_set):
        """
        Args:
            port_mapping: logical/physical port mapping; deep-copied so this
                sub-process can mutate it independently of the caller's copy.
            retry_eeprom_set (set): logical port names whose EEPROM reads
                failed and should be retried periodically.
        """
        # Child process handle; created in task_run()
        self.task_process = None
        self.task_stopping_event = multiprocessing.Event()
        self.port_mapping = copy.deepcopy(port_mapping)
        # A set to hold those logical port name who fail to read EEPROM
        self.retry_eeprom_set = retry_eeprom_set
        # To avoid retry EEPROM read too fast, record the last EEPROM read timestamp in this member
        self.last_retry_eeprom_time = 0
        # A dict to hold SFP error event, for SFP insert/remove event, it is not necessary to cache them
        # because _wrapper_get_presence returns the SFP presence status
        self.sfp_error_dict = {}
        self.sfp_insert_events = {}
def _mapping_event_from_change_event(self, status, port_dict):
"""
mapping from what get_transceiver_change_event returns to event defined in the state machine
the logic is pretty straightforword
"""
if status:
if bool(port_dict):
event = NORMAL_EVENT
else:
event = SYSTEM_BECOME_READY
# here, a simple timeout event whose port_dict is empty is mapped
# into a SYSTEM_BECOME_READY event so that it can be handled
port_dict[EVENT_ON_ALL_SFP] = SYSTEM_BECOME_READY
else:
if EVENT_ON_ALL_SFP in port_dict.keys():
event = port_dict[EVENT_ON_ALL_SFP]
else:
# this should not happen. just for protection
event = SYSTEM_FAIL
port_dict[EVENT_ON_ALL_SFP] = SYSTEM_FAIL
helper_logger.log_debug("mapping from {} {} to {}".format(status, port_dict, event))
return event
def task_worker(self, stopping_event, sfp_error_event, y_cable_presence):
helper_logger.log_info("Start SFP monitoring loop")
transceiver_dict = {}
# Start main loop to listen to the SFP change event.
# The state migrating sequence:
# 1. When the system starts, it is in "INIT" state, calling get_transceiver_change_event
# with RETRY_PERIOD_FOR_SYSTEM_READY_MSECS as timeout for before reach RETRY_TIMES_FOR_SYSTEM_READY
# times, otherwise it will transition to "EXIT" state
# 2. Once 'system_become_ready' returned, the system enters "SYSTEM_READY" state and starts to monitor
# the insertion/removal event of all the SFP modules.
# In this state, receiving any system level event will be treated as an error and cause transition to
# "INIT" state
# 3. When system back to "INIT" state, it will continue to handle system fail event, and retry until reach
# RETRY_TIMES_FOR_SYSTEM_READY times, otherwise it will transition to "EXIT" state
# states definition
# - Initial state: INIT, before received system ready or a normal event
# - Final state: EXIT
# - other state: NORMAL, after has received system-ready or a normal event
# events definition
# - SYSTEM_NOT_READY
# - SYSTEM_BECOME_READY
# -
# - NORMAL_EVENT
# - sfp insertion/removal
# - timeout returned by sfputil.get_change_event with status = true
# - SYSTEM_FAIL
# State transition:
# 1. SYSTEM_NOT_READY
# - INIT
# - retry < RETRY_TIMES_FOR_SYSTEM_READY
# retry ++
# - else
# max retry reached, treat as fatal, transition to EXIT
# - NORMAL
# Treat as an error, transition to INIT
# 2. SYSTEM_BECOME_READY
# - INIT
# transition to NORMAL
# - NORMAL
# log the event
# nop
# 3. NORMAL_EVENT
# - INIT (for the vendors who don't implement SYSTEM_BECOME_READY)
# transition to NORMAL
# handle the event normally
# - NORMAL
# handle the event normally
# 4. SYSTEM_FAIL
# - INIT
# - retry < RETRY_TIMES_FOR_SYSTEM_READY
# retry ++
# - else
# max retry reached, treat as fatal, transition to EXIT
# - NORMAL
# Treat as an error, transition to INIT
# State event next state
# INIT SYSTEM NOT READY INIT / EXIT
# INIT SYSTEM FAIL INIT / EXIT
# INIT SYSTEM BECOME READY NORMAL
# NORMAL SYSTEM BECOME READY NORMAL
# NORMAL SYSTEM FAIL INIT
# INIT/NORMAL NORMAL EVENT NORMAL
# NORMAL SYSTEM NOT READY INIT
# EXIT -
retry = 0
timeout = RETRY_PERIOD_FOR_SYSTEM_READY_MSECS
state = STATE_INIT
sel, asic_context = port_mapping.subscribe_port_config_change()
port_change_event_handler = functools.partial(self.on_port_config_change, stopping_event, y_cable_presence)
while not stopping_event.is_set():
port_mapping.handle_port_config_change(sel, asic_context, stopping_event, self.port_mapping, helper_logger, port_change_event_handler)
# Retry those logical ports whose EEPROM reading failed or timeout when the SFP is inserted
self.retry_eeprom_reading()
next_state = state
time_start = time.time()
# Ensure not to block for any event if sfp insert event is pending
if self.sfp_insert_events:
timeout = SFP_INSERT_EVENT_POLL_PERIOD_MSECS
status, port_dict, error_dict = _wrapper_get_transceiver_change_event(timeout)
if status:
# Soak SFP insert events across various ports (updates port_dict)
_wrapper_soak_sfp_insert_event(self.sfp_insert_events, port_dict)
if not port_dict:
continue
helper_logger.log_debug("Got event {} {} in state {}".format(status, port_dict, state))
event = self._mapping_event_from_change_event(status, port_dict)
if event == SYSTEM_NOT_READY:
if state == STATE_INIT:
# system not ready, wait and retry
if retry >= RETRY_TIMES_FOR_SYSTEM_READY:
helper_logger.log_error("System failed to get ready in {} secs or received system error. Exiting...".format(
(RETRY_PERIOD_FOR_SYSTEM_READY_MSECS/1000)*RETRY_TIMES_FOR_SYSTEM_READY))
next_state = STATE_EXIT
sfp_error_event.set()
else:
retry = retry + 1
# get_transceiver_change_event may return immediately,
# we want the retry expired in expected time period,
# So need to calc the time diff,
# if time diff less that the pre-defined waiting time,
# use sleep() to complete the time.
time_now = time.time()
time_diff = time_now - time_start
if time_diff < RETRY_PERIOD_FOR_SYSTEM_READY_MSECS/1000:
time.sleep(RETRY_PERIOD_FOR_SYSTEM_READY_MSECS/1000 - time_diff)
elif state == STATE_NORMAL:
helper_logger.log_error("Got system_not_ready in normal state, treat as fatal. Exiting...")
next_state = STATE_EXIT
else:
next_state = STATE_EXIT
elif event == SYSTEM_BECOME_READY:
if state == STATE_INIT:
next_state = STATE_NORMAL
helper_logger.log_info("Got system_become_ready in init state, transition to normal state")
elif state == STATE_NORMAL:
helper_logger.log_info("Got system_become_ready in normal state, ignored")
else:
next_state = STATE_EXIT
elif event == NORMAL_EVENT:
if state == STATE_NORMAL or state == STATE_INIT:
if state == STATE_INIT:
next_state = STATE_NORMAL
# this is the originally logic that handled the transceiver change event
# this can be reached in two cases:
# 1. the state has been normal before got the event
# 2. the state was init and transition to normal after got the event.
# this is for the vendors who don't implement "system_not_ready/system_becom_ready" logic
logical_port_dict = {}
for key, value in port_dict.items():
# SFP error event should be cached because: when a logical port is created, there is no way to
# detect the SFP error by platform API.
if value != sfp_status_helper.SFP_STATUS_INSERTED and value != sfp_status_helper.SFP_STATUS_REMOVED:
self.sfp_error_dict[key] = (value, error_dict)
else:
self.sfp_error_dict.pop(key, None)
logical_port_list = self.port_mapping.get_physical_to_logical(key)
if logical_port_list is None:
helper_logger.log_warning("Got unknown FP port index {}, ignored".format(key))
continue
for logical_port in logical_port_list:
logical_port_dict[logical_port] = value
# Get the asic to which this port belongs
asic_index = self.port_mapping.get_asic_id_for_logical_port(logical_port)
if asic_index is None:
helper_logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port))
continue
if value == sfp_status_helper.SFP_STATUS_INSERTED:
helper_logger.log_info("Got SFP inserted event")
# A plugin event will clear the error state.
update_port_transceiver_status_table(
logical_port, xcvr_table_helper.get_status_tbl(asic_index), sfp_status_helper.SFP_STATUS_INSERTED)
helper_logger.log_info("receive plug in and update port sfp status table.")
rc = post_port_sfp_info_to_db(logical_port, self.port_mapping, xcvr_table_helper.get_intf_tbl(asic_index), transceiver_dict)
# If we didn't get the sfp info, assuming the eeprom is not ready, give a try again.
if rc == SFP_EEPROM_NOT_READY:
helper_logger.log_warning("SFP EEPROM is not ready. One more try...")
time.sleep(TIME_FOR_SFP_READY_SECS)
rc = post_port_sfp_info_to_db(logical_port, self.port_mapping, xcvr_table_helper.get_intf_tbl(asic_index), transceiver_dict)
if rc == SFP_EEPROM_NOT_READY:
# If still failed to read EEPROM, put it to retry set
self.retry_eeprom_set.add(logical_port)
if rc != SFP_EEPROM_NOT_READY:
post_port_dom_info_to_db(logical_port, self.port_mapping, xcvr_table_helper.get_dom_tbl(asic_index))
post_port_dom_threshold_info_to_db(logical_port, self.port_mapping, xcvr_table_helper.get_dom_tbl(asic_index))
notify_media_setting(logical_port, transceiver_dict, xcvr_table_helper.get_app_port_tbl(asic_index), self.port_mapping)
transceiver_dict.clear()
elif value == sfp_status_helper.SFP_STATUS_REMOVED:
helper_logger.log_info("Got SFP removed event")
update_port_transceiver_status_table(
logical_port, xcvr_table_helper.get_status_tbl(asic_index), sfp_status_helper.SFP_STATUS_REMOVED)
helper_logger.log_info("receive plug out and pdate port sfp status table.")
del_port_sfp_dom_info_from_db(logical_port, self.port_mapping, xcvr_table_helper.get_intf_tbl(asic_index), xcvr_table_helper.get_dom_tbl(asic_index))
else:
try:
error_bits = int(value)
helper_logger.log_info("Got SFP error event {}".format(value))
error_descriptions = sfp_status_helper.fetch_generic_error_description(error_bits)
if sfp_status_helper.has_vendor_specific_error(error_bits):
if error_dict:
vendor_specific_error_description = error_dict.get(key)
else:
vendor_specific_error_description = _wrapper_get_sfp_error_description(key)
error_descriptions.append(vendor_specific_error_description)
# Add error info to database
# Any existing error will be replaced by the new one.
update_port_transceiver_status_table(logical_port, xcvr_table_helper.get_status_tbl(asic_index), value, '|'.join(error_descriptions))
helper_logger.log_info("Receive error update port sfp status table.")
# In this case EEPROM is not accessible. The DOM info will be removed since it can be out-of-date.
# The interface info remains in the DB since it is static.
if sfp_status_helper.is_error_block_eeprom_reading(error_bits):
del_port_sfp_dom_info_from_db(logical_port, None, xcvr_table_helper.get_dom_tbl(asic_index))
except (TypeError, ValueError) as e:
helper_logger.log_error("Got unrecognized event {}, ignored".format(value))
# Since ports could be connected to a mux cable, if there is a change event process the change for being on a Y cable Port
y_cable_helper.change_ports_status_for_y_cable_change_event(
logical_port_dict, self.port_mapping, y_cable_presence, stopping_event)
else:
next_state = STATE_EXIT
elif event == SYSTEM_FAIL:
if state == STATE_INIT:
# To overcome a case that system is only temporarily not available,
# when get system fail event will wait and retry for a certain period,
# if system recovered in this period xcvrd will transit to INIT state
# and continue run, if can not recover then exit.
if retry >= RETRY_TIMES_FOR_SYSTEM_FAIL:
helper_logger.log_error("System failed to recover in {} secs. Exiting...".format(
(RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS/1000)*RETRY_TIMES_FOR_SYSTEM_FAIL))
next_state = STATE_EXIT
sfp_error_event.set()
else:
retry = retry + 1
waiting_time_compensation_with_sleep(time_start, RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS/1000)
elif state == STATE_NORMAL:
helper_logger.log_error("Got system_fail in normal state, treat as error, transition to INIT...")
next_state = STATE_INIT
timeout = RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS
retry = 0
else:
next_state = STATE_EXIT
else:
helper_logger.log_warning("Got unknown event {} on state {}.".format(event, state))
if next_state != state:
helper_logger.log_debug("State transition from {} to {}".format(state, next_state))
state = next_state
if next_state == STATE_EXIT:
os.kill(os.getppid(), signal.SIGTERM)
break
elif next_state == STATE_NORMAL:
timeout = STATE_MACHINE_UPDATE_PERIOD_MSECS
helper_logger.log_info("Stop SFP monitoring loop")
    def task_run(self, sfp_error_event, y_cable_presence):
        """Spawn the SFP monitoring loop in a child process (no-op if a stop was already requested)."""
        if self.task_stopping_event.is_set():
            return

        self.task_process = multiprocessing.Process(target=self.task_worker, args=(
            self.task_stopping_event, sfp_error_event, y_cable_presence))
        self.task_process.start()
    def task_stop(self):
        """Signal the worker to stop, then forcibly kill the child process."""
        self.task_stopping_event.set()
        # NOTE(review): SIGKILL gives the child no chance to clean up; this
        # looks deliberate (the loop may be blocked inside a platform API
        # call) - confirm before softening to SIGTERM/join.
        os.kill(self.task_process.pid, signal.SIGKILL)
    def on_port_config_change(self, stopping_event, y_cable_presence, port_change_event):
        """Handle a CONFIG_DB port add/remove event.

        Ordering matters: for a removal the DB cleanup and Y-cable update
        run BEFORE the internal mapping is updated (the port must still be
        resolvable); for an addition the mapping is updated FIRST so the
        new port can be resolved while populating the DB.
        """
        if port_change_event.event_type == port_mapping.PortChangeEvent.PORT_REMOVE:
            self.on_remove_logical_port(port_change_event)
            # Update y_cable related database once a logical port is removed
            y_cable_helper.change_ports_status_for_y_cable_change_event(
                {port_change_event.port_name:sfp_status_helper.SFP_STATUS_REMOVED},
                self.port_mapping,
                y_cable_presence,
                stopping_event)
            self.port_mapping.handle_port_change_event(port_change_event)
        elif port_change_event.event_type == port_mapping.PortChangeEvent.PORT_ADD:
            self.port_mapping.handle_port_change_event(port_change_event)
            logical_port_event_dict = self.on_add_logical_port(port_change_event)
            # Update y_cable related database once a logical port is added
            y_cable_helper.change_ports_status_for_y_cable_change_event(
                logical_port_event_dict,
                self.port_mapping,
                y_cable_presence,
                stopping_event)
def on_remove_logical_port(self, port_change_event):
"""Called when a logical port is removed from CONFIG_DB.
Args:
port_change_event (object): port change event
"""
# To avoid race condition, remove the entry TRANSCEIVER_DOM_INFO, TRANSCEIVER_STATUS_INFO and TRANSCEIVER_INFO table.
# The operation to remove entry from TRANSCEIVER_DOM_INFO is duplicate with DomInfoUpdateTask.on_remove_logical_port,
# but it is necessary because TRANSCEIVER_DOM_INFO is also updated in this sub process when a new SFP is inserted.
del_port_sfp_dom_info_from_db(port_change_event.port_name,
self.port_mapping,
xcvr_table_helper.get_intf_tbl(port_change_event.asic_id),
xcvr_table_helper.get_dom_tbl(port_change_event.asic_id))
delete_port_from_status_table(port_change_event.port_name, xcvr_table_helper.get_status_tbl(port_change_event.asic_id))
# The logical port has been removed, no need retry EEPROM reading
if port_change_event.port_name in self.retry_eeprom_set:
self.retry_eeprom_set.remove(port_change_event.port_name)
    def on_add_logical_port(self, port_change_event):
        """Called when a logical port is added

        Args:
            port_change_event (object): port change event

        Returns:
            dict: key is logical port name, value is SFP status
        """
        # A logical port is created. There could be 4 cases:
        # 1. SFP information is already in DB, which means that a logical port with the same physical index is in DB before.
        #    Need copy the data from existing logical port and insert it into TRANSCEIVER_DOM_INFO, TRANSCEIVER_STATUS_INFO
        #    and TRANSCEIVER_INFO table.
        # 2. SFP information is not in DB and SFP is present with no SFP error. Need query the SFP status by platform API and
        #    insert the data to DB.
        # 3. SFP information is not in DB and SFP is present with SFP error. If the SFP error does not block EEPROM reading,
        #    just query transceiver information and DOM sensor information via platform API and update the data to DB; otherwise,
        #    just update TRANSCEIVER_STATUS table with the error.
        # 4. SFP information is not in DB and SFP is not present. Only update TRANSCEIVER_STATUS_INFO table.
        logical_port_event_dict = {}
        sfp_status = None
        sibling_port = None
        status_tbl = xcvr_table_helper.get_status_tbl(port_change_event.asic_id)
        int_tbl = xcvr_table_helper.get_intf_tbl(port_change_event.asic_id)
        dom_tbl = xcvr_table_helper.get_dom_tbl(port_change_event.asic_id)
        physical_port_list = self.port_mapping.logical_port_name_to_physical_port_list(port_change_event.port_name)

        # Try to find a logical port with same physical index in DB
        for physical_port in physical_port_list:
            logical_port_list = self.port_mapping.get_physical_to_logical(physical_port)
            if not logical_port_list:
                continue

            for logical_port in logical_port_list:
                found, sfp_status = status_tbl.get(logical_port)
                if found:
                    # Remember which sibling we copied from so its INFO/DOM rows
                    # can be cloned below.
                    sibling_port = logical_port
                    break

            if sfp_status:
                break

        if sfp_status:
            # Case 1: SFP information is in DB — clone the sibling's rows.
            status_tbl.set(port_change_event.port_name, sfp_status)
            logical_port_event_dict[port_change_event.port_name] = dict(sfp_status)['status']
            found, sfp_info = int_tbl.get(sibling_port)
            if found:
                int_tbl.set(port_change_event.port_name, sfp_info)
            found, dom_info = dom_tbl.get(sibling_port)
            if found:
                dom_tbl.set(port_change_event.port_name, dom_info)
        else:
            error_description = 'N/A'
            status = None
            read_eeprom = True
            # A previously recorded SFP error for this physical index overrides
            # the default "inserted/removed" status.
            if port_change_event.port_index in self.sfp_error_dict:
                value, error_dict = self.sfp_error_dict[port_change_event.port_index]
                status = value
                error_bits = int(value)
                helper_logger.log_info("Got SFP error event {}".format(value))

                error_descriptions = sfp_status_helper.fetch_generic_error_description(error_bits)

                if sfp_status_helper.has_vendor_specific_error(error_bits):
                    if error_dict:
                        vendor_specific_error_description = error_dict.get(port_change_event.port_index)
                    else:
                        vendor_specific_error_description = _wrapper_get_sfp_error_description(port_change_event.port_index)
                    error_descriptions.append(vendor_specific_error_description)

                error_description = '|'.join(error_descriptions)
                helper_logger.log_info("Receive error update port sfp status table.")
                if sfp_status_helper.is_error_block_eeprom_reading(error_bits):
                    # Case 3 (blocking error): skip EEPROM, only record status.
                    read_eeprom = False

            # SFP information not in DB
            if _wrapper_get_presence(port_change_event.port_index) and read_eeprom:
                # Cases 2/3 (non-blocking): present — read EEPROM and populate DB.
                logical_port_event_dict[port_change_event.port_name] = sfp_status_helper.SFP_STATUS_INSERTED
                transceiver_dict = {}
                status = sfp_status_helper.SFP_STATUS_INSERTED if not status else status
                rc = post_port_sfp_info_to_db(port_change_event.port_name, self.port_mapping, int_tbl, transceiver_dict)
                if rc == SFP_EEPROM_NOT_READY:
                    # Failed to read EEPROM, put it to retry set
                    self.retry_eeprom_set.add(port_change_event.port_name)
                else:
                    post_port_dom_info_to_db(port_change_event.port_name, self.port_mapping, dom_tbl)
                    post_port_dom_threshold_info_to_db(port_change_event.port_name, self.port_mapping, dom_tbl)
                    notify_media_setting(port_change_event.port_name, transceiver_dict, xcvr_table_helper.get_app_port_tbl(port_change_event.asic_id), self.port_mapping)
            else:
                # Case 4 (or blocking error): only the status table is updated.
                status = sfp_status_helper.SFP_STATUS_REMOVED if not status else status
                logical_port_event_dict[port_change_event.port_name] = status
            update_port_transceiver_status_table(port_change_event.port_name, status_tbl, status, error_description)
        return logical_port_event_dict
def retry_eeprom_reading(self):
"""Retry EEPROM reading, if retry succeed, remove the logical port from the retry set
"""
if not self.retry_eeprom_set:
return
# Retry eeprom with an interval RETRY_EEPROM_READING_INTERVAL. No need to put sleep here
# because _wrapper_get_transceiver_change_event has a timeout argument.
now = time.time()
if now - self.last_retry_eeprom_time < self.RETRY_EEPROM_READING_INTERVAL:
return
self.last_retry_eeprom_time = now
transceiver_dict = {}
retry_success_set = set()
for logical_port in self.retry_eeprom_set:
asic_index = self.port_mapping.get_asic_id_for_logical_port(logical_port)
rc = post_port_sfp_info_to_db(logical_port, self.port_mapping, xcvr_table_helper.get_intf_tbl(asic_index), transceiver_dict)
if rc != SFP_EEPROM_NOT_READY:
post_port_dom_info_to_db(logical_port, self.port_mapping, xcvr_table_helper.get_dom_tbl(asic_index))
post_port_dom_threshold_info_to_db(logical_port, self.port_mapping, xcvr_table_helper.get_dom_tbl(asic_index))
notify_media_setting(logical_port, transceiver_dict, xcvr_table_helper.get_app_port_tbl(asic_index), self.port_mapping)
transceiver_dict.clear()
retry_success_set.add(logical_port)
# Update retry EEPROM set
self.retry_eeprom_set -= retry_success_set
#
# Daemon =======================================================================
#
class DaemonXcvrd(daemon_base.DaemonBase):
    """Main xcvrd daemon.

    Initializes platform access and STATE_DB transceiver tables, spawns the
    DOM/SFP/Y-cable update tasks, then blocks until a stop signal arrives.
    """

    def __init__(self, log_identifier):
        super(DaemonXcvrd, self).__init__(log_identifier)
        self.stop_event = threading.Event()
        # multiprocessing.Event because SfpStateUpdateTask runs in a child process
        self.sfp_error_event = multiprocessing.Event()
        # Single-element list so helper code can flip the flag in place
        self.y_cable_presence = [False]

    # Signal handler
    def signal_handler(self, sig, frame):
        if sig == signal.SIGHUP:
            self.log_info("Caught SIGHUP - ignoring...")
        elif sig == signal.SIGINT:
            self.log_info("Caught SIGINT - exiting...")
            self.stop_event.set()
        elif sig == signal.SIGTERM:
            self.log_info("Caught SIGTERM - exiting...")
            self.stop_event.set()
        else:
            # BUGFIX: sig is an int; concatenating it to a str raised
            # TypeError inside the signal handler. Format it instead.
            self.log_warning("Caught unhandled signal '{}'".format(sig))

    # Wait for port config is done
    def wait_for_port_config_done(self, namespace):
        # Connect to APPL_DB and subscribe to PORT table notifications
        appl_db = daemon_base.db_connect("APPL_DB", namespace=namespace)

        sel = swsscommon.Select()
        port_tbl = swsscommon.SubscriberStateTable(appl_db, swsscommon.APP_PORT_TABLE_NAME)
        sel.addSelectable(port_tbl)

        # Make sure this daemon started after all port configured
        while not self.stop_event.is_set():
            (state, c) = sel.select(port_mapping.SELECT_TIMEOUT_MSECS)
            if state == swsscommon.Select.TIMEOUT:
                continue
            if state != swsscommon.Select.OBJECT:
                self.log_warning("sel.select() did not return swsscommon.Select.OBJECT")
                continue

            (key, op, fvp) = port_tbl.pop()
            if key in ["PortConfigDone", "PortInitDone"]:
                break

    def load_media_settings(self):
        """Load media_settings.json from the platform directory into g_dict."""
        global g_dict
        (platform_path, _) = device_info.get_paths_to_platform_and_hwsku_dirs()

        media_settings_file_path = os.path.join(platform_path, "media_settings.json")
        if not os.path.isfile(media_settings_file_path):
            self.log_info("xcvrd: No media file exists")
            return {}

        with open(media_settings_file_path, "r") as media_file:
            g_dict = json.load(media_file)

    # Initialize daemon
    def init(self):
        """Initialize platform access, DB tables and per-port state.

        Returns:
            tuple: (port mapping data, set of ports needing EEPROM retry)
        """
        global platform_sfputil
        global platform_chassis
        global xcvr_table_helper

        self.log_info("Start daemon init...")

        # Load new platform api class
        try:
            import sonic_platform.platform
            import sonic_platform_base.sonic_sfp.sfputilhelper
            platform_chassis = sonic_platform.platform.Platform().get_chassis()
            self.log_info("chassis loaded {}".format(platform_chassis))
            # we have to make use of sfputil for some features
            # even though when new platform api is used for all vendors.
            # in this sense, we treat it as a part of new platform api.
            # we have already moved sfputil to sonic_platform_base
            # which is the root of new platform api.
            platform_sfputil = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper()
        except Exception as e:
            self.log_warning("Failed to load chassis due to {}".format(repr(e)))

        # Load platform specific sfputil class
        if platform_chassis is None or platform_sfputil is None:
            try:
                platform_sfputil = self.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
            except Exception as e:
                self.log_error("Failed to load sfputil: {}".format(str(e)), True)
                sys.exit(SFPUTIL_LOAD_ERROR)

        if multi_asic.is_multi_asic():
            # Load the namespace details first from the database_global.json file.
            swsscommon.SonicDBConfig.initializeGlobalConfig()

        # Initialize xcvr table helper
        xcvr_table_helper = XcvrTableHelper()

        self.load_media_settings()

        warmstart = swsscommon.WarmStart()
        warmstart.initialize("xcvrd", "pmon")
        warmstart.checkWarmStart("xcvrd", "pmon", False)
        is_warm_start = warmstart.isWarmStart()

        # Make sure this daemon started after all port configured
        self.log_info("Wait for port config is done")
        for namespace in xcvr_table_helper.namespaces:
            self.wait_for_port_config_done(namespace)

        port_mapping_data = port_mapping.get_port_mapping()

        # Post all the current interface dom/sfp info to STATE_DB
        self.log_info("Post all port DOM/SFP info to DB")
        retry_eeprom_set = post_port_sfp_dom_info_to_db(is_warm_start, port_mapping_data, self.stop_event)

        # Init port sfp status table
        self.log_info("Init port sfp status table")
        init_port_sfp_status_tbl(port_mapping_data, self.stop_event)

        # Init port y_cable status table
        y_cable_helper.init_ports_status_for_y_cable(
            platform_sfputil, platform_chassis, self.y_cable_presence, port_mapping_data, self.stop_event)

        return port_mapping_data, retry_eeprom_set

    # Deinitialize daemon
    def deinit(self):
        """Remove all per-port transceiver information from DB before exit."""
        self.log_info("Start daemon deinit...")

        # Delete all the information from DB and then exit
        port_mapping_data = port_mapping.get_port_mapping()
        logical_port_list = port_mapping_data.logical_port_list
        for logical_port_name in logical_port_list:
            # Get the asic to which this port belongs
            asic_index = port_mapping_data.get_asic_id_for_logical_port(logical_port_name)
            if asic_index is None:
                helper_logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
                continue
            # BUGFIX: was get_int_tbl(), which XcvrTableHelper does not define
            # (the accessor is get_intf_tbl) and raised AttributeError here.
            del_port_sfp_dom_info_from_db(logical_port_name, port_mapping_data, xcvr_table_helper.get_intf_tbl(asic_index), xcvr_table_helper.get_dom_tbl(asic_index))
            delete_port_from_status_table(logical_port_name, xcvr_table_helper.get_status_tbl(asic_index))

        if self.y_cable_presence[0] is True:
            y_cable_helper.delete_ports_status_for_y_cable(port_mapping_data)

        del globals()['platform_chassis']

    # Run daemon
    def run(self):
        """Daemon entry point: init, start worker tasks, wait, tear down."""
        self.log_info("Starting up...")

        # Start daemon initialization sequence
        port_mapping_data, retry_eeprom_set = self.init()

        # Start the dom sensor info update thread
        dom_info_update = DomInfoUpdateTask(port_mapping_data)
        dom_info_update.task_run(self.y_cable_presence)

        # Start the sfp state info update process
        sfp_state_update = SfpStateUpdateTask(port_mapping_data, retry_eeprom_set)
        sfp_state_update.task_run(self.sfp_error_event, self.y_cable_presence)

        # Start the Y-cable state info update process if Y cable presence established
        y_cable_state_update = None
        if self.y_cable_presence[0] is True:
            y_cable_state_update = y_cable_helper.YCableTableUpdateTask(port_mapping_data)
            y_cable_state_update.task_run()

        # Start main loop
        self.log_info("Start daemon main loop")

        self.stop_event.wait()

        self.log_info("Stop daemon main loop")

        # Stop the dom sensor info update thread
        dom_info_update.task_stop()

        # Stop the sfp state info update process
        sfp_state_update.task_stop()

        # Stop the Y-cable state info update process
        if self.y_cable_presence[0] is True:
            y_cable_state_update.task_stop()

        # Start daemon deinitialization sequence
        self.deinit()

        self.log_info("Shutting down...")

        if self.sfp_error_event.is_set():
            sys.exit(SFP_SYSTEM_ERROR)
class XcvrTableHelper:
    """Registry of per-ASIC swsscommon table handles used by xcvrd.

    For every front-end namespace, connects to STATE_DB/APPL_DB and caches the
    transceiver info/DOM/status tables plus the APP port producer table, keyed
    by ASIC index.
    """

    def __init__(self):
        self.int_tbl = {}
        self.dom_tbl = {}
        self.status_tbl = {}
        self.app_port_tbl = {}
        self.state_db = {}
        self.namespaces = multi_asic.get_front_end_namespaces()
        for namespace in self.namespaces:
            asic_id = multi_asic.get_asic_index_from_namespace(namespace)
            state_db = daemon_base.db_connect("STATE_DB", namespace)
            self.state_db[asic_id] = state_db
            self.int_tbl[asic_id] = swsscommon.Table(state_db, TRANSCEIVER_INFO_TABLE)
            self.dom_tbl[asic_id] = swsscommon.Table(state_db, TRANSCEIVER_DOM_SENSOR_TABLE)
            self.status_tbl[asic_id] = swsscommon.Table(state_db, TRANSCEIVER_STATUS_TABLE)
            appl_db = daemon_base.db_connect("APPL_DB", namespace)
            self.app_port_tbl[asic_id] = swsscommon.ProducerStateTable(appl_db, swsscommon.APP_PORT_TABLE_NAME)

    def get_intf_tbl(self, asic_id):
        """Return the TRANSCEIVER_INFO table for the given ASIC."""
        return self.int_tbl[asic_id]

    def get_dom_tbl(self, asic_id):
        """Return the TRANSCEIVER_DOM_SENSOR table for the given ASIC."""
        return self.dom_tbl[asic_id]

    def get_status_tbl(self, asic_id):
        """Return the TRANSCEIVER_STATUS table for the given ASIC."""
        return self.status_tbl[asic_id]

    def get_app_port_tbl(self, asic_id):
        """Return the APPL_DB PORT producer table for the given ASIC."""
        return self.app_port_tbl[asic_id]

    def get_state_db(self, asic_id):
        """Return the STATE_DB connector for the given ASIC."""
        return self.state_db[asic_id]
#
# Main =========================================================================
#
# This is our main entry point for xcvrd script
def main():
    """Entry point for the xcvrd script: construct the daemon and run it."""
    DaemonXcvrd(SYSLOG_IDENTIFIER).run()


if __name__ == '__main__':
    main()
|
MainFrame.py | # -*- coding: utf-8 -*-
#
import os
import sys
import wx
import threading
from form.panel.FilePanel import FilePanel
from form.panel.ParamPanel import ParamPanel
from form.panel.ParamAdvancePanel import ParamAdvancePanel
from form.panel.ParamBonePanel import ParamBonePanel
from utils import MFormUtils, MFileUtils # noqa
from utils.MLogger import MLogger # noqa
from form.worker.ExportWorkerThread import ExportWorkerThread
from form.worker.LoadWorkerThread import LoadWorkerThread
if os.name == "nt":
    import winsound  # Windows-only: used to play the completion sound

logger = MLogger(__name__)

# Custom wx events used by worker threads to signal completion back to the UI.
# NOTE: `import wx` does not implicitly import the wx.lib subpackage, so
# wx.lib.newevent must be imported explicitly before NewEvent() is called;
# relying on a panel module to import it as a side effect is fragile.
import wx.lib.newevent  # noqa: E402

(TailorThreadEvent, EVT_TAILOR_THREAD) = wx.lib.newevent.NewEvent()
(LoadThreadEvent, EVT_LOAD_THREAD) = wx.lib.newevent.NewEvent()
class MainFrame(wx.Frame):
    """Top-level PmxTailor window.

    Hosts a notebook of tabs (file selection plus three parameter-tuning tabs)
    and coordinates the background load/export worker threads.
    """

    def __init__(self, parent, mydir_path: str, version_name: str, logging_level: int, is_saving: bool, is_out_log: bool):
        self.version_name = version_name
        self.logging_level = logging_level
        self.is_out_log = is_out_log
        self.is_saving = is_saving
        self.mydir_path = mydir_path
        self.elapsed_time = 0          # accumulated processing time in seconds
        self.popuped_finger_warning = False
        # Worker threads (None while idle)
        self.worker = None
        self.load_worker = None
        self.my_program = 'PmxTailor'

        # Widen the frame for languages whose translated labels are longer
        frame_size = wx.Size(600, 700)
        if logger.target_lang == "en_US":
            frame_size = wx.Size(800, 700)
        elif logger.target_lang == "zh_CN":
            frame_size = wx.Size(700, 700)

        frame_title = logger.transtext(f'{self.my_program} ローカル版') + f' {self.version_name}'

        wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=frame_title, \
                          pos=wx.DefaultPosition, size=frame_size, style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)

        # Load the file-path history
        self.file_hitories = MFileUtils.read_history(self.mydir_path)

        # ---------------------------------------------

        self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)

        bSizer1 = wx.BoxSizer(wx.VERTICAL)

        self.note_ctrl = wx.Notebook(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
        # Color-code the notebook so the build flavor is visible at a glance
        if self.logging_level == MLogger.FULL or self.logging_level == MLogger.DEBUG_FULL:
            # Full-data (verbose) build
            self.note_ctrl.SetBackgroundColour("RED")
        elif self.logging_level == MLogger.DEBUG:
            # Test (debug) build
            self.note_ctrl.SetBackgroundColour("CORAL")
        elif self.logging_level == MLogger.TIMER:
            # Timing-measurement build
            self.note_ctrl.SetBackgroundColour("YELLOW")
        elif not is_saving:
            # Saving disabled: change color as a warning
            self.note_ctrl.SetBackgroundColour("BLUE")
        elif is_out_log:
            # Log output enabled: change color as a reminder
            self.note_ctrl.SetBackgroundColour("AQUAMARINE")
        else:
            self.note_ctrl.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNSHADOW))

        # ---------------------------------------------

        # File tab
        self.file_panel_ctrl = FilePanel(self, self.note_ctrl, 0)
        self.note_ctrl.AddPage(self.file_panel_ctrl, logger.transtext("ファイル"), False)

        # Parameter-tuning tab
        self.simple_param_panel_ctrl = ParamPanel(self, self.note_ctrl, 1)
        self.note_ctrl.AddPage(self.simple_param_panel_ctrl, logger.transtext("パラ調整"), False)

        # Parameter-tuning (advanced) tab
        self.advance_param_panel_ctrl = ParamAdvancePanel(self, self.note_ctrl, 2)
        self.note_ctrl.AddPage(self.advance_param_panel_ctrl, logger.transtext("パラ調整(詳細)"), False)

        # Parameter-tuning (bone) tab
        self.bone_param_panel_ctrl = ParamBonePanel(self, self.note_ctrl, 3)
        self.note_ctrl.AddPage(self.bone_param_panel_ctrl, logger.transtext("パラ調整(ボーン)"), False)

        # ---------------------------------------------

        # Handler for tab selection changes
        self.note_ctrl.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.on_tab_change)

        # Bind worker-thread completion events
        self.Bind(EVT_TAILOR_THREAD, self.on_exec_result)
        self.Bind(EVT_LOAD_THREAD, self.on_load_result)

        # ---------------------------------------------

        bSizer1.Add(self.note_ctrl, 1, wx.EXPAND, 5)

        # By default, stdout goes to the file tab's console
        sys.stdout = self.file_panel_ctrl.console_ctrl

        self.SetSizer(bSizer1)
        self.Layout()
        self.Centre(wx.BOTH)

    def on_idle(self, event: wx.Event):
        """Idle handler (intentionally a no-op)."""
        pass

    def on_tab_change(self, event: wx.Event):
        """When a parameter tab is selected, first force a (re)load of the model.

        The selection is temporarily pinned back to the file tab while the
        load worker runs; the target tab is re-selected in on_load_result.
        """
        if self.file_panel_ctrl.is_fix_tab:
            # Tabs are locked (a worker is running): bounce back to the file tab
            self.note_ctrl.ChangeSelection(self.file_panel_ctrl.tab_idx)
            event.Skip()
            return

        if self.note_ctrl.GetSelection() == self.simple_param_panel_ctrl.tab_idx:
            # Clear the console
            self.file_panel_ctrl.console_ctrl.Clear()
            wx.GetApp().Yield()

            # Pin to the file tab while loading
            self.note_ctrl.SetSelection(self.file_panel_ctrl.tab_idx)
            self.file_panel_ctrl.fix_tab()

            logger.info(logger.transtext("パラ調整タブ表示準備開始\nファイル読み込み処理を実行します。少しお待ちください...."), decoration=MLogger.DECORATION_BOX)

            # Run the load process
            self.load(event, is_param=True)

        if self.note_ctrl.GetSelection() == self.advance_param_panel_ctrl.tab_idx:
            # Clear the console
            self.file_panel_ctrl.console_ctrl.Clear()
            wx.GetApp().Yield()

            # Pin to the file tab while loading
            self.note_ctrl.SetSelection(self.file_panel_ctrl.tab_idx)
            self.file_panel_ctrl.fix_tab()

            logger.info(logger.transtext("パラ調整(詳細)タブ表示準備開始\nファイル読み込み処理を実行します。少しお待ちください...."), decoration=MLogger.DECORATION_BOX)

            # Run the load process
            self.load(event, is_param_advance=True)

        if self.note_ctrl.GetSelection() == self.bone_param_panel_ctrl.tab_idx:
            # Clear the console
            self.file_panel_ctrl.console_ctrl.Clear()
            wx.GetApp().Yield()

            # Pin to the file tab while loading
            self.note_ctrl.SetSelection(self.file_panel_ctrl.tab_idx)
            self.file_panel_ctrl.fix_tab()

            logger.info(logger.transtext("パラ調整(ボーン)タブ表示準備開始\nファイル読み込み処理を実行します。少しお待ちください...."), decoration=MLogger.DECORATION_BOX)

            # Run the load process
            self.load(event, is_param_bone=True)

    # Re-allow tab switching
    def release_tab(self):
        self.file_panel_ctrl.release_tab()
        self.simple_param_panel_ctrl.release_tab()
        self.advance_param_panel_ctrl.release_tab()
        self.bone_param_panel_ctrl.release_tab()

    # Re-enable form input
    def enable(self):
        self.file_panel_ctrl.enable()

    def show_worked_time(self):
        """Format self.elapsed_time as a localized minutes/seconds string."""
        # Convert elapsed seconds into minutes and seconds
        td_m, td_s = divmod(self.elapsed_time, 60)

        if td_m == 0:
            if logger.target_lang == "ja_JP":
                worked_time = "{0:02d}秒".format(int(td_s))
            else:
                worked_time = "{0:02d}s".format(int(td_s))
        else:
            if logger.target_lang == "ja_JP":
                worked_time = "{0:02d}分{1:02d}秒".format(int(td_m), int(td_s))
            else:
                worked_time = "{0:02d}m{1:02d}s".format(int(td_m), int(td_s))

        return worked_time

    # Validate the file-set inputs
    def is_valid(self):
        result = True
        result = self.file_panel_ctrl.org_model_file_ctrl.is_valid() and result

        return result

    # Load the model data (in a background thread)
    def load(self, event, is_exec=False, is_param=False, is_param_advance=False, is_param_bone=False):
        """Validate inputs and start the LoadWorkerThread.

        The flags select what happens after loading: run the export (is_exec)
        or initialize one of the parameter tabs.
        """
        # Disable the form
        self.file_panel_ctrl.disable()
        # Lock tab switching
        self.file_panel_ctrl.fix_tab()

        self.elapsed_time = 0
        result = True
        result = self.is_valid() and result

        if not result:
            if is_param or is_param_advance or is_param_bone:
                tab_name = logger.transtext("パラ調整")
                if is_param_advance:
                    tab_name = logger.transtext("パラ調整(詳細)")
                if is_param_bone:
                    tab_name = logger.transtext("パラ調整(ボーン)")
                # Report that the tab cannot be opened without a model file
                logger.error("「ファイル」タブで対象モデルファイルパスが指定されていないため、「%s」タブが開けません。" \
                             + "\n既に指定済みの場合、現在読み込み中の可能性があります。" \
                             + "\n「■読み込み成功」のログが出てから、「%s」タブを開いてください。", tab_name, tab_name, decoration=MLogger.DECORATION_BOX)

            # Re-allow tab switching
            self.release_tab()
            # Re-enable the form
            self.enable()

            return result

        # Start loading
        if self.load_worker:
            logger.error(logger.transtext("まだ処理が実行中です。終了してから再度実行してください。"), decoration=MLogger.DECORATION_BOX)
        else:
            # Switch the button to its "stop loading" label
            self.file_panel_ctrl.export_btn_ctrl.SetLabel(logger.transtext("読み込み処理停止"))
            self.file_panel_ctrl.export_btn_ctrl.Enable()

            # Run in a separate thread
            self.load_worker = LoadWorkerThread(self, LoadThreadEvent, is_exec, is_param, is_param_advance, is_param_bone)
            self.load_worker.start()

        return result

    def is_loaded_valid(self):
        """Return True if at least one parameter option could be built from the loaded model."""
        return len(self.simple_param_panel_ctrl.get_param_options(is_show_error=True)) > 0

    # Load-completion handler
    def on_load_result(self, event: wx.Event):
        """Handle LoadWorkerThread completion: either start the export or open
        the requested parameter tab; on failure just unlock the UI."""
        self.elapsed_time = event.elapsed_time

        # Re-allow tab switching
        self.release_tab()
        # Re-enable the form
        self.enable()
        # Worker finished
        self.load_worker = None

        # Hide progress
        self.file_panel_ctrl.gauge_ctrl.SetValue(0)

        # Switch the button back to the execute label
        self.file_panel_ctrl.export_btn_ctrl.SetLabel(logger.transtext("PmxTailor実行"))
        self.file_panel_ctrl.export_btn_ctrl.Enable()

        if not event.result:
            # Play the completion sound
            self.sound_finish()

            # Re-allow tab switching
            self.release_tab()
            # Re-enable the form
            self.enable()

            event.Skip()
            return False

        logger.info(logger.transtext("ファイルデータ読み込みが完了しました"), decoration=MLogger.DECORATION_BOX, title="OK")

        if event.is_exec:
            if not self.is_loaded_valid():
                # Play the completion sound
                self.sound_finish()

                # Re-allow tab switching
                self.release_tab()
                # Re-enable the form
                self.enable()

                event.Skip()
                return False

            # Execute immediately: transition to the export process
            # Disable the form
            self.file_panel_ctrl.disable()
            # Lock tab switching
            self.file_panel_ctrl.fix_tab()

            if self.worker:
                logger.error(logger.transtext("まだ処理が実行中です。終了してから再度実行してください。"), decoration=MLogger.DECORATION_BOX)
            else:
                # Switch the button to its "stop" label
                self.file_panel_ctrl.export_btn_ctrl.SetLabel(self.file_panel_ctrl.txt_stop)
                self.file_panel_ctrl.export_btn_ctrl.Enable()

                # Run in a separate thread
                self.worker = ExportWorkerThread(self, TailorThreadEvent, self.is_saving, self.is_out_log)
                self.worker.start()
        elif event.is_param:
            # Opening the parameter tab: run its initialization
            self.note_ctrl.ChangeSelection(self.simple_param_panel_ctrl.tab_idx)
            self.simple_param_panel_ctrl.initialize(event)
        elif event.is_param_advance:
            # Opening the parameter (advanced) tab: run its initialization
            self.note_ctrl.ChangeSelection(self.advance_param_panel_ctrl.tab_idx)
            self.advance_param_panel_ctrl.initialize(event)
        elif event.is_param_bone:
            # Opening the parameter (bone) tab: run its initialization
            self.note_ctrl.ChangeSelection(self.bone_param_panel_ctrl.tab_idx)
            self.bone_param_panel_ctrl.initialize(event)
        else:
            # Play the completion sound
            self.sound_finish()

            logger.info("\n処理時間: %s", self.show_worked_time())

        event.Skip()
        return True

    # Export-thread completion handler
    def on_exec_result(self, event: wx.Event):
        """Handle ExportWorkerThread completion: restore the UI and report time."""
        # Switch the button back to the execute label
        self.file_panel_ctrl.export_btn_ctrl.SetLabel(logger.transtext("PmxTailor実行"))
        self.file_panel_ctrl.export_btn_ctrl.Enable()

        self.elapsed_time += event.elapsed_time
        logger.info("\n処理時間: %s", self.show_worked_time())

        # Worker finished
        self.worker = None

        # Restore stdout to the file tab's console
        sys.stdout = self.file_panel_ctrl.console_ctrl

        # Play the completion sound
        self.sound_finish()

        # Re-allow tab switching
        self.release_tab()
        # Re-enable the form
        self.enable()
        # Hide progress
        self.file_panel_ctrl.gauge_ctrl.SetValue(0)

    def sound_finish(self):
        # Play in a thread so the UI is not blocked
        threading.Thread(target=self.sound_finish_thread).start()

    def sound_finish_thread(self):
        """Play the completion sound (Windows only; silently ignores failures)."""
        if os.name == "nt":
            # Windows
            try:
                winsound.PlaySound("SystemAsterisk", winsound.SND_ALIAS)
            except Exception:
                pass

    def on_wheel_spin_ctrl(self, event: wx.Event, inc=0.1):
        """Adjust a spin control by *inc* per wheel notch, tinting it when negative."""
        if event.GetWheelRotation() > 0:
            event.GetEventObject().SetValue(event.GetEventObject().GetValue() + inc)
            if event.GetEventObject().GetValue() >= 0:
                event.GetEventObject().SetBackgroundColour("WHITE")
        else:
            event.GetEventObject().SetValue(event.GetEventObject().GetValue() - inc)
            if event.GetEventObject().GetValue() < 0:
                event.GetEventObject().SetBackgroundColour("TURQUOISE")

        # Wheel changes also refresh the file timestamp display
        self.file_panel_ctrl.on_change_file(event)
|
testing.py | """
Contains testing infrastructure for QCFractal.
"""
import os
import pkgutil
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
# Mapping lives in collections.abc; importing it from `collections` was
# deprecated since Python 3.3 and removed in Python 3.10.
from collections.abc import Mapping
from contextlib import contextmanager

import numpy as np
import pandas as pd
import pytest
import qcengine as qcng
import requests
from qcelemental.models import Molecule
from tornado.ioloop import IOLoop

from .interface import FractalClient
from .postgres_harness import PostgresHarness, TemporaryPostgres
from .queue import build_queue_adapter
from .server import FractalServer
from .snowflake import FractalSnowflake
from .storage_sockets import storage_socket_factory
### Addon testing capabilities
def pytest_addoption(parser):
    """Register the additional QCFractal PyTest CLI flags.

    See `pytest_collection_modifyitems` for handling and `pytest_configure`
    for adding the matching in-line marks.
    """
    for flag, message in (
        ("--runslow", "run slow tests"),
        ("--runexamples", "run example tests"),
    ):
        parser.addoption(flag, action="store_true", default=False, help=message)
def pytest_collection_modifyitems(config, items):
    """Skip marked tests unless the matching CLI flag was supplied.

    Use decorators:
        @pytest.mark.slow
        @pytest.mark.example
    """
    pending_skips = []
    if not config.getoption("--runslow"):
        pending_skips.append(("slow", pytest.mark.skip(reason="need --runslow option to run")))
    if not config.getoption("--runexamples"):
        pending_skips.append(("example", pytest.mark.skip(reason="need --runexamples option to run")))

    for item in items:
        for keyword, marker in pending_skips:
            if keyword in item.keywords:
                item.add_marker(marker)
def pytest_configure(config):
    """Flag in-test execution and declare the custom in-line marks."""
    import sys

    sys._called_from_test = True

    for marker_line in (
        "example: Mark a given test as an example which can be run",
        "slow: Mark a given test as slower than most other tests, needing a special " "flag to run.",
    ):
        config.addinivalue_line("markers", marker_line)
def pytest_unconfigure(config):
    """Remove the in-test execution flag set by `pytest_configure`."""
    import sys

    del sys._called_from_test
def _plugin_import(plug):
plug_spec = pkgutil.find_loader(plug)
if plug_spec is None:
return False
else:
return True
# Adapter backends exercised by the parametrized queue fixtures.
_adapter_testing = ["pool", "dask", "fireworks", "parsl"]

# Figure out what is imported
_programs = {
    "fireworks": _plugin_import("fireworks"),
    "rdkit": _plugin_import("rdkit"),
    "psi4": _plugin_import("psi4"),
    "parsl": _plugin_import("parsl"),
    "dask": _plugin_import("dask"),
    "dask_jobqueue": _plugin_import("dask_jobqueue"),
    "geometric": _plugin_import("geometric"),
    "torsiondrive": _plugin_import("torsiondrive"),
    "torchani": _plugin_import("torchani"),
}
# dask.distributed is only probed when the dask umbrella package resolved.
if _programs["dask"]:
    _programs["dask.distributed"] = _plugin_import("dask.distributed")
else:
    _programs["dask.distributed"] = False

# dftd3 availability comes from qcengine's program registry, not a Python import.
_programs["dftd3"] = "dftd3" in qcng.list_available_programs()
def has_module(name):
    """Return the import-time detection result for optional dependency *name*."""
    return _programs[name]
def check_has_module(program):
    """Skip the current test when optional dependency *program* is absent."""
    import_message = "Not detecting module {}. Install package if necessary to enable tests."
    if not has_module(program):
        pytest.skip(import_message.format(program))
def _build_pytest_skip(program):
    """Build a skipif marker that disables a test when *program* is absent."""
    import_message = "Not detecting module {}. Install package if necessary to enable tests."
    return pytest.mark.skipif(not has_module(program), reason=import_message.format(program))
# Add a number of module testing options: reusable skipif markers, one per
# optional dependency, built from the import probes above.
using_dask = _build_pytest_skip("dask.distributed")
using_dask_jobqueue = _build_pytest_skip("dask_jobqueue")
using_dftd3 = _build_pytest_skip("dftd3")
using_fireworks = _build_pytest_skip("fireworks")
using_geometric = _build_pytest_skip("geometric")
using_parsl = _build_pytest_skip("parsl")
using_psi4 = _build_pytest_skip("psi4")
using_rdkit = _build_pytest_skip("rdkit")
using_torsiondrive = _build_pytest_skip("torsiondrive")
# Platform (not module) gate: some tests shell out to Bash.
using_unix = pytest.mark.skipif(
    os.name.lower() != "posix", reason="Not on Unix operating system, " "assuming Bash is not present"
)
### Generic helpers
def recursive_dict_merge(base_dict, dict_to_merge_in):
    """Merge *dict_to_merge_in* into *base_dict* in place, descending into nested dicts.

    Unlike a simple top-level merge ({**x, **y}), nested dictionaries are
    merged key-by-key instead of being replaced wholesale.
    """
    for key, incoming in dict_to_merge_in.items():
        existing = base_dict.get(key)
        if isinstance(existing, dict) and isinstance(incoming, Mapping):
            recursive_dict_merge(existing, incoming)
        else:
            base_dict[key] = incoming
def find_open_port():
    """
    Use socket's built in ability to find an open port.

    Binds to port 0 (OS-assigned) and returns the chosen port number; the
    probe socket is closed before returning so the descriptor is not leaked
    (the original left it open, emitting ResourceWarnings).
    """
    with socket.socket() as sock:
        sock.bind(("", 0))
        _host, port = sock.getsockname()
    return port
@contextmanager
def preserve_cwd():
    """Context manager yielding the current directory and restoring it on exit."""
    original = os.getcwd()
    try:
        yield original
    finally:
        os.chdir(original)
def await_true(wait_time, func, *args, **kwargs):
    """Poll *func* until it returns truthy or roughly *wait_time* seconds elapse.

    The keyword ``period`` (default 4 seconds) sets the interval between
    polls and is popped before the remaining args/kwargs are forwarded to
    *func*. Returns True on the first truthy result, else False.
    """
    period = kwargs.pop("period", 4)
    attempts = max(int(wait_time / period), 1)
    for _ in range(attempts):
        if func(*args, **kwargs):
            return True
        time.sleep(period)
    return False
### Background thread loops
@contextmanager
def pristine_loop():
    """
    Builds a clean IOLoop for using as a background request.
    Courtesy of Dask Distributed
    """
    # Detach any loop previously installed on this thread so the fresh loop
    # is unambiguously the current one.
    IOLoop.clear_instance()
    IOLoop.clear_current()
    loop = IOLoop()
    loop.make_current()
    assert IOLoop.current() is loop

    try:
        yield loop
    finally:
        # Best-effort close; tornado can raise if fds already disappeared.
        try:
            loop.close(all_fds=True)
        except (ValueError, KeyError, RuntimeError):
            pass
        IOLoop.clear_instance()
        IOLoop.clear_current()
@contextmanager
def loop_in_thread():
    """Run a pristine tornado IOLoop in a daemon background thread.

    Yields the loop only after a callback has run (proving it is live); on
    exit the loop is asked to stop and the thread joined with a 5 s timeout.
    """
    with pristine_loop() as loop:
        # Add the IOloop to a thread daemon
        thread = threading.Thread(target=loop.start, name="test IOLoop")
        thread.daemon = True
        thread.start()
        # Wait until a callback executes so we know the loop is running.
        loop_started = threading.Event()
        loop.add_callback(loop_started.set)
        loop_started.wait()

        try:
            yield loop
        finally:
            try:
                loop.add_callback(loop.stop)
                thread.join(timeout=5)
            except Exception:
                # Best-effort shutdown; the original bare `except:` also
                # swallowed KeyboardInterrupt/SystemExit, which must propagate.
                pass
def terminate_process(proc):
    """Interrupt *proc* (SIGINT / CTRL_BREAK) and then unconditionally kill it.

    Waits up to 15 seconds for the interrupt to take effect before the flat
    kill; a process that has already exited is left untouched.
    """
    if proc.poll() is not None:
        return

    # Keyboard-interrupt equivalent for the platform
    if sys.platform.startswith("win"):
        proc.send_signal(signal.CTRL_BREAK_EVENT)
    else:
        proc.send_signal(signal.SIGINT)

    try:
        deadline = time.time() + 15
        while proc.poll() is None and time.time() < deadline:
            time.sleep(0.02)
    # Flat kill
    finally:
        proc.kill()
@contextmanager
def popen(args, **kwargs):
    """
    Opens a background task.

    Code and idea from dask.distributed's testing suite
    https://github.com/dask/distributed

    Yields the running Popen object; on exit the process is interrupted,
    killed, and its stdout/stderr optionally dumped (always on exception).
    Recognized kwargs (popped before Popen): append_prefix (default True,
    prefix args[0] with the interpreter's bin dir), coverage (default False,
    wrap the command in `coverage run`), dump_stdout (default False).
    """
    args = list(args)

    # Bin prefix
    if sys.platform.startswith("win"):
        bin_prefix = os.path.join(sys.prefix, "Scripts")
    else:
        bin_prefix = os.path.join(sys.prefix, "bin")

    # Do we prefix with Python?
    if kwargs.pop("append_prefix", True):
        args[0] = os.path.join(bin_prefix, args[0])

    # Add coverage testing
    if kwargs.pop("coverage", False):
        coverage_dir = os.path.join(bin_prefix, "coverage")
        if not os.path.exists(coverage_dir):
            print("Could not find Python coverage, skipping cov.")
        else:
            src_dir = os.path.dirname(os.path.abspath(__file__))
            coverage_flags = [coverage_dir, "run", "--parallel-mode", "--source=" + src_dir]

            # If python script, skip the python bin
            if args[0].endswith("python"):
                args.pop(0)
            args = coverage_flags + args

    # Do we optionally dumpstdout?
    dump_stdout = kwargs.pop("dump_stdout", False)

    if sys.platform.startswith("win"):
        # Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP

    # Capture output so it can be dumped on failure.
    kwargs["stdout"] = subprocess.PIPE
    kwargs["stderr"] = subprocess.PIPE
    proc = subprocess.Popen(args, **kwargs)
    try:
        yield proc
    except Exception:
        # Always dump output when the caller's block failed.
        dump_stdout = True
        raise

    finally:
        try:
            terminate_process(proc)
        finally:
            output, error = proc.communicate()
            if dump_stdout:
                print("\n" + "-" * 30)
                print("\n|| Process command: {}".format(" ".join(args)))
                print("\n|| Process stderr: \n{}".format(error.decode()))
                print("-" * 30)
                print("\n|| Process stdout: \n{}".format(output.decode()))
                print("-" * 30)
def run_process(args, **kwargs):
    """Run a background process to completion and report success.

    Consumed kwargs:
        timeout: max seconds to wait for natural exit (default 30).
        interupt_after: if set, sleep this long and then interrupt/kill
            the process instead of waiting for it.

    Remaining kwargs are forwarded to `popen`. Returns True on exit code 0.
    """
    timeout = kwargs.pop("timeout", 30)
    terminate_after = kwargs.pop("interupt_after", None)
    with popen(args, **kwargs) as proc:
        if terminate_after is not None:
            time.sleep(terminate_after)
            terminate_process(proc)
        else:
            proc.wait(timeout=timeout)

        retcode = proc.poll()

    return retcode == 0
### Server testing mechanics
@pytest.fixture(scope="session")
def postgres_server():
    """Session fixture yielding a PostgresHarness.

    Prefers a live local Postgres on port 5432; otherwise boots a
    TemporaryPostgres (slower) and tears it down at session end.
    """
    if shutil.which("psql") is None:
        pytest.skip("Postgres is not installed on this server and no active postgres could be found.")

    temporary_storage = None
    psql = PostgresHarness({"database": {"port": 5432}})
    # psql = PostgresHarness({"database": {"port": 5432, "username": "qcarchive", "password": "mypass"}})

    if not psql.is_alive():
        print()
        print(
            f"Could not connect to a Postgres server at {psql.config.database_uri()}, this will increase time per test session by ~3 seconds."
        )
        print()
        temporary_storage = TemporaryPostgres()
        psql = temporary_storage.psql
        print("Using Database: ", psql.config.database_uri())

    yield psql

    if temporary_storage:
        temporary_storage.stop()
def reset_server_database(server):
    """Resets the server database for testing."""
    if "QCFRACTAL_RESET_TESTING_DB" in os.environ:
        storage = server.storage
        storage._clear_db(storage._project_name)
        storage._delete_DB_data(storage._project_name)
    # Force a heartbeat after database clean if a manager is present.
    if server.queue_socket:
        server.await_results()
@pytest.fixture(scope="module")
def test_server(request, postgres_server):
    """
    Builds a server instance with the event loop running in a thread.

    Creates (and resets) a dedicated database on the session-wide Postgres
    server and yields a non-started FractalSnowflake bound to it.
    """
    # Storage name
    storage_name = "test_qcfractal_server"
    postgres_server.create_database(storage_name)
    with FractalSnowflake(
        max_workers=0,
        # Reuse storage_name so the project name can never drift from the
        # database created above (previously a duplicated literal).
        storage_project_name=storage_name,
        storage_uri=postgres_server.database_uri(),
        start_server=False,
        reset_database=True,
    ) as server:
        # Clean and re-init the database
        yield server
def build_adapter_clients(mtype, storage_name="test_qcfractal_compute_server"):
    """Build the raw adapter client for the requested queue backend.

    mtype must be one of "pool", "dask", "fireworks" or "parsl"; any other
    value raises TypeError. storage_name is used only by the fireworks
    backend to derive its LaunchPad queue name. Optional backends are
    skipped via pytest.importorskip when the package is not installed.
    """
    # Basic boot and loop information
    if mtype == "pool":
        from multiprocessing import Pool, set_start_method
        from .cli.qcfractal_manager import _initialize_signals_process_pool
        adapter_client = Pool(processes=2, initializer=_initialize_signals_process_pool)
    elif mtype == "dask":
        dd = pytest.importorskip("dask.distributed")
        adapter_client = dd.Client(n_workers=2, threads_per_worker=1, resources={"process": 1})
        # Not super happy about this line, but shuts up dangling reference errors
        adapter_client._should_close_loop = False
    elif mtype == "fireworks":
        fireworks = pytest.importorskip("fireworks")
        fireworks_name = storage_name + "_fireworks_queue"
        adapter_client = fireworks.LaunchPad(name=fireworks_name, logdir="/tmp/", strm_lvl="CRITICAL")
    elif mtype == "parsl":
        parsl = pytest.importorskip("parsl")
        # Must only be a single thread as we run thread unsafe applications.
        adapter_client = parsl.config.Config(executors=[parsl.executors.threads.ThreadPoolExecutor(max_threads=1)])
    else:
        raise TypeError("fractal_compute_server: internal parametrize error")
    return adapter_client
@pytest.fixture(scope="module", params=_adapter_testing)
def adapter_client_fixture(request):
    """Parametrized raw adapter client, closed through its queue adapter on teardown."""
    client = build_adapter_clients(request.param)
    yield client
    # Do a final close with existing tech
    build_queue_adapter(client).close()
@pytest.fixture(scope="module", params=_adapter_testing)
def managed_compute_server(request, postgres_server):
    """
    A FractalServer with compute associated parametrize for all managers.

    Yields a (client, server, manager) triple; the manager's adapter is
    closed and the manager stopped on teardown.
    """
    storage_name = "test_qcfractal_compute_server"
    postgres_server.create_database(storage_name)
    adapter_client = build_adapter_clients(request.param, storage_name=storage_name)
    # Build a server with the thread in a outer context loop
    # Not all adapters play well with internal loops
    with loop_in_thread() as loop:
        server = FractalServer(
            port=find_open_port(),
            storage_project_name=storage_name,
            storage_uri=postgres_server.database_uri(),
            loop=loop,
            queue_socket=adapter_client,
            ssl_options=False,
            skip_storage_version_check=True,
        )
        # Clean and re-init the database
        reset_server_database(server)
        # Build Client and Manager
        from qcfractal.interface import FractalClient
        client = FractalClient(server)
        from qcfractal.queue import QueueManager
        manager = QueueManager(client, adapter_client)
        yield client, server, manager
        # Close down and clean the adapter
        manager.close_adapter()
        manager.stop()
@pytest.fixture(scope="module")
def fractal_compute_server(postgres_server):
    """
    A FractalServer with a local Pool manager.
    """
    # Name of the per-module scratch database.
    db_name = "test_qcfractal_compute_snowflake"
    postgres_server.create_database(db_name)
    snowflake = FractalSnowflake(
        max_workers=2,
        storage_project_name=db_name,
        storage_uri=postgres_server.database_uri(),
        reset_database=True,
        start_server=False,
    )
    with snowflake as server:
        reset_server_database(server)
        yield server
def build_socket_fixture(stype, server=None):
    """Generator helper backing the storage-socket fixtures.

    Yields a freshly cleared storage socket of the given type ("sqlalchemy"
    is the only supported value; anything else raises KeyError) backed by
    the given postgres server harness.
    """
    print("")
    # Check mongo
    storage_name = "test_qcfractal_storage" + stype
    # IP/port/drop table is specific to build
    if stype == "sqlalchemy":
        server.create_database(storage_name)
        storage = storage_socket_factory(server.database_uri(), storage_name, db_type=stype, sql_echo=False)
        # Clean and re-init the database
        storage._clear_db(storage_name)
    else:
        raise KeyError("Storage type {} not understood".format(stype))
    yield storage
    # Teardown: nothing to do for sqlalchemy yet (database drop is a TODO).
    if stype == "sqlalchemy":
        # todo: drop db
        # storage._clear_db(storage_name)
        pass
    else:
        raise KeyError("Storage type {} not understood".format(stype))
@pytest.fixture(scope="module", params=["sqlalchemy"])
def socket_fixture(request):
    """Storage socket fixture delegating setup/teardown to build_socket_fixture."""
    fixture_gen = build_socket_fixture(request.param)
    yield from fixture_gen
@pytest.fixture(scope="module")
def sqlalchemy_socket_fixture(request, postgres_server):
    """SQLAlchemy storage socket bound to the session-wide postgres server."""
    fixture_gen = build_socket_fixture("sqlalchemy", postgres_server)
    yield from fixture_gen
def live_fractal_or_skip():
    """
    Ensure Fractal live connection can be made
    First looks for a local staging server, then tries QCArchive.
    Skips the calling test when neither server is reachable.
    """
    connection_errors = (requests.exceptions.ConnectionError, ConnectionRefusedError)
    try:
        return FractalClient("localhost:7777", verify=False)
    except connection_errors:
        print("Failed to connect to localhost, trying MolSSI QCArchive.")
    # Probe the public endpoint first so a dead network skips quickly.
    try:
        requests.get("https://api.qcarchive.molssi.org:443", json={}, timeout=5)
        return FractalClient()
    except connection_errors:
        return pytest.skip("Could not make a connection to central Fractal server")
def df_compare(df1, df2, sort=False):
    """Checks equality even when columns contain numpy arrays, which .equals and == struggle with.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame or pandas.Series
        Objects to compare. DataFrame columns whose name starts with an
        underscore are treated as private and ignored; inputs are not mutated.
    sort : bool, optional
        If True, sort columns (DataFrame) or index (Series) before comparing.

    Returns
    -------
    bool
        True when both objects hold equal data.
    """
    if sort:
        if isinstance(df1, pd.DataFrame):
            df1 = df1.reindex(sorted(df1.columns), axis=1)
        elif isinstance(df1, pd.Series):
            df1 = df1.sort_index()
        if isinstance(df2, pd.DataFrame):
            df2 = df2.reindex(sorted(df2.columns), axis=1)
        elif isinstance(df2, pd.Series):
            df2 = df2.sort_index()

    def element_equal(e1, e2):
        """Equality for one cell, handling ndarrays, hashable molecules and NaN."""
        if isinstance(e1, np.ndarray):
            if not np.array_equal(e1, e2):
                return False
        # Molecule-like objects compare by hash. Duck-typed on get_hash so this
        # helper does not depend on importing the Molecule class here.
        elif hasattr(e1, "get_hash"):
            if not e1.get_hash() == e2.get_hash():
                return False
        # Because nan != nan
        elif isinstance(e1, float) and np.isnan(e1):
            if not np.isnan(e2):
                return False
        else:
            if not e1 == e2:
                return False
        return True

    if isinstance(df1, pd.Series):
        if not isinstance(df2, pd.Series):
            return False
        if len(df1) != len(df2):
            return False
        # .iloc gives positional access; the previous df1[i] was label-based
        # and broke on Series with non-integer indexes.
        for i in range(len(df1)):
            if not element_equal(df1.iloc[i], df2.iloc[i]):
                return False
        return True

    # Drop private (underscore-prefixed) columns on copies instead of mutating
    # the caller's DataFrames with inplace=True.
    df1 = df1[[col for col in df1.columns if not col.startswith("_")]]
    df2 = df2[[col for col in df2.columns if not col.startswith("_")]]
    # Plain list comparison avoids the ValueError numpy raises when the two
    # column sets have different lengths.
    if list(df1.columns) != list(df2.columns):
        return False
    if len(df1.index) != len(df2.index) or not all(df1.index.values == df2.index.values):
        return False
    for i in range(df1.shape[0]):
        for j in range(df1.shape[1]):
            if not element_equal(df1.iloc[i, j], df2.iloc[i, j]):
                return False
    return True
|
tracer.py | from bcc import BPF
import socket
import os
import seccompGenerator
import time
import threading
import grpc
from concurrent import futures
import service_pb2
import service_pb2_grpc
# Path to the eBPF C program defining one kprobe handler per syscall.
pathModules="modules.c"
# Newline-separated list of syscall names to attach kprobes to.
pathSyscalls="syscalls.txt"
# Open capture-file handles, keyed by UTS namespace (container hostname).
fileDesc={}
def load_modules():
    """Read and return the eBPF C program source from pathModules."""
    with open(pathModules, "r") as source:
        return source.read()
def load_syscalls():
    """Return the syscall names from pathSyscalls, one list entry per line (newlines kept)."""
    with open(pathSyscalls, "r") as listing:
        return listing.readlines()
def sendMessage(channel,utsMessage):
    """Report a newly seen UTS namespace to the collector over gRPC.

    On a confirmed response, closes the namespace's capture file and switches
    it to eBPF/seccomp enforcement via seccompGenerator.EbpfMode.

    Parameters
    ----------
    channel : grpc.Channel
        Open channel to the Comunication service.
    utsMessage : str
        UTS namespace (container hostname) to register; must be a key in fileDesc.
    """
    stub = service_pb2_grpc.ComunicationStub(channel)
    message = service_pb2.Uts(uts=utsMessage)
    response=""
    try:
        response = stub.AddUuts(message)
        #print(response)
        if response.confirm == 1:
            fdTmp=fileDesc[utsMessage]
            fdTmp.close()
            seccompGenerator.EbpfMode(utsMessage)
            print("Traced "+utsMessage)
        else:
            print("Error on gRPC ")
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; str() avoids a secondary TypeError when response is a
        # protobuf message rather than the initial empty string.
        print("ERROR " + str(response) + " done")
def main():
    """Attach syscall kprobes from the eBPF program and stream trace events.

    Every event not belonging to the host or the tracer's own container gets
    written to a per-namespace capture file; the first event from a namespace
    also registers it with the collector over gRPC in a background thread.
    """
    channel = grpc.insecure_channel('localhost:50051')
    logf = open("logTracer.log", "w")
    prog=load_modules()
    b = BPF(text=prog)
    syscalls=load_syscalls()
    # Attach one kprobe per listed syscall; failures are logged and skipped
    # (not every syscall name resolves on every kernel).
    for syscall in syscalls:
        syscall=syscall.strip()
        try:
            b.attach_kprobe(event=b.get_syscall_fnname(syscall), fn_name="syscall_"+syscall)
            #b.attach_kretprobe(event=b.get_syscall_fnname(syscall), fn_name="syscall_"+syscall)
            logf.write("Tracing "+syscall+'\n')
        except:
            logf.write("Failed to trace "+syscall+'\n')
    logf.close()
    hostnameContainer = socket.gethostname()
    hostnameHost= os.environ['HOST_HOSTNAME']
    print("Tracing")
    while 1:
        try:
            (task, pid, cpu, flags, ts, msg) = b.trace_fields()
        except KeyboardInterrupt:
            print("Exit")
            exit(0)
        msg=msg.decode("utf-8")
        task=task.decode("utf-8")
        # Events are emitted by the eBPF program as "<uts>:<syscall>".
        msg=msg.split(':')
        uts=msg[0]
        syscall=msg[1]
        if (uts!=hostnameHost and uts!=hostnameContainer):
            if uts not in fileDesc:
                # First event from this namespace: open its capture file and
                # register the namespace asynchronously.
                fd = open("Captures/"+uts+".cap", "w")
                fd.write("%s;%s;%s;%s" % ("TIME(s)", "COMM", "NAMESPACE", "SYSCALL\n"))
                fileDesc[uts] = fd
                x = threading.Thread(target=sendMessage, args=([channel,uts]))
                x.start()
            else:
                fd=fileDesc[uts]
                try:
                    fd.write("%f;%s;%s;%s\n" % (ts, task, uts, syscall))
                    #print("%f;%s;%s;%s" % (ts, task, uts, syscall))
                except Exception:
                    # The file may have been closed by sendMessage once the
                    # namespace switched to enforcement mode.
                    print("Error on "+uts+ " "+ task+ " "+syscall)
# Script entry point.
if __name__== "__main__":
    main()
|
multitester.py | """
Certbot Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies certbot repo and puts it on the instances
- Runs certbot tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
are needed, they need to be requested via online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_leauto_upgrades.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import argparse
import multiprocessing as mp
from multiprocessing import Manager
import os
import socket
import sys
import time
import traceback
import boto3
from botocore.exceptions import ClientError
from six.moves.urllib import error as urllib_error
from six.moves.urllib import request as urllib_request
import yaml
from fabric import Config
from fabric import Connection
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
                    help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
                    help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
                    help='profile for AWS (i.e. as in ~/.aws/certificates)')
parser.add_argument('test_script',
                    default='test_letsencrypt_auto_certonly_standalone.sh',
                    help='path of bash script in to deploy and run')
parser.add_argument('--repo',
                    default='https://github.com/letsencrypt/letsencrypt.git',
                    help='certbot git repo to use')
parser.add_argument('--branch',
                    default='~',
                    help='certbot git branch to trial')
parser.add_argument('--pull_request',
                    default='~',
                    help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
                    action='store_true',
                    help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
                    action='store_true',
                    help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
                    default='',
                    help="server from which to pull candidate release packages")
parser.add_argument('--killboulder',
                    action='store_true',
                    help="do not leave a persistent boulder server running")
parser.add_argument('--boulderonly',
                    action='store_true',
                    help="only make a boulder server")
# Parsed once at import time; functions below read cl_args as a module global.
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
# 'SET_BY_ENV' defers profile selection to the standard AWS env variables.
PROFILE = None if cl_args.aws_profile == 'SET_BY_ENV' else cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-072a9534772bec854' # premade shared boulder AMI 18.04LTS us-east-1
SECURITY_GROUP_NAME = 'certbot-security-group'
SENTINEL = None #queue kill signal
SUBNET_NAME = 'certbot-subnet'
class Status(object):
    """Possible statuses of client tests."""
    # Plain string sentinels; written verbatim to the results file.
    PASS = 'pass'
    FAIL = 'fail'
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def should_use_subnet(subnet):
    """Should we use the given subnet for these tests?

    We should if it is the default subnet for the availability zone or the
    subnet is named "certbot-subnet".
    """
    if not subnet.map_public_ip_on_launch:
        # Instances must receive a public IP or we cannot SSH into them.
        return False
    if subnet.default_for_az:
        return True
    # boto3 reports an untagged subnet's tags as None, not an empty list, so
    # guard the iteration.
    for tag in subnet.tags or []:
        if tag['Key'] == 'Name' and tag['Value'] == SUBNET_NAME:
            return True
    return False
def make_security_group(vpc):
    """Creates a security group in the given VPC."""
    # will fail if security group of GroupName already exists
    # cannot have duplicate SGs of the same name
    group = vpc.create_security_group(GroupName=SECURITY_GROUP_NAME,
                                      Description='security group for automated testing')
    ingress_rules = [
        ("tcp", 22, 22),        # ssh
        ("tcp", 80, 80),        # http
        ("tcp", 443, 443),      # https
        ("tcp", 4000, 4000),    # for boulder wfe (http) server
        ("udp", 60000, 61000),  # for mosh
    ]
    for protocol, from_port, to_port in ingress_rules:
        group.authorize_ingress(IpProtocol=protocol, CidrIp="0.0.0.0/0",
                                FromPort=from_port, ToPort=to_port)
    return group
def make_instance(ec2_client,
                  instance_name,
                  ami_id,
                  keyname,
                  security_group_id,
                  subnet_id,
                  machine_type='t2.micro',
                  userdata=""): #userdata contains bash or cloud-init script
    """Launch one named, tagged EC2 instance and return it."""
    launch_spec = dict(
        BlockDeviceMappings=_get_block_device_mappings(ec2_client, ami_id),
        ImageId=ami_id,
        SecurityGroupIds=[security_group_id],
        SubnetId=subnet_id,
        KeyName=keyname,
        MinCount=1,
        MaxCount=1,
        UserData=userdata,
        InstanceType=machine_type,
        # Name tag so instances are identifiable in the EC2 console.
        TagSpecifications=[{'ResourceType': 'instance',
                            'Tags': [{'Key': 'Name', 'Value': instance_name}]}],
    )
    return ec2_client.create_instances(**launch_spec)[0]
def _get_block_device_mappings(ec2_client, ami_id):
"""Returns the list of block device mappings to ensure cleanup.
This list sets connected EBS volumes to be deleted when the EC2
instance is terminated.
"""
# Not all devices use EBS, but the default value for DeleteOnTermination
# when the device does use EBS is true. See:
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-mapping.html
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
return [{'DeviceName': mapping['DeviceName'],
'Ebs': {'DeleteOnTermination': True}}
for mapping in ec2_client.Image(ami_id).block_device_mappings
if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
    """Blocks until server at urlstring can respond to http requests.

    Polls every wait_time seconds for at most timeout seconds. Returns True
    once the server answered and False when the timeout elapsed first (the
    previous implementation returned None either way, so a truthy/falsy
    return stays backward compatible while making timeouts detectable).
    """
    server_ready = False
    t_elapsed = 0
    while not server_ready and t_elapsed < timeout:
        try:
            # Progress indicator for long boots.
            sys.stdout.write('.')
            sys.stdout.flush()
            req = urllib_request.Request(urlstring)
            # Any successful response counts; the body/status is not inspected.
            urllib_request.urlopen(req)
            server_ready = True
        except urllib_error.URLError:
            pass
        time.sleep(wait_time)
        t_elapsed += wait_time
    return server_ready
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
    """Blocks until server at ipstring has an open port 22.

    Polls every wait_time seconds for at most timeout seconds. Each probe
    socket is closed in a finally clause, fixing the previous version which
    only closed the very last socket and leaked one per failed attempt.
    """
    reached = False
    t_elapsed = 0
    while not reached and t_elapsed < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((ipstring, 22))
            reached = True
        except socket.error:
            time.sleep(wait_time)
            t_elapsed += wait_time
        finally:
            sock.close()
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
    "Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
    state = booting_instance.state['Name']
    ip = booting_instance.public_ip_address
    while state != 'running' or ip is None:
        time.sleep(wait_time)
        # The instance needs to be reloaded to update its local attributes. See
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Instance.reload.
        booting_instance.reload()
        state = booting_instance.state['Name']
        ip = booting_instance.public_ip_address
    block_until_ssh_open(ip)
    # Extra settle time: sshd may accept TCP connections before logins work.
    time.sleep(extra_wait_time)
    return booting_instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(local_cxn, repo_url, log_dir):
    """clones master of repo_url"""
    # Start from a clean checkout, then tar it up for upload to the instances.
    local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
    local_cxn.local('cd %s && git clone %s letsencrypt'% (log_dir, repo_url))
    local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt'% log_dir)
def local_git_branch(local_cxn, repo_url, branch_name, log_dir):
    """clones branch <branch_name> of repo_url"""
    # Start from a clean checkout, then tar it up for upload to the instances.
    local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
    local_cxn.local('cd %s && git clone %s letsencrypt --branch %s --single-branch'%
        (log_dir, repo_url, branch_name))
    local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % log_dir)
def local_git_PR(local_cxn, repo_url, PRnumstr, log_dir, merge_master=True):
    """clones specified pull request from repo_url and optionally merges into master"""
    # Start from a clean checkout and fetch the PR head into a local branch.
    local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
    local_cxn.local('cd %s && git clone %s letsencrypt' % (log_dir, repo_url))
    local_cxn.local('cd %s && cd letsencrypt && '
        'git fetch origin pull/%s/head:lePRtest' % (log_dir, PRnumstr))
    local_cxn.local('cd %s && cd letsencrypt && git checkout lePRtest' % log_dir)
    if merge_master:
        local_cxn.local('cd %s && cd letsencrypt && git remote update origin' % log_dir)
        local_cxn.local('cd %s && cd letsencrypt && '
            'git merge origin/master -m "testmerge"' % log_dir)
    local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % log_dir)
def local_repo_to_remote(cxn, log_dir):
    """copies local tarball of repo to remote"""
    tarball = 'le.tar.gz'
    # Upload into the remote home directory, then unpack there.
    cxn.put(local=os.path.join(log_dir, tarball), remote='')
    cxn.run('tar xzf %s' % tarball)
def local_repo_clean(local_cxn, log_dir):
    """delete tarball"""
    tarball_path = os.path.join(log_dir, 'le.tar.gz')
    local_cxn.local('rm %s' % tarball_path)
def deploy_script(cxn, scriptpath, *args):
    """copies to remote and executes local script"""
    cxn.put(local=scriptpath, remote='', preserve_mode=True)
    script_name = os.path.split(scriptpath)[1]
    # Execute "./<script> <args...>" in the remote home directory.
    cxn.run('./' + script_name + ' ' + ' '.join(args))
def run_boulder(cxn):
    """Start the boulder ACME test server on the remote host via docker-compose."""
    # $GOPATH is expected to be exported by the caller (see config_and_launch_boulder).
    boulder_path = '$GOPATH/src/github.com/letsencrypt/boulder'
    cxn.run('cd %s && sudo docker-compose up -d' % boulder_path)
def config_and_launch_boulder(cxn, instance):
    """Run the boulder config script on the remote box, then launch boulder."""
    # NOTE(review): `instance` is unused; kept for signature compatibility.
    # yes, we're hardcoding the gopath. it's a predetermined AMI.
    with cxn.prefix('export GOPATH=/home/ubuntu/gopath'):
        deploy_script(cxn, 'scripts/boulder_config.sh')
        run_boulder(cxn)
def install_and_launch_certbot(cxn, instance, boulder_url, target, log_dir):
    """Upload the repo tarball and run cl_args.test_script on the instance.

    Exports the environment variables the test scripts expect (BOULDER_URL,
    PUBLIC_IP, PRIVATE_IP, PUBLIC_HOSTNAME, PIP_EXTRA_INDEX_URL, OS_TYPE)
    before executing the script.
    """
    local_repo_to_remote(cxn, log_dir)
    # This needs to be like this, I promise. 1) The env argument to run doesn't work.
    # See https://github.com/fabric/fabric/issues/1744. 2) prefix() sticks an && between
    # the commands, so it needs to be exports rather than no &&s in between for the script subshell.
    with cxn.prefix('export BOULDER_URL=%s && export PUBLIC_IP=%s && export PRIVATE_IP=%s && '
                    'export PUBLIC_HOSTNAME=%s && export PIP_EXTRA_INDEX_URL=%s && '
                    'export OS_TYPE=%s' %
                    (boulder_url,
                     instance.public_ip_address,
                     instance.private_ip_address,
                     instance.public_dns_name,
                     cl_args.alt_pip,
                     target['type'])):
        deploy_script(cxn, cl_args.test_script)
def grab_certbot_log(cxn):
    "grabs letsencrypt.log via cat into logged stdout"
    cxn.sudo('/bin/bash -l -i -c \'if [ -f "/var/log/letsencrypt/letsencrypt.log" ]; then ' +
             'cat "/var/log/letsencrypt/letsencrypt.log"; else echo "[novarlog]"; fi\'')
    # fallback file if /var/log is unwriteable...? correct?
    # NOTE(review): assumes certbot writes ./certbot.log in the login shell's
    # home directory as its fallback -- confirm against the test scripts.
    cxn.sudo('/bin/bash -l -i -c \'if [ -f ./certbot.log ]; then ' +
             'cat ./certbot.log; else echo "[nolocallog]"; fi\'')
def create_client_instance(ec2_client, target, security_group_id, subnet_id):
    """Create a single client instance for running tests.

    Machine type comes from the target config when given, otherwise it is
    derived from the virtualization type ('hvm' -> t2.medium, anything else
    is assumed 32-bit -> c1.medium).
    """
    if 'machine_type' in target:
        machine_type = target['machine_type']
    elif target['virt'] == 'hvm':
        machine_type = 't2.medium'
    else:
        # 32 bit systems
        machine_type = 'c1.medium'
    # dict.get replaces the needless `'userdata' in target.keys()` branch.
    userdata = target.get('userdata', '')

    name = 'le-%s'%target['name']
    print(name, end=" ")
    return make_instance(ec2_client,
                         name,
                         target['ami'],
                         KEYNAME,
                         machine_type=machine_type,
                         security_group_id=security_group_id,
                         subnet_id=subnet_id,
                         userdata=userdata)
def test_client_process(fab_config, inqueue, outqueue, boulder_url, log_dir):
    """Worker-process loop for parallel client testing.

    Pulls (index, instance_id, target) jobs from inqueue until SENTINEL,
    runs the test script on each instance, and pushes (index, target, status)
    tuples onto outqueue. Per-job stdout is redirected to a log file.
    """
    cur_proc = mp.current_process()
    for inreq in iter(inqueue.get, SENTINEL):
        ii, instance_id, target = inreq
        # Each client process is given its own session due to the suggestion at
        # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing.
        aws_session = boto3.session.Session(profile_name=PROFILE)
        ec2_client = aws_session.resource('ec2')
        instance = ec2_client.Instance(id=instance_id)
        #save all stdout to log file
        sys.stdout = open(log_dir+'/'+'%d_%s.log'%(ii,target['name']), 'w')
        print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
        instance = block_until_instance_ready(instance)
        print("server %s at %s"%(instance, instance.public_ip_address))
        host_string = "%s@%s"%(target['user'], instance.public_ip_address)
        print(host_string)
        with Connection(host_string, config=fab_config) as cxn:
            try:
                install_and_launch_certbot(cxn, instance, boulder_url, target, log_dir)
                outqueue.put((ii, target, Status.PASS))
                print("%s - %s SUCCESS"%(target['ami'], target['name']))
            except:
                # Deliberate catch-all: any failure marks this target FAIL and
                # the worker moves on to the next job.
                outqueue.put((ii, target, Status.FAIL))
                print("%s - %s FAIL"%(target['ami'], target['name']))
                traceback.print_exc(file=sys.stdout)
                pass
            # append server certbot.log to each per-machine output log
            print("\n\ncertbot.log\n" + "-"*80 + "\n")
            try:
                grab_certbot_log(cxn)
            except:
                # Best effort only; log retrieval failure must not fail the run.
                print("log fail\n")
                traceback.print_exc(file=sys.stdout)
                pass
def cleanup(cl_args, instances, targetlist, boulder_server, log_dir):
    """Terminate (or report) EC2 instances once the test run is over.

    Instances are kept alive only when --saveinstances was requested AND every
    target actually got an instance; otherwise everything is terminated.
    """
    print('Logs in ', log_dir)
    # If lengths of instances and targetlist aren't equal, instances failed to
    # start before running tests so leaving instances running for debugging
    # isn't very useful. Let's cleanup after ourselves instead.
    keep_boxes = cl_args.saveinstances and len(instances) == len(targetlist)
    if keep_boxes:
        # print login information for the boxes for debugging
        for ii, target in enumerate(targetlist):
            print(target['name'],
                  target['ami'],
                  "%s@%s"%(target['user'], instances[ii].public_ip_address))
    else:
        print('Terminating EC2 Instances')
        if cl_args.killboulder:
            boulder_server.terminate()
        for instance in instances:
            instance.terminate()
def main():
    """End-to-end orchestration: clone the repo locally, set up AWS networking,
    launch (or reuse) a boulder server, fan test jobs out to one process per
    target machine, then summarize results and clean up instances."""
    # Fabric library controlled through global env parameters
    fab_config = Config(overrides={
        "connect_kwargs": {
            "key_filename": [KEYFILE], # https://github.com/fabric/fabric/issues/2007
        },
        "run": {
            "echo": True,
            "pty": True,
        },
        "timeouts": {
            "connect": 10,
        },
    })
    # no network connection, so don't worry about closing this one.
    local_cxn = Connection('localhost', config=fab_config)
    # Set up local copy of git repo
    #-------------------------------------------------------------------------------
    log_dir = "letest-%d"%int(time.time()) #points to logging / working directory
    print("Making local dir for test repo and logs: %s"%log_dir)
    local_cxn.local('mkdir %s'%log_dir)
    try:
        # figure out what git object to test and locally create it in log_dir
        print("Making local git repo")
        if cl_args.pull_request != '~':
            print('Testing PR %s ' % cl_args.pull_request,
                  "MERGING into master" if cl_args.merge_master else "")
            local_git_PR(local_cxn, cl_args.repo, cl_args.pull_request, log_dir,
                cl_args.merge_master)
        elif cl_args.branch != '~':
            print('Testing branch %s of %s' % (cl_args.branch, cl_args.repo))
            local_git_branch(local_cxn, cl_args.repo, cl_args.branch, log_dir)
        else:
            print('Testing current branch of %s' % cl_args.repo, log_dir)
            local_git_clone(local_cxn, cl_args.repo, log_dir)
    except BaseException:
        print("FAIL: trouble with git repo")
        traceback.print_exc()
        exit(1)
    # Set up EC2 instances
    #-------------------------------------------------------------------------------
    configdata = yaml.safe_load(open(cl_args.config_file, 'r'))
    targetlist = configdata['targets']
    print('Testing against these images: [%d total]'%len(targetlist))
    for target in targetlist:
        print(target['ami'], target['name'])
    print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
    aws_session = boto3.session.Session(profile_name=PROFILE)
    ec2_client = aws_session.resource('ec2')
    print("Determining Subnet")
    # for/else: the else branch runs only when no usable subnet was found.
    for subnet in ec2_client.subnets.all():
        if should_use_subnet(subnet):
            subnet_id = subnet.id
            vpc_id = subnet.vpc.id
            break
    else:
        print("No usable subnet exists!")
        print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
        print("that maps public IPv4 addresses to instances launched in the subnet.")
        sys.exit(1)
    print("Making Security Group")
    vpc = ec2_client.Vpc(vpc_id)
    sg_exists = False
    for sg in vpc.security_groups.all():
        if sg.group_name == SECURITY_GROUP_NAME:
            security_group_id = sg.id
            sg_exists = True
            print(" %s already exists"%SECURITY_GROUP_NAME)
    if not sg_exists:
        security_group_id = make_security_group(vpc).id
        # Give AWS time to propagate the new security group.
        time.sleep(30)
    boulder_preexists = False
    boulder_servers = ec2_client.instances.filter(Filters=[
        {'Name': 'tag:Name', 'Values': ['le-boulderserver']},
        {'Name': 'instance-state-name', 'Values': ['running']}])
    boulder_server = next(iter(boulder_servers), None)
    print("Requesting Instances...")
    if boulder_server:
        print("Found existing boulder server:", boulder_server)
        boulder_preexists = True
    else:
        print("Can't find a boulder server, starting one...")
        boulder_server = make_instance(ec2_client,
                                       'le-boulderserver',
                                       BOULDER_AMI,
                                       KEYNAME,
                                       machine_type='t2.micro',
                                       #machine_type='t2.medium',
                                       security_group_id=security_group_id,
                                       subnet_id=subnet_id)
    instances = []
    try:
        if not cl_args.boulderonly:
            print("Creating instances: ", end="")
            for target in targetlist:
                instances.append(
                    create_client_instance(ec2_client, target,
                                           security_group_id, subnet_id)
                )
            print()
        # Configure and launch boulder server
        #-------------------------------------------------------------------------------
        print("Waiting on Boulder Server")
        boulder_server = block_until_instance_ready(boulder_server)
        print(" server %s"%boulder_server)
        # host_string defines the ssh user and host for connection
        host_string = "ubuntu@%s"%boulder_server.public_ip_address
        print("Boulder Server at (SSH):", host_string)
        if not boulder_preexists:
            print("Configuring and Launching Boulder")
            with Connection(host_string, config=fab_config) as boulder_cxn:
                config_and_launch_boulder(boulder_cxn, boulder_server)
            # blocking often unnecessary, but cheap EC2 VMs can get very slow
            block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
                                   wait_time=10, timeout=500)
        # Clients reach boulder via the private IP (same VPC).
        boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
        print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
        print("Boulder Server at (EC2 private ip): %s"%boulder_url)
        if cl_args.boulderonly:
            sys.exit(0)
        # Install and launch client scripts in parallel
        #-------------------------------------------------------------------------------
        print("Uploading and running test script in parallel: %s"%cl_args.test_script)
        print("Output routed to log files in %s"%log_dir)
        # (Advice: always use Manager.Queue, never regular multiprocessing.Queue
        # the latter has implementation flaws that deadlock it in some circumstances)
        manager = Manager()
        outqueue = manager.Queue()
        inqueue = manager.Queue()
        # launch as many processes as clients to test
        num_processes = len(targetlist)
        jobs = [] #keep a reference to current procs
        # initiate process execution
        client_process_args=(fab_config, inqueue, outqueue, boulder_url, log_dir)
        for i in range(num_processes):
            p = mp.Process(target=test_client_process, args=client_process_args)
            jobs.append(p)
            p.daemon = True  # kills subprocesses if parent is killed
            p.start()
        # fill up work queue
        for ii, target in enumerate(targetlist):
            inqueue.put((ii, instances[ii].id, target))
        # add SENTINELs to end client processes
        for i in range(num_processes):
            inqueue.put(SENTINEL)
        print('Waiting on client processes', end='')
        for p in jobs:
            while p.is_alive():
                p.join(5 * 60)
                # Regularly print output to keep Travis happy
                print('.', end='')
                sys.stdout.flush()
        print()
        # add SENTINEL to output queue
        outqueue.put(SENTINEL)
        # clean up
        local_repo_clean(local_cxn, log_dir)
        # print and save summary results
        results_file = open(log_dir+'/results', 'w')
        outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
        outputs.sort(key=lambda x: x[0])
        failed = False
        for outq in outputs:
            ii, target, status = outq
            if status == Status.FAIL:
                failed = True
            print('%d %s %s'%(ii, target['name'], status))
            results_file.write('%d %s %s\n'%(ii, target['name'], status))
        # Fewer results than workers means some machines never reported back.
        if len(outputs) != num_processes:
            failed = True
            failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
                'Tests should be rerun.'
            print(failure_message)
            results_file.write(failure_message + '\n')
        results_file.close()
        if failed:
            sys.exit(1)
    finally:
        cleanup(cl_args, instances, targetlist, boulder_server, log_dir)
# Script entry point.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.